aioredis-1.0.0/0000755000175000017500000000000013203634127014110 5ustar alexeyalexey00000000000000aioredis-1.0.0/aioredis/0000755000175000017500000000000013203634127015707 5ustar alexeyalexey00000000000000aioredis-1.0.0/aioredis/commands/0000755000175000017500000000000013203634127017510 5ustar alexeyalexey00000000000000aioredis-1.0.0/aioredis/commands/generic.py0000644000175000017500000002444413203624357021512 0ustar alexeyalexey00000000000000from aioredis.util import wait_convert, wait_ok, _NOTSET, _ScanIter class GenericCommandsMixin: """Generic commands mixin. For commands details see: http://redis.io/commands/#generic """ def delete(self, key, *keys): """Delete a key.""" fut = self.execute(b'DEL', key, *keys) return wait_convert(fut, int) def dump(self, key): """Dump a key.""" return self.execute(b'DUMP', key) def exists(self, key, *keys): """Check if key(s) exists. .. versionchanged:: v0.2.9 Accept multiple keys; **return** type **changed** from bool to int. """ return self.execute(b'EXISTS', key, *keys) def expire(self, key, timeout): """Set a timeout on key. if timeout is float it will be multiplied by 1000 coerced to int and passed to `pexpire` method. Otherwise raises TypeError if timeout argument is not int. """ if isinstance(timeout, float): return self.pexpire(key, int(timeout * 1000)) if not isinstance(timeout, int): raise TypeError( "timeout argument must be int, not {!r}".format(timeout)) fut = self.execute(b'EXPIRE', key, timeout) return wait_convert(fut, bool) def expireat(self, key, timestamp): """Set expire timestamp on a key. if timeout is float it will be multiplied by 1000 coerced to int and passed to `pexpireat` method. Otherwise raises TypeError if timestamp argument is not int. 
""" if isinstance(timestamp, float): return self.pexpireat(key, int(timestamp * 1000)) if not isinstance(timestamp, int): raise TypeError("timestamp argument must be int, not {!r}" .format(timestamp)) fut = self.execute(b'EXPIREAT', key, timestamp) return wait_convert(fut, bool) def keys(self, pattern, *, encoding=_NOTSET): """Returns all keys matching pattern.""" return self.execute(b'KEYS', pattern, encoding=encoding) def migrate(self, host, port, key, dest_db, timeout, *, copy=False, replace=False): """Atomically transfer a key from a Redis instance to another one.""" if not isinstance(host, str): raise TypeError("host argument must be str") if not isinstance(timeout, int): raise TypeError("timeout argument must be int") if not isinstance(dest_db, int): raise TypeError("dest_db argument must be int") if not host: raise ValueError("Got empty host") if dest_db < 0: raise ValueError("dest_db must be greater equal 0") if timeout < 0: raise ValueError("timeout must be greater equal 0") flags = [] if copy: flags.append(b'COPY') if replace: flags.append(b'REPLACE') fut = self.execute(b'MIGRATE', host, port, key, dest_db, timeout, *flags) return wait_ok(fut) def migrate_keys(self, host, port, keys, dest_db, timeout, *, copy=False, replace=False): """Atomically transfer keys from one Redis instance to another one. Keys argument must be list/tuple of keys to migrate. 
""" if not isinstance(host, str): raise TypeError("host argument must be str") if not isinstance(timeout, int): raise TypeError("timeout argument must be int") if not isinstance(dest_db, int): raise TypeError("dest_db argument must be int") if not isinstance(keys, (list, tuple)): raise TypeError("keys argument must be list or tuple") if not host: raise ValueError("Got empty host") if dest_db < 0: raise ValueError("dest_db must be greater equal 0") if timeout < 0: raise ValueError("timeout must be greater equal 0") if not keys: raise ValueError("keys must not be empty") flags = [] if copy: flags.append(b'COPY') if replace: flags.append(b'REPLACE') flags.append(b'KEYS') flags.extend(keys) fut = self.execute(b'MIGRATE', host, port, "", dest_db, timeout, *flags) return wait_ok(fut) def move(self, key, db): """Move key from currently selected database to specified destination. :raises TypeError: if db is not int :raises ValueError: if db is less then 0 """ if not isinstance(db, int): raise TypeError("db argument must be int, not {!r}".format(db)) if db < 0: raise ValueError("db argument must be not less then 0, {!r}" .format(db)) fut = self.execute(b'MOVE', key, db) return wait_convert(fut, bool) def object_refcount(self, key): """Returns the number of references of the value associated with the specified key (OBJECT REFCOUNT). """ return self.execute(b'OBJECT', b'REFCOUNT', key) def object_encoding(self, key): """Returns the kind of internal representation used in order to store the value associated with a key (OBJECT ENCODING). """ # TODO: set default encoding to 'utf-8' return self.execute(b'OBJECT', b'ENCODING', key) def object_idletime(self, key): """Returns the number of seconds since the object is not requested by read or write operations (OBJECT IDLETIME). 
""" return self.execute(b'OBJECT', b'IDLETIME', key) def persist(self, key): """Remove the existing timeout on key.""" fut = self.execute(b'PERSIST', key) return wait_convert(fut, bool) def pexpire(self, key, timeout): """Set a milliseconds timeout on key. :raises TypeError: if timeout is not int """ if not isinstance(timeout, int): raise TypeError("timeout argument must be int, not {!r}" .format(timeout)) fut = self.execute(b'PEXPIRE', key, timeout) return wait_convert(fut, bool) def pexpireat(self, key, timestamp): """Set expire timestamp on key, timestamp in milliseconds. :raises TypeError: if timeout is not int """ if not isinstance(timestamp, int): raise TypeError("timestamp argument must be int, not {!r}" .format(timestamp)) fut = self.execute(b'PEXPIREAT', key, timestamp) return wait_convert(fut, bool) def pttl(self, key): """Returns time-to-live for a key, in milliseconds. Special return values (starting with Redis 2.8): * command returns -2 if the key does not exist. * command returns -1 if the key exists but has no associated expire. """ # TODO: maybe convert negative values to: # -2 to None - no key # -1 to False - no expire return self.execute(b'PTTL', key) def randomkey(self, *, encoding=_NOTSET): """Return a random key from the currently selected database.""" return self.execute(b'RANDOMKEY', encoding=encoding) def rename(self, key, newkey): """Renames key to newkey. :raises ValueError: if key == newkey """ if key == newkey: raise ValueError("key and newkey are the same") fut = self.execute(b'RENAME', key, newkey) return wait_ok(fut) def renamenx(self, key, newkey): """Renames key to newkey only if newkey does not exist. 
:raises ValueError: if key == newkey """ if key == newkey: raise ValueError("key and newkey are the same") fut = self.execute(b'RENAMENX', key, newkey) return wait_convert(fut, bool) def restore(self, key, ttl, value): """Creates a key associated with a value that is obtained via DUMP.""" return self.execute(b'RESTORE', key, ttl, value) def scan(self, cursor=0, match=None, count=None): """Incrementally iterate the keys space. Usage example: >>> match = 'something*' >>> cur = b'0' >>> while cur: ... cur, keys = await redis.scan(cur, match=match) ... for key in keys: ... print('Matched:', key) """ args = [] if match is not None: args += [b'MATCH', match] if count is not None: args += [b'COUNT', count] fut = self.execute(b'SCAN', cursor, *args) return wait_convert(fut, lambda o: (int(o[0]), o[1])) def iscan(self, *, match=None, count=None): """Incrementally iterate the keys space using async for. Usage example: >>> async for key in redis.iscan(match='something*'): ... print('Matched:', key) """ return _ScanIter(lambda cur: self.scan(cur, match=match, count=count)) def sort(self, key, *get_patterns, by=None, offset=None, count=None, asc=None, alpha=False, store=None): """Sort the elements in a list, set or sorted set.""" args = [] if by is not None: args += [b'BY', by] if offset is not None and count is not None: args += [b'LIMIT', offset, count] if get_patterns: args += sum(([b'GET', pattern] for pattern in get_patterns), []) if asc is not None: args += [asc is True and b'ASC' or b'DESC'] if alpha: args += [b'ALPHA'] if store is not None: args += [b'STORE', store] return self.execute(b'SORT', key, *args) def ttl(self, key): """Returns time-to-live for a key, in seconds. Special return values (starting with Redis 2.8): * command returns -2 if the key does not exist. * command returns -1 if the key exists but has no associated expire. 
""" # TODO: maybe convert negative values to: # -2 to None - no key # -1 to False - no expire return self.execute(b'TTL', key) def type(self, key): """Returns the string representation of the value's type stored at key. """ # NOTE: for non-existent keys TYPE returns b'none' return self.execute(b'TYPE', key) aioredis-1.0.0/aioredis/commands/hyperloglog.py0000644000175000017500000000141613203624357022423 0ustar alexeyalexey00000000000000from aioredis.util import wait_ok class HyperLogLogCommandsMixin: """HyperLogLog commands mixin. For commands details see: http://redis.io/commands#hyperloglog """ def pfadd(self, key, value, *values): """Adds the specified elements to the specified HyperLogLog.""" return self.execute(b'PFADD', key, value, *values) def pfcount(self, key, *keys): """Return the approximated cardinality of the set(s) observed by the HyperLogLog at key(s). """ return self.execute(b'PFCOUNT', key, *keys) def pfmerge(self, destkey, sourcekey, *sourcekeys): """Merge N different HyperLogLogs into a single one.""" fut = self.execute(b'PFMERGE', destkey, sourcekey, *sourcekeys) return wait_ok(fut) aioredis-1.0.0/aioredis/commands/string.py0000644000175000017500000002113713203624357021400 0ustar alexeyalexey00000000000000from aioredis.util import wait_convert, wait_ok, _NOTSET class StringCommandsMixin: """String commands mixin. For commands details see: http://redis.io/commands/#string """ SET_IF_NOT_EXIST = 'SET_IF_NOT_EXIST' # NX SET_IF_EXIST = 'SET_IF_EXIST' # XX def append(self, key, value): """Append a value to key.""" return self.execute(b'APPEND', key, value) def bitcount(self, key, start=None, end=None): """Count set bits in a string. :raises TypeError: if only start or end specified. 
""" if start is None and end is not None: raise TypeError("both start and stop must be specified") elif start is not None and end is None: raise TypeError("both start and stop must be specified") elif start is not None and end is not None: args = (start, end) else: args = () return self.execute(b'BITCOUNT', key, *args) def bitop_and(self, dest, key, *keys): """Perform bitwise AND operations between strings.""" return self.execute(b'BITOP', b'AND', dest, key, *keys) def bitop_or(self, dest, key, *keys): """Perform bitwise OR operations between strings.""" return self.execute(b'BITOP', b'OR', dest, key, *keys) def bitop_xor(self, dest, key, *keys): """Perform bitwise XOR operations between strings.""" return self.execute(b'BITOP', b'XOR', dest, key, *keys) def bitop_not(self, dest, key): """Perform bitwise NOT operations between strings.""" return self.execute(b'BITOP', b'NOT', dest, key) def bitpos(self, key, bit, start=None, end=None): """Find first bit set or clear in a string. :raises ValueError: if bit is not 0 or 1 """ if bit not in (1, 0): raise ValueError("bit argument must be either 1 or 0") bytes_range = [] if start is not None: bytes_range.append(start) if end is not None: if start is None: bytes_range = [0, end] else: bytes_range.append(end) return self.execute(b'BITPOS', key, bit, *bytes_range) def decr(self, key): """Decrement the integer value of a key by one.""" return self.execute(b'DECR', key) def decrby(self, key, decrement): """Decrement the integer value of a key by the given number. :raises TypeError: if decrement is not int """ if not isinstance(decrement, int): raise TypeError("decrement must be of type int") return self.execute(b'DECRBY', key, decrement) def get(self, key, *, encoding=_NOTSET): """Get the value of a key.""" return self.execute(b'GET', key, encoding=encoding) def getbit(self, key, offset): """Returns the bit value at offset in the string value stored at key. 
:raises TypeError: if offset is not int :raises ValueError: if offset is less then 0 """ if not isinstance(offset, int): raise TypeError("offset argument must be int") if offset < 0: raise ValueError("offset must be greater equal 0") return self.execute(b'GETBIT', key, offset) def getrange(self, key, start, end, *, encoding=_NOTSET): """Get a substring of the string stored at a key. :raises TypeError: if start or end is not int """ if not isinstance(start, int): raise TypeError("start argument must be int") if not isinstance(end, int): raise TypeError("end argument must be int") return self.execute(b'GETRANGE', key, start, end, encoding=encoding) def getset(self, key, value, *, encoding=_NOTSET): """Set the string value of a key and return its old value.""" return self.execute(b'GETSET', key, value, encoding=encoding) def incr(self, key): """Increment the integer value of a key by one.""" return self.execute(b'INCR', key) def incrby(self, key, increment): """Increment the integer value of a key by the given amount. :raises TypeError: if increment is not int """ if not isinstance(increment, int): raise TypeError("increment must be of type int") return self.execute(b'INCRBY', key, increment) def incrbyfloat(self, key, increment): """Increment the float value of a key by the given amount. :raises TypeError: if increment is not int """ if not isinstance(increment, float): raise TypeError("increment must be of type int") fut = self.execute(b'INCRBYFLOAT', key, increment) return wait_convert(fut, float) def mget(self, key, *keys, encoding=_NOTSET): """Get the values of all the given keys.""" return self.execute(b'MGET', key, *keys, encoding=encoding) def mset(self, key, value, *pairs): """Set multiple keys to multiple values. 
:raises TypeError: if len of pairs is not event number """ if len(pairs) % 2 != 0: raise TypeError("length of pairs must be even number") fut = self.execute(b'MSET', key, value, *pairs) return wait_ok(fut) def msetnx(self, key, value, *pairs): """Set multiple keys to multiple values, only if none of the keys exist. :raises TypeError: if len of pairs is not event number """ if len(pairs) % 2 != 0: raise TypeError("length of pairs must be even number") return self.execute(b'MSETNX', key, value, *pairs) def psetex(self, key, milliseconds, value): """Set the value and expiration in milliseconds of a key. :raises TypeError: if milliseconds is not int """ if not isinstance(milliseconds, int): raise TypeError("milliseconds argument must be int") fut = self.execute(b'PSETEX', key, milliseconds, value) return wait_ok(fut) def set(self, key, value, *, expire=0, pexpire=0, exist=None): """Set the string value of a key. :raises TypeError: if expire or pexpire is not int """ if expire and not isinstance(expire, int): raise TypeError("expire argument must be int") if pexpire and not isinstance(pexpire, int): raise TypeError("pexpire argument must be int") args = [] if expire: args[:] = [b'EX', expire] if pexpire: args[:] = [b'PX', pexpire] if exist is self.SET_IF_EXIST: args.append(b'XX') elif exist is self.SET_IF_NOT_EXIST: args.append(b'NX') fut = self.execute(b'SET', key, value, *args) return wait_ok(fut) def setbit(self, key, offset, value): """Sets or clears the bit at offset in the string value stored at key. :raises TypeError: if offset is not int :raises ValueError: if offset is less then 0 or value is not 0 or 1 """ if not isinstance(offset, int): raise TypeError("offset argument must be int") if offset < 0: raise ValueError("offset must be greater equal 0") if value not in (0, 1): raise ValueError("value argument must be either 1 or 0") return self.execute(b'SETBIT', key, offset, value) def setex(self, key, seconds, value): """Set the value and expiration of a key. 
If seconds is float it will be multiplied by 1000 coerced to int and passed to `psetex` method. :raises TypeError: if seconds is neither int nor float """ if isinstance(seconds, float): return self.psetex(key, int(seconds * 1000), value) if not isinstance(seconds, int): raise TypeError("milliseconds argument must be int") fut = self.execute(b'SETEX', key, seconds, value) return wait_ok(fut) def setnx(self, key, value): """Set the value of a key, only if the key does not exist.""" fut = self.execute(b'SETNX', key, value) return wait_convert(fut, bool) def setrange(self, key, offset, value): """Overwrite part of a string at key starting at the specified offset. :raises TypeError: if offset is not int :raises ValueError: if offset less then 0 """ if not isinstance(offset, int): raise TypeError("offset argument must be int") if offset < 0: raise ValueError("offset must be greater equal 0") return self.execute(b'SETRANGE', key, offset, value) def strlen(self, key): """Get the length of the value stored in a key.""" return self.execute(b'STRLEN', key) aioredis-1.0.0/aioredis/commands/scripting.py0000644000175000017500000000224313203624357022071 0ustar alexeyalexey00000000000000from aioredis.util import wait_ok class ScriptingCommandsMixin: """Set commands mixin. 
For commands details see: http://redis.io/commands#scripting """ def eval(self, script, keys=[], args=[]): """Execute a Lua script server side.""" return self.execute(b'EVAL', script, len(keys), *(keys + args)) def evalsha(self, digest, keys=[], args=[]): """Execute a Lua script server side by its SHA1 digest.""" return self.execute(b'EVALSHA', digest, len(keys), *(keys + args)) def script_exists(self, digest, *digests): """Check existence of scripts in the script cache.""" return self.execute(b'SCRIPT', b'EXISTS', digest, *digests) def script_kill(self): """Kill the script currently in execution.""" fut = self.execute(b'SCRIPT', b'KILL') return wait_ok(fut) def script_flush(self): """Remove all the scripts from the script cache.""" fut = self.execute(b"SCRIPT", b"FLUSH") return wait_ok(fut) def script_load(self, script): """Load the specified Lua script into the script cache.""" return self.execute(b"SCRIPT", b"LOAD", script) aioredis-1.0.0/aioredis/commands/sorted_set.py0000644000175000017500000004314013203624357022243 0ustar alexeyalexey00000000000000from aioredis.util import wait_convert, _NOTSET, _ScanIter class SortedSetCommandsMixin: """Sorted Sets commands mixin. For commands details see: http://redis.io/commands/#sorted_set """ ZSET_EXCLUDE_MIN = 'ZSET_EXCLUDE_MIN' ZSET_EXCLUDE_MAX = 'ZSET_EXCLUDE_MAX' ZSET_EXCLUDE_BOTH = 'ZSET_EXCLUDE_BOTH' ZSET_AGGREGATE_SUM = 'ZSET_AGGREGATE_SUM' ZSET_AGGREGATE_MIN = 'ZSET_AGGREGATE_MIN' ZSET_AGGREGATE_MAX = 'ZSET_AGGREGATE_MAX' ZSET_IF_NOT_EXIST = 'ZSET_IF_NOT_EXIST' # NX ZSET_IF_EXIST = 'ZSET_IF_EXIST' # XX def zadd(self, key, score, member, *pairs, exist=None): """Add one or more members to a sorted set or update its score. 
:raises TypeError: score not int or float :raises TypeError: length of pairs is not even number """ if not isinstance(score, (int, float)): raise TypeError("score argument must be int or float") if len(pairs) % 2 != 0: raise TypeError("length of pairs must be even number") scores = (item for i, item in enumerate(pairs) if i % 2 == 0) if any(not isinstance(s, (int, float)) for s in scores): raise TypeError("all scores must be int or float") args = [] if exist is self.ZSET_IF_EXIST: args.append(b'XX') elif exist is self.ZSET_IF_NOT_EXIST: args.append(b'NX') args.extend([score, member]) if pairs: args.extend(pairs) return self.execute(b'ZADD', key, *args) def zcard(self, key): """Get the number of members in a sorted set.""" return self.execute(b'ZCARD', key) def zcount(self, key, min=float('-inf'), max=float('inf'), *, exclude=None): """Count the members in a sorted set with scores within the given values. :raises TypeError: min or max is not float or int :raises ValueError: if min grater then max """ if not isinstance(min, (int, float)): raise TypeError("min argument must be int or float") if not isinstance(max, (int, float)): raise TypeError("max argument must be int or float") if min > max: raise ValueError("min could not be grater then max") return self.execute(b'ZCOUNT', key, *_encode_min_max(exclude, min, max)) def zincrby(self, key, increment, member): """Increment the score of a member in a sorted set. :raises TypeError: increment is not float or int """ if not isinstance(increment, (int, float)): raise TypeError("increment argument must be int or float") fut = self.execute(b'ZINCRBY', key, increment, member) return wait_convert(fut, int_or_float) def zinterstore(self, destkey, key, *keys, with_weights=False, aggregate=None): """Intersect multiple sorted sets and store result in a new key. 
:param bool with_weights: when set to true each key must be a tuple in form of (key, weight) """ keys = (key,) + keys numkeys = len(keys) args = [] if with_weights: assert all(isinstance(val, (list, tuple)) for val in keys), ( "All key arguments must be (key, weight) tuples") weights = ['WEIGHTS'] for key, weight in keys: args.append(key) weights.append(weight) args.extend(weights) else: args.extend(keys) if aggregate is self.ZSET_AGGREGATE_SUM: args.extend(('AGGREGATE', 'SUM')) elif aggregate is self.ZSET_AGGREGATE_MAX: args.extend(('AGGREGATE', 'MAX')) elif aggregate is self.ZSET_AGGREGATE_MIN: args.extend(('AGGREGATE', 'MIN')) fut = self.execute(b'ZINTERSTORE', destkey, numkeys, *args) return fut def zlexcount(self, key, min=b'-', max=b'+', include_min=True, include_max=True): """Count the number of members in a sorted set between a given lexicographical range. :raises TypeError: if min is not bytes :raises TypeError: if max is not bytes """ if not isinstance(min, bytes): # FIXME raise TypeError("min argument must be bytes") if not isinstance(max, bytes): # FIXME Why only bytes? raise TypeError("max argument must be bytes") if not min == b'-': min = (b'[' if include_min else b'(') + min if not max == b'+': max = (b'[' if include_max else b'(') + max return self.execute(b'ZLEXCOUNT', key, min, max) def zrange(self, key, start=0, stop=-1, withscores=False, encoding=_NOTSET): """Return a range of members in a sorted set, by index. 
:raises TypeError: if start is not int :raises TypeError: if stop is not int """ if not isinstance(start, int): raise TypeError("start argument must be int") if not isinstance(stop, int): raise TypeError("stop argument must be int") if withscores: args = [b'WITHSCORES'] else: args = [] fut = self.execute(b'ZRANGE', key, start, stop, *args, encoding=encoding) if withscores: return wait_convert(fut, pairs_int_or_float) return fut def zrangebylex(self, key, min=b'-', max=b'+', include_min=True, include_max=True, offset=None, count=None, encoding=_NOTSET): """Return a range of members in a sorted set, by lexicographical range. :raises TypeError: if min is not bytes :raises TypeError: if max is not bytes :raises TypeError: if both offset and count are not specified :raises TypeError: if offset is not bytes :raises TypeError: if count is not bytes """ if not isinstance(min, bytes): # FIXME raise TypeError("min argument must be bytes") if not isinstance(max, bytes): # FIXME raise TypeError("max argument must be bytes") if not min == b'-': min = (b'[' if include_min else b'(') + min if not max == b'+': max = (b'[' if include_max else b'(') + max if (offset is not None and count is None) or \ (count is not None and offset is None): raise TypeError("offset and count must both be specified") if offset is not None and not isinstance(offset, int): raise TypeError("offset argument must be int") if count is not None and not isinstance(count, int): raise TypeError("count argument must be int") args = [] if offset is not None and count is not None: args.extend([b'LIMIT', offset, count]) return self.execute(b'ZRANGEBYLEX', key, min, max, *args, encoding=encoding) def zrangebyscore(self, key, min=float('-inf'), max=float('inf'), withscores=False, offset=None, count=None, *, exclude=None, encoding=_NOTSET): """Return a range of members in a sorted set, by score. 
:raises TypeError: if min or max is not float or int :raises TypeError: if both offset and count are not specified :raises TypeError: if offset is not int :raises TypeError: if count is not int """ if not isinstance(min, (int, float)): raise TypeError("min argument must be int or float") if not isinstance(max, (int, float)): raise TypeError("max argument must be int or float") if (offset is not None and count is None) or \ (count is not None and offset is None): raise TypeError("offset and count must both be specified") if offset is not None and not isinstance(offset, int): raise TypeError("offset argument must be int") if count is not None and not isinstance(count, int): raise TypeError("count argument must be int") min, max = _encode_min_max(exclude, min, max) args = [] if withscores: args = [b'WITHSCORES'] if offset is not None and count is not None: args.extend([b'LIMIT', offset, count]) fut = self.execute(b'ZRANGEBYSCORE', key, min, max, *args, encoding=encoding) if withscores: return wait_convert(fut, pairs_int_or_float) return fut def zrank(self, key, member): """Determine the index of a member in a sorted set.""" return self.execute(b'ZRANK', key, member) def zrem(self, key, member, *members): """Remove one or more members from a sorted set.""" return self.execute(b'ZREM', key, member, *members) def zremrangebylex(self, key, min=b'-', max=b'+', include_min=True, include_max=True): """Remove all members in a sorted set between the given lexicographical range. 
:raises TypeError: if min is not bytes :raises TypeError: if max is not bytes """ if not isinstance(min, bytes): # FIXME raise TypeError("min argument must be bytes") if not isinstance(max, bytes): # FIXME raise TypeError("max argument must be bytes") if not min == b'-': min = (b'[' if include_min else b'(') + min if not max == b'+': max = (b'[' if include_max else b'(') + max return self.execute(b'ZREMRANGEBYLEX', key, min, max) def zremrangebyrank(self, key, start, stop): """Remove all members in a sorted set within the given indexes. :raises TypeError: if start is not int :raises TypeError: if stop is not int """ if not isinstance(start, int): raise TypeError("start argument must be int") if not isinstance(stop, int): raise TypeError("stop argument must be int") return self.execute(b'ZREMRANGEBYRANK', key, start, stop) def zremrangebyscore(self, key, min=float('-inf'), max=float('inf'), *, exclude=None): """Remove all members in a sorted set within the given scores. :raises TypeError: if min or max is not int or float """ if not isinstance(min, (int, float)): raise TypeError("min argument must be int or float") if not isinstance(max, (int, float)): raise TypeError("max argument must be int or float") min, max = _encode_min_max(exclude, min, max) return self.execute(b'ZREMRANGEBYSCORE', key, min, max) def zrevrange(self, key, start, stop, withscores=False, encoding=_NOTSET): """Return a range of members in a sorted set, by index, with scores ordered from high to low. 
:raises TypeError: if start or stop is not int """ if not isinstance(start, int): raise TypeError("start argument must be int") if not isinstance(stop, int): raise TypeError("stop argument must be int") if withscores: args = [b'WITHSCORES'] else: args = [] fut = self.execute(b'ZREVRANGE', key, start, stop, *args, encoding=encoding) if withscores: return wait_convert(fut, pairs_int_or_float) return fut def zrevrangebyscore(self, key, max=float('inf'), min=float('-inf'), *, exclude=None, withscores=False, offset=None, count=None, encoding=_NOTSET): """Return a range of members in a sorted set, by score, with scores ordered from high to low. :raises TypeError: if min or max is not float or int :raises TypeError: if both offset and count are not specified :raises TypeError: if offset is not int :raises TypeError: if count is not int """ if not isinstance(min, (int, float)): raise TypeError("min argument must be int or float") if not isinstance(max, (int, float)): raise TypeError("max argument must be int or float") if (offset is not None and count is None) or \ (count is not None and offset is None): raise TypeError("offset and count must both be specified") if offset is not None and not isinstance(offset, int): raise TypeError("offset argument must be int") if count is not None and not isinstance(count, int): raise TypeError("count argument must be int") min, max = _encode_min_max(exclude, min, max) args = [] if withscores: args = [b'WITHSCORES'] if offset is not None and count is not None: args.extend([b'LIMIT', offset, count]) fut = self.execute(b'ZREVRANGEBYSCORE', key, max, min, *args, encoding=encoding) if withscores: return wait_convert(fut, pairs_int_or_float) return fut def zrevrangebylex(self, key, min=b'-', max=b'+', include_min=True, include_max=True, offset=None, count=None, encoding=_NOTSET): """Return a range of members in a sorted set, by lexicographical range from high to low. 
:raises TypeError: if min is not bytes :raises TypeError: if max is not bytes :raises TypeError: if both offset and count are not specified :raises TypeError: if offset is not bytes :raises TypeError: if count is not bytes """ if not isinstance(min, bytes): # FIXME raise TypeError("min argument must be bytes") if not isinstance(max, bytes): # FIXME raise TypeError("max argument must be bytes") if not min == b'-': min = (b'[' if include_min else b'(') + min if not max == b'+': max = (b'[' if include_max else b'(') + max if (offset is not None and count is None) or \ (count is not None and offset is None): raise TypeError("offset and count must both be specified") if offset is not None and not isinstance(offset, int): raise TypeError("offset argument must be int") if count is not None and not isinstance(count, int): raise TypeError("count argument must be int") args = [] if offset is not None and count is not None: args.extend([b'LIMIT', offset, count]) return self.execute(b'ZREVRANGEBYLEX', key, max, min, *args, encoding=encoding) def zrevrank(self, key, member): """Determine the index of a member in a sorted set, with scores ordered from high to low. 
""" return self.execute(b'ZREVRANK', key, member) def zscore(self, key, member): """Get the score associated with the given member in a sorted set.""" fut = self.execute(b'ZSCORE', key, member) return wait_convert(fut, optional_int_or_float) def zunionstore(self, destkey, key, *keys, with_weights=False, aggregate=None): """Add multiple sorted sets and store result in a new key.""" keys = (key,) + keys numkeys = len(keys) args = [] if with_weights: assert all(isinstance(val, (list, tuple)) for val in keys), ( "All key arguments must be (key, weight) tuples") weights = ['WEIGHTS'] for key, weight in keys: args.append(key) weights.append(weight) args.extend(weights) else: args.extend(keys) if aggregate is self.ZSET_AGGREGATE_SUM: args.extend(('AGGREGATE', 'SUM')) elif aggregate is self.ZSET_AGGREGATE_MAX: args.extend(('AGGREGATE', 'MAX')) elif aggregate is self.ZSET_AGGREGATE_MIN: args.extend(('AGGREGATE', 'MIN')) fut = self.execute(b'ZUNIONSTORE', destkey, numkeys, *args) return fut def zscan(self, key, cursor=0, match=None, count=None): """Incrementally iterate sorted sets elements and associated scores.""" args = [] if match is not None: args += [b'MATCH', match] if count is not None: args += [b'COUNT', count] fut = self.execute(b'ZSCAN', key, cursor, *args) def _converter(obj): return (int(obj[0]), pairs_int_or_float(obj[1])) return wait_convert(fut, _converter) def izscan(self, key, *, match=None, count=None): """Incrementally iterate sorted set items using async for. Usage example: >>> async for val, score in redis.izscan(key, match='something*'): ... 
print('Matched:', val, ':', score) """ return _ScanIter(lambda cur: self.zscan(key, cur, match=match, count=count)) def _encode_min_max(flag, min, max): if flag is SortedSetCommandsMixin.ZSET_EXCLUDE_MIN: return '({}'.format(min), max elif flag is SortedSetCommandsMixin.ZSET_EXCLUDE_MAX: return min, '({}'.format(max) elif flag is SortedSetCommandsMixin.ZSET_EXCLUDE_BOTH: return '({}'.format(min), '({}'.format(max) return min, max def int_or_float(value): assert isinstance(value, (str, bytes)), 'raw_value must be bytes' try: return int(value) except ValueError: return float(value) def optional_int_or_float(value): if value is None: return value return int_or_float(value) def pairs_int_or_float(value): it = iter(value) return [(val, int_or_float(score)) for val, score in zip(it, it)] aioredis-1.0.0/aioredis/commands/hash.py0000644000175000017500000001214713203624357021016 0ustar alexeyalexey00000000000000from itertools import chain from aioredis.util import ( wait_ok, wait_convert, wait_make_dict, _NOTSET, _ScanIter, ) class HashCommandsMixin: """Hash commands mixin. 
def hmset(self, key, field, value, *pairs):
    """Set multiple hash fields to multiple values.

    Trailing ``pairs`` must come as (field, value) couples.
    """
    if len(pairs) % 2:
        raise TypeError("length of pairs must be even number")
    fut = self.execute(b'HMSET', key, field, value, *pairs)
    return wait_ok(fut)
def hscan(self, key, cursor=0, match=None, count=None):
    """Incrementally iterate hash fields and associated values.

    :param key: hash key to scan.
    :param cursor: scan cursor position (0 starts a new scan).
    :param match: optional pattern passed as MATCH.
    :param count: optional hint passed as COUNT.
    """
    args = [key, cursor]
    # Plain conditionals instead of the short-circuit ``and`` trick —
    # same effect, clearer intent.
    if match is not None:
        args.extend([b'MATCH', match])
    if count is not None:
        args.extend([b'COUNT', count])
    fut = self.execute(b'HSCAN', *args)
    return wait_convert(fut, _make_pairs)
print('Matched:', name, '->', val) """ return _ScanIter(lambda cur: self.hscan(key, cur, match=match, count=count)) def hstrlen(self, key, field): """Get the length of the value of a hash field.""" return self.execute(b'HSTRLEN', key, field) def _make_pairs(obj): it = iter(obj[1]) return (int(obj[0]), list(zip(it, it))) aioredis-1.0.0/aioredis/commands/list.py0000644000175000017500000001337313203624357021050 0ustar alexeyalexey00000000000000from aioredis.util import _NOTSET, wait_ok class ListCommandsMixin: """List commands mixin. For commands details see: http://redis.io/commands#list """ def blpop(self, key, *keys, timeout=0, encoding=_NOTSET): """Remove and get the first element in a list, or block until one is available. :raises TypeError: if timeout is not int :raises ValueError: if timeout is less then 0 """ if not isinstance(timeout, int): raise TypeError("timeout argument must be int") if timeout < 0: raise ValueError("timeout must be greater equal 0") args = keys + (timeout,) return self.execute(b'BLPOP', key, *args, encoding=encoding) def brpop(self, key, *keys, timeout=0, encoding=_NOTSET): """Remove and get the last element in a list, or block until one is available. :raises TypeError: if timeout is not int :raises ValueError: if timeout is less then 0 """ if not isinstance(timeout, int): raise TypeError("timeout argument must be int") if timeout < 0: raise ValueError("timeout must be greater equal 0") args = keys + (timeout,) return self.execute(b'BRPOP', key, *args, encoding=encoding) def brpoplpush(self, sourcekey, destkey, timeout=0, encoding=_NOTSET): """Remove and get the last element in a list, or block until one is available. 
def linsert(self, key, pivot, value, before=False):
    """Insert value into the list stored at key, either before or
    after the reference value pivot.
    """
    where = b'BEFORE' if before else b'AFTER'
    return self.execute(b'LINSERT', key, where, pivot, value)
def ltrim(self, key, start, stop):
    """Trim an existing list so that it will contain only the
    specified range of elements.

    :raises TypeError: if start or stop is not int
    """
    # Validate both bounds with identical messages to the originals.
    for name, index in (('start', start), ('stop', stop)):
        if not isinstance(index, int):
            raise TypeError("{} argument must be int".format(name))
    return wait_ok(self.execute(b'LTRIM', key, start, stop))
def srandmember(self, key, count=None, *, encoding=_NOTSET):
    """Get one or multiple random members from a set.

    :param count: optional number of members to return.
    """
    args = [key]
    # Explicit conditional instead of the short-circuit ``and`` trick.
    if count is not None:
        args.append(count)
    return self.execute(b'SRANDMEMBER', *args, encoding=encoding)
def sscan(self, key, cursor=0, match=None, count=None):
    """Incrementally iterate Set elements.

    :param cursor: scan cursor position (0 starts a new scan).
    :param match: optional pattern passed as MATCH.
    :param count: optional hint passed as COUNT.
    """
    tokens = [key, cursor]
    # Plain conditionals instead of the short-circuit ``and`` trick.
    if match is not None:
        tokens.extend([b'MATCH', match])
    if count is not None:
        tokens.extend([b'COUNT', count])
    fut = self.execute(b'SSCAN', *tokens)
    # Reply is [cursor, members]; normalize cursor to int.
    return wait_convert(fut, lambda obj: (int(obj[0]), obj[1]))
def client_setname(self, name):
    """Set the current connection name."""
    return wait_ok(self.execute(b'CLIENT', b'SETNAME', name))
def flushdb(self):
    """Remove all keys from the current database."""
    # Use a bytes command name for consistency: 'FLUSHDB' was the lone
    # str command literal in this module.
    fut = self.execute(b'FLUSHDB')
    return wait_ok(fut)
def shutdown(self, save=None):
    """Synchronously save the dataset to disk and then shut down
    the server.

    ``save`` is compared by identity against the SHUTDOWN_SAVE /
    SHUTDOWN_NOSAVE class sentinels; anything else sends bare SHUTDOWN.
    """
    args = ()
    if save is self.SHUTDOWN_SAVE:
        args = (b'SAVE',)
    elif save is self.SHUTDOWN_NOSAVE:
        args = (b'NOSAVE',)
    return self.execute(b'SHUTDOWN', *args)
" Use slaveof(None) to turn redis into a MASTER.") host = None # TODO: drop in 0.3.0 if host is None and port is None: return self.execute(b'SLAVEOF', b'NO', b'ONE') return self.execute(b'SLAVEOF', host, port) def slowlog_get(self, length=None): """Returns the Redis slow queries log.""" if length is not None: if not isinstance(length, int): raise TypeError("length must be int or None") return self.execute(b'SLOWLOG', b'GET', length) else: return self.execute(b'SLOWLOG', b'GET') def slowlog_len(self): """Returns length of Redis slow queries log.""" return self.execute(b'SLOWLOG', b'LEN') def slowlog_reset(self): """Resets Redis slow queries log.""" fut = self.execute(b'SLOWLOG', b'RESET') return wait_ok(fut) def sync(self): """Redis-server internal command used for replication.""" return self.execute(b'SYNC') def time(self): """Return current server time.""" fut = self.execute(b'TIME') return wait_convert(fut, to_time) def _split(s): k, v = s.split('=') return k.replace('-', '_'), v def to_time(obj): return int(obj[0]) + int(obj[1]) * 1e-6 def to_tuples(value): line, *lines = value.splitlines(False) line = list(map(_split, line.split(' '))) ClientInfo = namedtuple('ClientInfo', ' '.join(k for k, v in line)) # TODO: parse flags and other known fields result = [ClientInfo(**dict(line))] for line in lines: result.append(ClientInfo(**dict(map(_split, line.split(' '))))) return result def parse_info(info): res = {} for block in info.split('\r\n\r\n'): section, *block = block.strip().splitlines() section = section[2:].lower() res[section] = tmp = {} for line in block: key, value = line.split(':') if ',' in line and '=' in line: value = dict(map(lambda i: i.split('='), value.split(','))) tmp[key] = value return res # XXX: may change in future # (may be hard to maintain for new/old redis versions) MasterInfo = namedtuple('MasterInfo', 'role replication_offset slaves') MasterSlaveInfo = namedtuple('MasterSlaveInfo', 'ip port ack_offset') SlaveInfo = namedtuple('SlaveInfo', 
def parse_role(role):
    """Convert a ROLE reply into the matching named tuple.

    Unknown role types are returned unchanged.
    """
    kind = role[0]
    if kind == 'master':
        slaves = [MasterSlaveInfo(s[0], int(s[1]), int(s[2]))
                  for s in role[2]]
        return MasterInfo(kind, int(role[1]), slaves)
    if kind == 'slave':
        return SlaveInfo(kind, role[1], int(role[2]),
                         role[3], int(role[4]))
    if kind == 'sentinel':
        return SentinelInfo(*role)
    return role
def ping(self, message=_NOTSET, *, encoding=_NOTSET):
    """Ping the server.

    Accept optional echo message.
    """
    args = () if message is _NOTSET else (message,)
    return self.execute('PING', *args, encoding=encoding)
class ContextRedis(Redis):
    """An instance of Redis class bound to single connection."""

    def __init__(self, conn, release_cb=None):
        # release_cb, when given, is invoked with the connection on
        # __exit__ (e.g. pool.release) to return it to its pool.
        super().__init__(conn)
        self._release_callback = release_cb

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        if self._release_callback is not None:
            # Drop our reference before releasing so the connection
            # can no longer be used through this wrapper.
            conn, self._pool_or_conn = self._pool_or_conn, None
            self._release_callback(conn)

    def __await__(self):
        # Awaiting an already-bound ContextRedis produces a fresh
        # wrapper around the same connection (no release callback).
        # The unreachable ``yield`` below only marks this method as a
        # generator, which is what makes the object awaitable.
        return ContextRedis(self._pool_or_conn)
        yield
""" pool = await create_pool(address, db=db, password=password, ssl=ssl, encoding=encoding, minsize=minsize, maxsize=maxsize, parser=parser, create_connection_timeout=timeout, pool_cls=pool_cls, connection_cls=connection_cls, loop=loop) return commands_factory(pool) aioredis-1.0.0/aioredis/commands/cluster.py0000644000175000017500000000730513203624357021554 0ustar alexeyalexey00000000000000from aioredis.util import wait_ok class ClusterCommandsMixin: """Cluster commands mixin. For commands details see: http://redis.io/commands#cluster """ def cluster_add_slots(self, slot, *slots): """Assign new hash slots to receiving node.""" slots = (slot,) + slots if not all(isinstance(s, int) for s in slots): raise TypeError("All parameters must be of type int") fut = self.execute(b'CLUSTER', b'ADDSLOTS', *slots) return wait_ok(fut) def cluster_count_failure_reports(self, node_id): """Return the number of failure reports active for a given node.""" return self.execute( b'CLUSTER', b'COUNT-FAILURE-REPORTS', node_id) def cluster_count_key_in_slots(self, slot): """Return the number of local keys in the specified hash slot.""" if not isinstance(slot, int): raise TypeError("Expected slot to be of type int, got {}" .format(type(slot))) return self.execute(b'CLUSTER', b'COUNTKEYSINSLOT', slot) def cluster_del_slots(self, slot, *slots): """Set hash slots as unbound in receiving node.""" slots = (slot,) + slots if not all(isinstance(s, int) for s in slots): raise TypeError("All parameters must be of type int") fut = self.execute(b'CLUSTER', b'DELSLOTS', *slots) return wait_ok(fut) def cluster_failover(self): """Forces a slave to perform a manual failover of its master.""" pass # TODO: Implement def cluster_forget(self, node_id): """Remove a node from the nodes table.""" fut = self.execute(b'CLUSTER', b'FORGET', node_id) return wait_ok(fut) def cluster_get_keys_in_slots(self, slot, count, *, encoding): """Return local key names in the specified hash slot.""" return 
def cluster_reset(self, *, hard=False):
    """Reset a Redis Cluster node.

    :param hard: when True perform a HARD reset, otherwise SOFT.
    """
    # Conditional expression instead of the ``x and a or b`` idiom,
    # which is error-prone when the middle operand can be falsy.
    reset = b'HARD' if hard else b'SOFT'
    fut = self.execute(b'CLUSTER', b'RESET', reset)
    return wait_ok(fut)
def georadius(self, key, longitude, latitude, radius, unit='m', *,
              with_dist=False, with_hash=False, with_coord=False,
              count=None, sort=None, encoding=_NOTSET):
    """Query a sorted set representing a geospatial index to fetch
    members matching a given maximum distance from a point.

    Return value follows Redis convention:

    * if none of ``WITH*`` flags are set -- list of strings returned:

        >>> await redis.georadius('Sicily', 15, 37, 200, 'km')
        [b"Palermo", b"Catania"]

    * if any flag (or all) is set -- list of named tuples returned:

        >>> await redis.georadius('Sicily', 15, 37, 200, 'km',
        ...                       with_dist=True)
        [GeoMember(name=b"Palermo", dist=190.4424, hash=None, coord=None),
         GeoMember(name=b"Catania", dist=56.4413, hash=None, coord=None)]

    :raises TypeError: radius is not float or int
    :raises TypeError: count is not int
    :raises ValueError: if unit not equal ``m``, ``km``, ``mi`` or ``ft``
    :raises ValueError: if sort not equal ``ASC`` or ``DESC``

    :rtype: list[str] or list[GeoMember]
    """
    # Validation raises before anything is sent to the server.
    args = validate_georadius_options(
        radius, unit, with_dist, with_hash, with_coord, count, sort
    )
    fut = self.execute(
        b'GEORADIUS', key, longitude, latitude, radius, unit, *args,
        encoding=encoding
    )
    # Any WITH* flag changes the reply shape; convert each row into a
    # GeoMember named tuple in that case.
    if with_dist or with_hash or with_coord:
        return wait_convert(fut, make_geomember,
                            with_dist=with_dist,
                            with_hash=with_hash,
                            with_coord=with_coord)
    return fut
with_dist=True) [GeoMember(name=b"Palermo", dist=190.4424, hash=None, coord=None), GeoMember(name=b"Catania", dist=56.4413, hash=None, coord=None)] :raises TypeError: radius is not float or int :raises TypeError: count is not int :raises ValueError: if unit not equal ``m``, ``km``, ``mi`` or ``ft`` :raises ValueError: if sort not equal ``ASC`` or ``DESC`` :rtype: list[str] or list[GeoMember] """ args = validate_georadius_options( radius, unit, with_dist, with_hash, with_coord, count, sort ) fut = self.execute( b'GEORADIUSBYMEMBER', key, member, radius, unit, *args, encoding=encoding) if with_dist or with_hash or with_coord: return wait_convert(fut, make_geomember, with_dist=with_dist, with_hash=with_hash, with_coord=with_coord) return fut def validate_georadius_options(radius, unit, with_dist, with_hash, with_coord, count, sort): args = [] if with_dist: args.append(b'WITHDIST') if with_hash: args.append(b'WITHHASH') if with_coord: args.append(b'WITHCOORD') if unit not in ['m', 'km', 'mi', 'ft']: raise ValueError("unit argument must be 'm', 'km', 'mi' or 'ft'") if not isinstance(radius, (int, float)): raise TypeError("radius argument must be int or float") if count: if not isinstance(count, int): raise TypeError("count argument must be int") args += [b'COUNT', count] if sort: if sort not in ['ASC', 'DESC']: raise ValueError("sort argument must be euqal 'ASC' or 'DESC'") args.append(sort) return args def make_geocoord(value): if isinstance(value, list): return GeoPoint(*map(float, value)) return value def make_geodist(value): if value: return float(value) return value def make_geopos(value): return [make_geocoord(val) for val in value] def make_geomember(value, with_dist, with_coord, with_hash): res_rows = [] for row in value: name = row.pop(0) dist = hash_ = coord = None if with_dist: dist = float(row.pop(0)) if with_hash: hash_ = int(row.pop(0)) if with_coord: coord = GeoPoint(*map(float, row.pop(0))) res_rows.append(GeoMember(name, dist, hash_, coord)) return 
res_rows aioredis-1.0.0/aioredis/commands/transaction.py0000644000175000017500000002334313203624357022420 0ustar alexeyalexey00000000000000import asyncio import functools from ..abc import AbcPool from ..errors import ( RedisError, PipelineError, MultiExecError, ConnectionClosedError, ) from ..util import ( wait_ok, _set_exception, ) class TransactionsCommandsMixin: """Transaction commands mixin. For commands details see: http://redis.io/commands/#transactions Transactions HOWTO: >>> tr = redis.multi_exec() >>> result_future1 = tr.incr('foo') >>> result_future2 = tr.incr('bar') >>> try: ... result = await tr.execute() ... except MultiExecError: ... pass # check what happened >>> result1 = await result_future1 >>> result2 = await result_future2 >>> assert result == [result1, result2] """ def unwatch(self): """Forget about all watched keys.""" fut = self._pool_or_conn.execute(b'UNWATCH') return wait_ok(fut) def watch(self, key, *keys): """Watch the given keys to determine execution of the MULTI/EXEC block. """ fut = self._pool_or_conn.execute(b'WATCH', key, *keys) return wait_ok(fut) def multi_exec(self): """Returns MULTI/EXEC pipeline wrapper. Usage: >>> tr = redis.multi_exec() >>> fut1 = tr.incr('foo') # NO `await` as it will block forever! >>> fut2 = tr.incr('bar') >>> result = await tr.execute() >>> result [1, 1] >>> await asyncio.gather(fut1, fut2) [1, 1] """ return MultiExec(self._pool_or_conn, self.__class__, loop=self._pool_or_conn._loop) def pipeline(self): """Returns :class:`Pipeline` object to execute bulk of commands. It is provided for convenience. Commands can be pipelined without it. Example: >>> pipe = redis.pipeline() >>> fut1 = pipe.incr('foo') # NO `await` as it will block forever! 
>>> fut2 = pipe.incr('bar') >>> result = await pipe.execute() >>> result [1, 1] >>> await asyncio.gather(fut1, fut2) [1, 1] >>> # >>> # The same can be done without pipeline: >>> # >>> fut1 = redis.incr('foo') # the 'INCRY foo' command already sent >>> fut2 = redis.incr('bar') >>> await asyncio.gather(fut1, fut2) [2, 2] """ return Pipeline(self._pool_or_conn, self.__class__, loop=self._pool_or_conn._loop) class _RedisBuffer: def __init__(self, pipeline, *, loop=None): if loop is None: loop = asyncio.get_event_loop() self._pipeline = pipeline self._loop = loop def execute(self, cmd, *args, **kw): fut = self._loop.create_future() self._pipeline.append((fut, cmd, args, kw)) return fut # TODO: add here or remove in connection methods like `select`, `auth` etc class Pipeline: """Commands pipeline. Usage: >>> pipe = redis.pipeline() >>> fut1 = pipe.incr('foo') >>> fut2 = pipe.incr('bar') >>> await pipe.execute() [1, 1] >>> await fut1 1 >>> await fut2 1 """ error_class = PipelineError def __init__(self, pool_or_connection, commands_factory=lambda conn: conn, *, loop=None): if loop is None: loop = asyncio.get_event_loop() self._pool_or_conn = pool_or_connection self._loop = loop self._pipeline = [] self._results = [] self._buffer = _RedisBuffer(self._pipeline, loop=loop) self._redis = commands_factory(self._buffer) self._done = False def __getattr__(self, name): assert not self._done, "Pipeline already executed. Create new one." attr = getattr(self._redis, name) if callable(attr): @functools.wraps(attr) def wrapper(*args, **kw): try: task = asyncio.ensure_future(attr(*args, **kw), loop=self._loop) except Exception as exc: task = self._loop.create_future() task.set_exception(exc) self._results.append(task) return task return wrapper return attr async def execute(self, *, return_exceptions=False): """Execute all buffered commands. Any exception that is raised by any command is caught and raised later when processing results. 
Exceptions can also be returned in result if `return_exceptions` flag is set to True. """ assert not self._done, "Pipeline already executed. Create new one." self._done = True if self._pipeline: if isinstance(self._pool_or_conn, AbcPool): async with self._pool_or_conn.get() as conn: return await self._do_execute( conn, return_exceptions=return_exceptions) else: return await self._do_execute( self._pool_or_conn, return_exceptions=return_exceptions) else: return await self._gather_result(return_exceptions) async def _do_execute(self, conn, *, return_exceptions=False): await asyncio.gather(*self._send_pipeline(conn), loop=self._loop, return_exceptions=True) return await self._gather_result(return_exceptions) async def _gather_result(self, return_exceptions): errors = [] results = [] for fut in self._results: try: res = await fut results.append(res) except Exception as exc: errors.append(exc) results.append(exc) if errors and not return_exceptions: raise self.error_class(errors) return results def _send_pipeline(self, conn): for fut, cmd, args, kw in self._pipeline: try: result_fut = conn.execute(cmd, *args, **kw) result_fut.add_done_callback( functools.partial(self._check_result, waiter=fut)) except Exception as exc: fut.set_exception(exc) else: yield result_fut def _check_result(self, fut, waiter): if fut.cancelled(): waiter.cancel() elif fut.exception(): waiter.set_exception(fut.exception()) else: waiter.set_result(fut.result()) class MultiExec(Pipeline): """Multi/Exec pipeline wrapper. Usage: >>> tr = redis.multi_exec() >>> f1 = tr.incr('foo') >>> f2 = tr.incr('bar') >>> # A) >>> await tr.execute() >>> res1 = await f1 >>> res2 = await f2 >>> # or B) >>> res1, res2 = await tr.execute() and ofcourse try/except: >>> tr = redis.multi_exec() >>> f1 = tr.incr('1') # won't raise any exception (why?) >>> try: ... res = await tr.execute() ... except RedisError: ... 
pass >>> assert f1.done() >>> assert f1.result() is res >>> tr = redis.multi_exec() >>> wait_ok_coro = tr.mset('1') >>> try: ... ok1 = await tr.execute() ... except RedisError: ... pass # handle it >>> ok2 = await wait_ok_coro >>> # for this to work `wait_ok_coro` must be wrapped in Future """ error_class = MultiExecError async def _do_execute(self, conn, *, return_exceptions=False): self._waiters = waiters = [] multi = conn.execute('MULTI') coros = list(self._send_pipeline(conn)) exec_ = conn.execute('EXEC') gather = asyncio.gather(multi, *coros, loop=self._loop, return_exceptions=True) last_error = None try: await asyncio.shield(gather, loop=self._loop) except asyncio.CancelledError: await gather except Exception as err: last_error = err raise finally: if conn.closed: if last_error is None: last_error = ConnectionClosedError() for fut in waiters: _set_exception(fut, last_error) # fut.cancel() for fut in self._results: if not fut.done(): fut.set_exception(last_error) # fut.cancel() else: try: results = await exec_ except RedisError as err: for fut in waiters: fut.set_exception(err) else: assert len(results) == len(waiters), ( "Results does not match waiters", results, waiters) self._resolve_waiters(results, return_exceptions) return (await self._gather_result(return_exceptions)) def _resolve_waiters(self, results, return_exceptions): errors = [] for val, fut in zip(results, self._waiters): if isinstance(val, RedisError): fut.set_exception(val) errors.append(val) else: fut.set_result(val) if errors and not return_exceptions: raise MultiExecError(errors) def _check_result(self, fut, waiter): assert waiter not in self._waiters, (fut, waiter, self._waiters) assert not waiter.done(), waiter if fut.cancelled(): # await gather was cancelled waiter.cancel() elif fut.exception(): # server replied with error waiter.set_exception(fut.exception()) elif fut.result() in {b'QUEUED', 'QUEUED'}: # got result, it should be QUEUED self._waiters.append(waiter) 
aioredis-1.0.0/aioredis/commands/pubsub.py0000644000175000017500000000664013203624357021374 0ustar alexeyalexey00000000000000import json from aioredis.util import wait_make_dict class PubSubCommandsMixin: """Pub/Sub commands mixin. For commands details see: http://redis.io/commands/#pubsub """ def publish(self, channel, message): """Post a message to channel.""" return self.execute(b'PUBLISH', channel, message) def publish_json(self, channel, obj): """Post a JSON-encoded message to channel.""" return self.publish(channel, json.dumps(obj)) def subscribe(self, channel, *channels): """Switch connection to Pub/Sub mode and subscribe to specified channels. Arguments can be instances of :class:`~aioredis.Channel`. Returns :func:`asyncio.gather()` coroutine which when done will return a list of :class:`~aioredis.Channel` objects. """ conn = self._pool_or_conn return wait_return_channels( conn.execute_pubsub(b'SUBSCRIBE', channel, *channels), conn.pubsub_channels) def unsubscribe(self, channel, *channels): """Unsubscribe from specific channels. Arguments can be instances of :class:`~aioredis.Channel`. """ conn = self._pool_or_conn return conn.execute_pubsub(b'UNSUBSCRIBE', channel, *channels) def psubscribe(self, pattern, *patterns): """Switch connection to Pub/Sub mode and subscribe to specified patterns. Arguments can be instances of :class:`~aioredis.Channel`. Returns :func:`asyncio.gather()` coroutine which when done will return a list of subscribed :class:`~aioredis.Channel` objects with ``is_pattern`` property set to ``True``. """ conn = self._pool_or_conn return wait_return_channels( conn.execute_pubsub(b'PSUBSCRIBE', pattern, *patterns), conn.pubsub_patterns) def punsubscribe(self, pattern, *patterns): """Unsubscribe from specific patterns. Arguments can be instances of :class:`~aioredis.Channel`. 
""" conn = self._pool_or_conn return conn.execute_pubsub(b'PUNSUBSCRIBE', pattern, *patterns) def pubsub_channels(self, pattern=None): """Lists the currently active channels.""" args = [b'PUBSUB', b'CHANNELS'] if pattern is not None: args.append(pattern) return self.execute(*args) def pubsub_numsub(self, *channels): """Returns the number of subscribers for the specified channels.""" return wait_make_dict(self.execute( b'PUBSUB', b'NUMSUB', *channels)) def pubsub_numpat(self): """Returns the number of subscriptions to patterns.""" return self.execute(b'PUBSUB', b'NUMPAT') @property def channels(self): """Returns read-only channels dict. See :attr:`~aioredis.RedisConnection.pubsub_channels` """ return self._pool_or_conn.pubsub_channels @property def patterns(self): """Returns read-only patterns dict. See :attr:`~aioredis.RedisConnection.pubsub_patterns` """ return self._pool_or_conn.pubsub_patterns @property def in_pubsub(self): """Indicates that connection is in PUB/SUB mode. Provides the number of subscribed channels. """ return self._pool_or_conn.in_pubsub async def wait_return_channels(fut, channels_dict): return [channels_dict[name] for cmd, name, count in await fut] aioredis-1.0.0/aioredis/locks.py0000644000175000017500000000247413203624357017407 0ustar alexeyalexey00000000000000from asyncio.locks import Lock as _Lock from asyncio import coroutine from asyncio import futures # Fixes an issue with all Python versions that leaves pending waiters # without being awakened when the first waiter is canceled. # Code adapted from the PR https://github.com/python/cpython/pull/1031 # Waiting once it is merged to make a proper condition to relay on # the stdlib implementation or this one patched class Lock(_Lock): @coroutine def acquire(self): """Acquire a lock. This method blocks until the lock is unlocked, then sets it to locked and returns True. 
""" if not self._locked and all(w.cancelled() for w in self._waiters): self._locked = True return True fut = self._loop.create_future() self._waiters.append(fut) try: yield from fut self._locked = True return True except futures.CancelledError: if not self._locked: # pragma: no cover self._wake_up_first() raise finally: self._waiters.remove(fut) def _wake_up_first(self): """Wake up the first waiter who isn't cancelled.""" for fut in self._waiters: if not fut.done(): fut.set_result(True) break aioredis-1.0.0/aioredis/log.py0000644000175000017500000000064713203624357017055 0ustar alexeyalexey00000000000000import os import sys import logging logger = logging.getLogger('aioredis') sentinel_logger = logger.getChild('sentinel') if os.environ.get("AIOREDIS_DEBUG"): logger.setLevel(logging.DEBUG) handler = logging.StreamHandler(stream=sys.stderr) handler.setFormatter(logging.Formatter( "%(asctime)s %(name)s %(levelname)s %(message)s")) logger.addHandler(handler) os.environ["AIOREDIS_DEBUG"] = "" aioredis-1.0.0/aioredis/pool.py0000644000175000017500000004071113203624357017241 0ustar alexeyalexey00000000000000import asyncio import collections import types from .connection import create_connection, _PUBSUB_COMMANDS from .log import logger from .util import parse_url from .errors import PoolClosedError from .abc import AbcPool from .locks import Lock async def create_pool(address, *, db=None, password=None, ssl=None, encoding=None, minsize=1, maxsize=10, parser=None, loop=None, create_connection_timeout=None, pool_cls=None, connection_cls=None): # FIXME: rewrite docstring """Creates Redis Pool. By default it creates pool of Redis instances, but it is also possible to create pool of plain connections by passing ``lambda conn: conn`` as commands_factory. *commands_factory* parameter is deprecated since v0.2.9 All arguments are the same as for create_connection. Returns RedisPool instance or a pool_cls if it is given. 
""" if pool_cls: assert issubclass(pool_cls, AbcPool),\ "pool_class does not meet the AbcPool contract" cls = pool_cls else: cls = ConnectionsPool if isinstance(address, str): address, options = parse_url(address) db = options.setdefault('db', db) password = options.setdefault('password', password) encoding = options.setdefault('encoding', encoding) create_connection_timeout = options.setdefault( 'timeout', create_connection_timeout) if 'ssl' in options: assert options['ssl'] or (not options['ssl'] and not ssl), ( "Conflicting ssl options are set", options['ssl'], ssl) ssl = ssl or options['ssl'] # TODO: minsize/maxsize pool = cls(address, db, password, encoding, minsize=minsize, maxsize=maxsize, ssl=ssl, parser=parser, create_connection_timeout=create_connection_timeout, connection_cls=connection_cls, loop=loop) try: await pool._fill_free(override_min=False) except Exception as ex: pool.close() await pool.wait_closed() raise return pool class ConnectionsPool(AbcPool): """Redis connections pool.""" def __init__(self, address, db=None, password=None, encoding=None, *, minsize, maxsize, ssl=None, parser=None, create_connection_timeout=None, connection_cls=None, loop=None): assert isinstance(minsize, int) and minsize >= 0, ( "minsize must be int >= 0", minsize, type(minsize)) assert maxsize is not None, "Arbitrary pool size is disallowed." 
assert isinstance(maxsize, int) and maxsize > 0, ( "maxsize must be int > 0", maxsize, type(maxsize)) assert minsize <= maxsize, ( "Invalid pool min/max sizes", minsize, maxsize) if loop is None: loop = asyncio.get_event_loop() self._address = address self._db = db self._password = password self._ssl = ssl self._encoding = encoding self._parser_class = parser self._minsize = minsize self._create_connection_timeout = create_connection_timeout self._loop = loop self._pool = collections.deque(maxlen=maxsize) self._used = set() self._acquiring = 0 self._cond = asyncio.Condition(lock=Lock(loop=loop), loop=loop) self._close_state = asyncio.Event(loop=loop) self._close_waiter = None self._pubsub_conn = None self._connection_cls = connection_cls def __repr__(self): return '<{} [db:{}, size:[{}:{}], free:{}]>'.format( self.__class__.__name__, self.db, self.minsize, self.maxsize, self.freesize) @property def minsize(self): """Minimum pool size.""" return self._minsize @property def maxsize(self): """Maximum pool size.""" return self._pool.maxlen @property def size(self): """Current pool size.""" return self.freesize + len(self._used) + self._acquiring @property def freesize(self): """Current number of free connections.""" return len(self._pool) @property def address(self): return self._address async def clear(self): """Clear pool connections. Close and remove all free connections. 
""" with (await self._cond): await self._do_clear() async def _do_clear(self): waiters = [] while self._pool: conn = self._pool.popleft() conn.close() waiters.append(conn.wait_closed()) await asyncio.gather(*waiters, loop=self._loop) async def _do_close(self): await self._close_state.wait() with (await self._cond): assert not self._acquiring, self._acquiring waiters = [] while self._pool: conn = self._pool.popleft() conn.close() waiters.append(conn.wait_closed()) for conn in self._used: conn.close() waiters.append(conn.wait_closed()) await asyncio.gather(*waiters, loop=self._loop) # TODO: close _pubsub_conn connection logger.debug("Closed %d connection(s)", len(waiters)) def close(self): """Close all free and in-progress connections and mark pool as closed. """ if not self._close_state.is_set(): self._close_waiter = asyncio.ensure_future(self._do_close(), loop=self._loop) self._close_state.set() @property def closed(self): """True if pool is closed.""" return self._close_state.is_set() async def wait_closed(self): """Wait until pool gets closed.""" await self._close_state.wait() assert self._close_waiter is not None await asyncio.shield(self._close_waiter, loop=self._loop) @property def db(self): """Currently selected db index.""" return self._db or 0 @property def encoding(self): """Current set codec or None.""" return self._encoding def execute(self, command, *args, **kw): """Executes redis command in a free connection and returns future waiting for result. Picks connection from free pool and send command through that connection. If no connection is found, returns coroutine waiting for free connection to execute command. 
""" conn, address = self.get_connection(command, args) if conn is not None: fut = conn.execute(command, *args, **kw) return self._check_result(fut, command, args, kw) else: coro = self._wait_execute(address, command, args, kw) return self._check_result(coro, command, args, kw) def execute_pubsub(self, command, *channels): """Executes Redis (p)subscribe/(p)unsubscribe commands. ConnectionsPool picks separate connection for pub/sub and uses it until explicitly closed or disconnected (unsubscribing from all channels/patterns will leave connection locked for pub/sub use). There is no auto-reconnect for this PUB/SUB connection. Returns asyncio.gather coroutine waiting for all channels/patterns to receive answers. """ conn, address = self.get_connection(command) if conn is not None: return conn.execute_pubsub(command, *channels) else: return self._wait_execute_pubsub(address, command, channels, {}) def get_connection(self, command, args=()): """Get free connection from pool. Returns connection. """ # TODO: find a better way to determine if connection is free # and not havily used. command = command.upper().strip() is_pubsub = command in _PUBSUB_COMMANDS if is_pubsub and self._pubsub_conn: if not self._pubsub_conn.closed: return self._pubsub_conn, self._pubsub_conn.address self._pubsub_conn = None for i in range(self.freesize): conn = self._pool[0] self._pool.rotate(1) if conn.closed: # or conn._waiters: (eg: busy connection) continue if conn.in_pubsub: continue if is_pubsub: self._pubsub_conn = conn self._pool.remove(conn) self._used.add(conn) return conn, conn.address return None, self._address # figure out def _check_result(self, fut, *data): """Hook to check result or catch exception (like MovedError). This method can be coroutine. 
""" return fut async def _wait_execute(self, address, command, args, kw): """Acquire connection and execute command.""" conn = await self.acquire(command, args) try: return (await conn.execute(command, *args, **kw)) finally: self.release(conn) async def _wait_execute_pubsub(self, address, command, args, kw): if self.closed: raise PoolClosedError("Pool is closed") assert self._pubsub_conn is None or self._pubsub_conn.closed, ( "Expected no or closed connection", self._pubsub_conn) with (await self._cond): if self.closed: raise PoolClosedError("Pool is closed") if self._pubsub_conn is None or self._pubsub_conn.closed: conn = await self._create_new_connection(address) self._pubsub_conn = conn conn = self._pubsub_conn return (await conn.execute_pubsub(command, *args, **kw)) async def select(self, db): """Changes db index for all free connections. All previously acquired connections will be closed when released. """ res = True with (await self._cond): for i in range(self.freesize): res = res and (await self._pool[i].select(db)) else: self._db = db return res async def auth(self, password): self._password = password with (await self._cond): for i in range(self.freesize): await self._pool[i].auth(password) @property def in_pubsub(self): if self._pubsub_conn and not self._pubsub_conn.closed: return self._pubsub_conn.in_pubsub return 0 @property def pubsub_channels(self): if self._pubsub_conn and not self._pubsub_conn.closed: return self._pubsub_conn.pubsub_channels return types.MappingProxyType({}) @property def pubsub_patterns(self): if self._pubsub_conn and not self._pubsub_conn.closed: return self._pubsub_conn.pubsub_patterns return types.MappingProxyType({}) async def acquire(self, command=None, args=()): """Acquires a connection from free pool. Creates new connection if needed. 
""" if self.closed: raise PoolClosedError("Pool is closed") with await self._cond: if self.closed: raise PoolClosedError("Pool is closed") while True: await self._fill_free(override_min=True) if self.freesize: conn = self._pool.popleft() assert not conn.closed, conn assert conn not in self._used, (conn, self._used) self._used.add(conn) return conn else: await self._cond.wait() def release(self, conn): """Returns used connection back into pool. When returned connection has db index that differs from one in pool the connection will be closed and dropped. When queue of free connections is full the connection will be dropped. """ assert conn in self._used, ( "Invalid connection, maybe from other pool", conn) self._used.remove(conn) if not conn.closed: if conn.in_transaction: logger.warning( "Connection %r is in transaction, closing it.", conn) conn.close() elif conn.in_pubsub: logger.warning( "Connection %r is in subscribe mode, closing it.", conn) conn.close() elif conn._waiters: logger.warning( "Connection %r has pending commands, closing it.", conn) conn.close() elif conn.db == self.db: if self.maxsize and self.freesize < self.maxsize: self._pool.append(conn) else: # consider this connection as old and close it. 
conn.close() else: conn.close() # FIXME: check event loop is not closed asyncio.ensure_future(self._wakeup(), loop=self._loop) def _drop_closed(self): for i in range(self.freesize): conn = self._pool[0] if conn.closed: self._pool.popleft() else: self._pool.rotate(1) async def _fill_free(self, *, override_min): # drop closed connections first self._drop_closed() address = self._address while self.size < self.minsize: self._acquiring += 1 try: conn = await self._create_new_connection(address) # check the healthy of that connection, if # something went wrong just trigger the Exception await conn.execute('ping') self._pool.append(conn) finally: self._acquiring -= 1 # connection may be closed at yield point self._drop_closed() if self.freesize: return if override_min: while not self._pool and self.size < self.maxsize: self._acquiring += 1 try: conn = await self._create_new_connection(address) self._pool.append(conn) finally: self._acquiring -= 1 # connection may be closed at yield point self._drop_closed() def _create_new_connection(self, address): return create_connection(address, db=self._db, password=self._password, ssl=self._ssl, encoding=self._encoding, parser=self._parser_class, timeout=self._create_connection_timeout, connection_cls=self._connection_cls, loop=self._loop) async def _wakeup(self, closing_conn=None): with (await self._cond): self._cond.notify() if closing_conn is not None: await closing_conn.wait_closed() def __enter__(self): raise RuntimeError( "'await' should be used as a context manager expression") def __exit__(self, *args): pass # pragma: nocover def __await__(self): # To make `with await pool` work conn = yield from self.acquire().__await__() return _ConnectionContextManager(self, conn) def get(self): '''Return async context manager for working with connection. 
async with pool.get() as conn: await conn.get(key) ''' return _AsyncConnectionContextManager(self) class _ConnectionContextManager: __slots__ = ('_pool', '_conn') def __init__(self, pool, conn): self._pool = pool self._conn = conn def __enter__(self): return self._conn def __exit__(self, exc_type, exc_value, tb): try: self._pool.release(self._conn) finally: self._pool = None self._conn = None class _AsyncConnectionContextManager: __slots__ = ('_pool', '_conn') def __init__(self, pool): self._pool = pool self._conn = None async def __aenter__(self): conn = await self._pool.acquire() self._conn = conn return self._conn async def __aexit__(self, exc_type, exc_value, tb): try: self._pool.release(self._conn) finally: self._pool = None self._conn = None aioredis-1.0.0/aioredis/sentinel/0000755000175000017500000000000013203634127017530 5ustar alexeyalexey00000000000000aioredis-1.0.0/aioredis/sentinel/pool.py0000644000175000017500000004327413203624357021071 0ustar alexeyalexey00000000000000import asyncio import contextlib from concurrent.futures import ALL_COMPLETED from async_timeout import timeout as async_timeout from ..log import sentinel_logger from ..pubsub import Receiver from ..pool import create_pool, ConnectionsPool from ..errors import ( MasterNotFoundError, SlaveNotFoundError, PoolClosedError, RedisError, MasterReplyError, SlaveReplyError, ) # Address marker for discovery _NON_DISCOVERED = object() _logger = sentinel_logger.getChild('monitor') async def create_sentinel_pool(sentinels, *, db=None, password=None, encoding=None, minsize=1, maxsize=10, ssl=None, parser=None, timeout=0.2, loop=None): """Create SentinelPool.""" # FIXME: revise default timeout value assert isinstance(sentinels, (list, tuple)), sentinels if loop is None: loop = asyncio.get_event_loop() pool = SentinelPool(sentinels, db=db, password=password, ssl=ssl, encoding=encoding, parser=parser, minsize=minsize, maxsize=maxsize, timeout=timeout, loop=loop) await pool.discover() return pool class 
SentinelPool: """Sentinel connections pool. Holds connection pools to known and discovered (TBD) Sentinels as well as services' connections. """ def __init__(self, sentinels, *, db=None, password=None, ssl=None, encoding=None, parser=None, minsize, maxsize, timeout, loop=None): if loop is None: loop = asyncio.get_event_loop() # TODO: add connection/discover timeouts; # and what to do if no master is found: # (raise error or try forever or try until timeout) # XXX: _sentinels is unordered self._sentinels = set(sentinels) self._loop = loop self._timeout = timeout self._pools = [] # list of sentinel pools self._masters = {} self._slaves = {} self._parser_class = parser self._redis_db = db self._redis_password = password self._redis_ssl = ssl self._redis_encoding = encoding self._redis_minsize = minsize self._redis_maxsize = maxsize self._close_state = asyncio.Event(loop=loop) self._close_waiter = None self._monitor = monitor = Receiver(loop=loop) async def echo_events(): try: while await monitor.wait_message(): ch, (ev, data) = await monitor.get(encoding='utf-8') ev = ev.decode('utf-8') _logger.debug("%s: %s", ev, data) if ev in ('+odown',): typ, name, *tail = data.split(' ') if typ == 'master': self._need_rediscover(name) # TODO: parse messages; # watch +new-epoch which signals `failover in progres` # freeze reconnection # wait / discover new master (find proper way) # unfreeze reconnection # # discover master in default way # get-master-addr... # connnect # role # etc... except asyncio.CancelledError: pass self._monitor_task = asyncio.ensure_future(echo_events(), loop=loop) @property def discover_timeout(self): """Timeout (seconds) for Redis/Sentinel command calls during master/slave address discovery. 
""" return self._timeout def master_for(self, service): """Returns wrapper to master's pool for requested service.""" # TODO: make it coroutine and connect minsize connections if service not in self._masters: self._masters[service] = ManagedPool( self, service, is_master=True, db=self._redis_db, password=self._redis_password, encoding=self._redis_encoding, minsize=self._redis_minsize, maxsize=self._redis_maxsize, ssl=self._redis_ssl, parser=self._parser_class, loop=self._loop) return self._masters[service] def slave_for(self, service): """Returns wrapper to slave's pool for requested service.""" # TODO: make it coroutine and connect minsize connections if service not in self._slaves: self._slaves[service] = ManagedPool( self, service, is_master=False, db=self._redis_db, password=self._redis_password, encoding=self._redis_encoding, minsize=self._redis_minsize, maxsize=self._redis_maxsize, ssl=self._redis_ssl, parser=self._parser_class, loop=self._loop) return self._slaves[service] def execute(self, command, *args, **kwargs): """Execute sentinel command.""" # TODO: choose pool # kwargs can be used to control which sentinel to use if self.closed: raise PoolClosedError("Sentinel pool is closed") for pool in self._pools: return pool.execute(command, *args, **kwargs) # how to handle errors and pick other pool? # is the only way to make it coroutine? 
@property
def closed(self):
    """True if pool is closed or closing."""
    return self._close_state.is_set()

def close(self):
    """Close all controlled connections (both sentinel and redis)."""
    if not self._close_state.is_set():
        self._close_waiter = asyncio.ensure_future(self._do_close(),
                                                   loop=self._loop)
        self._close_state.set()

async def _do_close(self):
    # Cancel the monitor task and drain every owned pool.
    await self._close_state.wait()
    # TODO: lock
    tasks = []
    task, self._monitor_task = self._monitor_task, None
    task.cancel()
    tasks.append(task)
    while self._pools:
        pool = self._pools.pop(0)
        pool.close()
        tasks.append(pool.wait_closed())
    while self._masters:
        _, pool = self._masters.popitem()
        pool.close()
        tasks.append(pool.wait_closed())
    while self._slaves:
        _, pool = self._slaves.popitem()
        pool.close()
        tasks.append(pool.wait_closed())
    await asyncio.gather(*tasks, loop=self._loop)

async def wait_closed(self):
    """Wait until pool gets closed."""
    await self._close_state.wait()
    assert self._close_waiter is not None
    await asyncio.shield(self._close_waiter, loop=self._loop)

async def discover(self, timeout=0.2):      # TODO: better name?
    """Discover sentinels and all monitored services within given timeout.

    If no sentinels discovered within timeout: TimeoutError is raised.
    If some sentinels were discovered but not all — it is ok.
    If not all monitored services (masters/slaves) discovered
    (or connections established) — it is ok.
    TBD: what if some sentinels/services unreachable;
    """
    # TODO: check not closed
    # TODO: discovery must be done with some customizable timeout.
    tasks = []
    pools = []
    for addr in self._sentinels:    # iterate over unordered set
        tasks.append(self._connect_sentinel(addr, timeout, pools))
    done, pending = await asyncio.wait(tasks, loop=self._loop,
                                       return_when=ALL_COMPLETED)
    assert not pending, ("Expected all tasks to complete", done, pending)

    for task in done:
        result = task.result()
        if isinstance(result, Exception):
            continue    # FIXME
    if not pools:
        raise Exception("Could not connect to any sentinel")
    pools, self._pools[:] = self._pools[:], pools
    # TODO: close current connections
    for pool in pools:
        pool.close()
        await pool.wait_closed()

    # TODO: discover peer sentinels
    for pool in self._pools:
        await pool.execute_pubsub(
            b'psubscribe', self._monitor.pattern('*'))

async def _connect_sentinel(self, address, timeout, pools):
    """Try to connect to specified Sentinel returning either
    connections pool or exception.
    """
    try:
        with async_timeout(timeout, loop=self._loop):
            pool = await create_pool(
                address, minsize=1, maxsize=2,
                parser=self._parser_class,
                loop=self._loop)
        pools.append(pool)
        return pool
    except asyncio.TimeoutError as err:
        sentinel_logger.debug(
            "Failed to connect to Sentinel(%r) within %ss timeout",
            address, timeout)
        return err
    except Exception as err:
        sentinel_logger.debug(
            "Error connecting to Sentinel(%r): %r", address, err)
        return err

async def discover_master(self, service, timeout):
    """Perform Master discovery for specified service."""
    # TODO: get lock
    idle_timeout = timeout
    # FIXME: single timeout used 4 times;
    #   meaning discovery can take up to:
    #   3 * timeout * (sentinels count)
    #
    #   having one global timeout also can leed to
    #   a problem when not all sentinels are checked.

    # use a copy, cause pools can change
    pools = self._pools[:]
    for sentinel in pools:
        try:
            with async_timeout(timeout, loop=self._loop):
                address = await self._get_masters_address(
                    sentinel, service)

            pool = self._masters[service]
            with async_timeout(timeout, loop=self._loop), \
                    contextlib.ExitStack() as stack:
                conn = await pool._create_new_connection(address)
                stack.callback(conn.close)
                await self._verify_service_role(conn, 'master')
                stack.pop_all()

            return conn
        except asyncio.CancelledError:
            # we must correctly handle CancelledError(s):
            #   application may be stopped or function can be cancelled
            #   by outer timeout, so we must stop the look up.
            raise
        except asyncio.TimeoutError:
            continue
        except DiscoverError as err:
            sentinel_logger.debug("DiscoverError(%r, %s): %r",
                                  sentinel, service, err)
            await asyncio.sleep(idle_timeout, loop=self._loop)
            continue
        except RedisError as err:
            raise MasterReplyError("Service {} error".format(service), err)
        except Exception:
            # TODO: clear (drop) connections to schedule reconnect
            await asyncio.sleep(idle_timeout, loop=self._loop)
            continue
    else:
        raise MasterNotFoundError("No master found for {}".format(service))

async def discover_slave(self, service, timeout, **kwargs):
    """Perform Slave discovery for specified service."""
    # TODO: use kwargs to change how slaves are picked up
    #   (eg: round-robin, priority, random, etc)
    idle_timeout = timeout
    pools = self._pools[:]
    for sentinel in pools:
        try:
            with async_timeout(timeout, loop=self._loop):
                address = await self._get_slave_address(
                    sentinel, service)  # add **kwargs
            pool = self._slaves[service]
            with async_timeout(timeout, loop=self._loop), \
                    contextlib.ExitStack() as stack:
                conn = await pool._create_new_connection(address)
                stack.callback(conn.close)
                await self._verify_service_role(conn, 'slave')
                stack.pop_all()
            return conn
        except asyncio.CancelledError:
            raise
        except asyncio.TimeoutError:
            continue
        except DiscoverError:
            await asyncio.sleep(idle_timeout, loop=self._loop)
            continue
        except RedisError as err:
            raise SlaveReplyError("Service {} error".format(service), err)
        except Exception:
            await asyncio.sleep(idle_timeout, loop=self._loop)
            continue
    raise SlaveNotFoundError("No slave found for {}".format(service))

async def _get_masters_address(self, sentinel, service):
    # NOTE: we don't use `get-master-addr-by-name`
    #   as it can provide stale data so we repeat
    #   after redis-py and check service flags.
    state = await sentinel.execute(b'sentinel', b'master',
                                   service, encoding='utf-8')
    if not state:
        raise UnknownService()
    state = make_dict(state)
    address = state['ip'], int(state['port'])
    flags = set(state['flags'].split(','))
    if {'s_down', 'o_down', 'disconnected'} & flags:
        raise BadState(state)
    return address

async def _get_slave_address(self, sentinel, service):
    # Find and return single slave address
    slaves = await sentinel.execute(b'sentinel', b'slaves',
                                    service, encoding='utf-8')
    if not slaves:
        raise UnknownService()
    for state in map(make_dict, slaves):
        address = state['ip'], int(state['port'])
        flags = set(state['flags'].split(','))
        if {'s_down', 'o_down', 'disconnected'} & flags:
            continue
        return address
    else:
        raise BadState(state)   # XXX: only last state

async def _verify_service_role(self, conn, role):
    res = await conn.execute(b'role', encoding='utf-8')
    if res[0] != role:
        raise RoleMismatch(res)

def _need_rediscover(self, service):
    # NOTE(review): the loop variable shadows the `service` parameter and
    # every managed pool is flagged, not just the named one — looks
    # intentional (broad invalidation) but worth confirming upstream.
    sentinel_logger.debug("Must redisover service %s", service)
    for service, pool in self._masters.items():
        pool.need_rediscover()
    for service, pool in self._slaves.items():
        pool.need_rediscover()
# NOTE(review): the following are methods of ManagedPool(ConnectionsPool);
# the class statement itself was lost in the dump — TODO restore it.
def __init__(self, sentinel, service, is_master,
             db=None, password=None, encoding=None,
             parser=None, *, minsize, maxsize, ssl=None, loop=None):
    super().__init__(_NON_DISCOVERED,
                     db=db, password=password, encoding=encoding,
                     minsize=minsize, maxsize=maxsize, ssl=ssl,
                     parser=parser, loop=loop)
    assert self._address is _NON_DISCOVERED
    self._sentinel = sentinel
    self._service = service
    self._is_master = is_master
    self._discover_timeout = .2

@property
def address(self):
    """Discovered service address, or None while undiscovered."""
    if self._address is _NON_DISCOVERED:
        return
    return self._address

def get_connection(self, command, args=()):
    if self._address is _NON_DISCOVERED:
        return None, _NON_DISCOVERED
    return super().get_connection(command, args)

async def _create_new_connection(self, address):
    if address is _NON_DISCOVERED:
        # Perform service discovery.
        # Returns Connection or raises error if no service can be found.
        await self._do_clear()  # make `clear` blocking

        if self._is_master:
            conn = await self._sentinel.discover_master(
                self._service, timeout=self._sentinel.discover_timeout)
        else:
            conn = await self._sentinel.discover_slave(
                self._service, timeout=self._sentinel.discover_timeout)
        self._address = conn.address
        sentinel_logger.debug("Discoverred new address %r for %s",
                              conn.address, self._service)
        return conn
    return await super()._create_new_connection(address)

def _drop_closed(self):
    diff = len(self._pool)
    super()._drop_closed()
    diff -= len(self._pool)
    if diff:
        # closed connections were in pool:
        #   * reset address;
        #   * notify sentinel pool
        sentinel_logger.debug(
            "Dropped %d closed connnection(s); must rediscover", diff)
        self._sentinel._need_rediscover(self._service)

async def acquire(self, command=None, args=()):
    if self._address is _NON_DISCOVERED:
        await self.clear()
    return await super().acquire(command, args)

def release(self, conn):
    was_closed = conn.closed
    super().release(conn)
    # if connection was closed while used and not by release()
    if was_closed:
        sentinel_logger.debug(
            "Released closed connection; must rediscover")
        self._sentinel._need_rediscover(self._service)

def need_rediscover(self):
    self._address = _NON_DISCOVERED


def make_dict(plain_list):
    """Build a dict from a flat [key, value, key, value, ...] reply."""
    it = iter(plain_list)
    return dict(zip(it, it))


class DiscoverError(Exception):
    """Internal errors for masters/slaves discovery."""


class BadState(DiscoverError):
    """Bad master's / slave's state read from sentinel."""


class UnknownService(DiscoverError):
    """Service is not monitored by specific sentinel."""
class RoleMismatch(DiscoverError):
    """Service reported to have other Role."""


import asyncio

from ..util import wait_ok, wait_convert
from ..commands import Redis
from .pool import create_sentinel_pool


async def create_sentinel(sentinels, *, db=None, password=None,
                          encoding=None, minsize=1, maxsize=10,
                          ssl=None, timeout=0.2, loop=None):
    """Creates Redis Sentinel client.

    `sentinels` is a list of sentinel nodes.
    """
    if loop is None:
        loop = asyncio.get_event_loop()

    pool = await create_sentinel_pool(sentinels, db=db, password=password,
                                      encoding=encoding, minsize=minsize,
                                      maxsize=maxsize, ssl=ssl,
                                      timeout=timeout, loop=loop)
    return RedisSentinel(pool)


class RedisSentinel:
    """Redis Sentinel client."""

    def __init__(self, pool):
        self._pool = pool

    def close(self):
        """Close client connections."""
        self._pool.close()

    async def wait_closed(self):
        """Coroutine waiting until underlying connections are closed."""
        await self._pool.wait_closed()

    @property
    def closed(self):
        """True if connection is closed."""
        return self._pool.closed

    def master_for(self, name):
        """Returns Redis client to master Redis server."""
        # TODO: make class configurable
        return Redis(self._pool.master_for(name))

    def slave_for(self, name):
        """Returns Redis client to slave Redis server."""
        # TODO: make class configurable
        return Redis(self._pool.slave_for(name))

    def execute(self, command, *args, **kwargs):
        """Execute Sentinel command.

        It will be prefixed with SENTINEL automatically.
        """
        return self._pool.execute(
            b'SENTINEL', command, *args, **kwargs)

    async def ping(self):
        """Send PING command to Sentinel instance(s)."""
        # TODO: add kwargs allowing to pick sentinel to send command to.
        return await self._pool.execute(b'PING')
def master(self, name):
    """Returns a dictionary containing the specified masters state."""
    fut = self.execute(b'MASTER', name, encoding='utf-8')
    return wait_convert(fut, parse_sentinel_master)

def master_address(self, name):
    """Returns a (host, port) pair for the given ``name``."""
    fut = self.execute(b'get-master-addr-by-name', name, encoding='utf-8')
    return wait_convert(fut, parse_address)

def masters(self):
    """Returns a list of dictionaries containing each master's state."""
    fut = self.execute(b'MASTERS', encoding='utf-8')
    # TODO: process masters: we can adjust internal state
    return wait_convert(fut, parse_sentinel_masters)

def slaves(self, name):
    """Returns a list of slaves for ``name``."""
    fut = self.execute(b'SLAVES', name, encoding='utf-8')
    return wait_convert(fut, parse_sentinel_slaves_and_sentinels)

def sentinels(self, name):
    """Returns a list of sentinels for ``name``."""
    fut = self.execute(b'SENTINELS', name, encoding='utf-8')
    return wait_convert(fut, parse_sentinel_slaves_and_sentinels)

def monitor(self, name, ip, port, quorum):
    """Add a new master to Sentinel to be monitored."""
    fut = self.execute(b'MONITOR', name, ip, port, quorum)
    return wait_ok(fut)

def remove(self, name):
    """Remove a master from Sentinel's monitoring."""
    fut = self.execute(b'REMOVE', name)
    return wait_ok(fut)

def set(self, name, option, value):
    """Set Sentinel monitoring parameters for a given master."""
    fut = self.execute(b"SET", name, option, value)
    return wait_ok(fut)

def failover(self, name):
    """Force a failover of a named master."""
    fut = self.execute(b'FAILOVER', name)
    return wait_ok(fut)

def check_quorum(self, name):
    """
    Check if the current Sentinel configuration is able
    to reach the quorum needed to failover a master,
    and the majority needed to authorize the failover.
    """
    return self.execute(b'CKQUORUM', name)


# Per-key coercions applied to Sentinel state replies.
SENTINEL_STATE_TYPES = {
    'can-failover-its-master': int,
    'config-epoch': int,
    'down-after-milliseconds': int,
    'failover-timeout': int,
    'info-refresh': int,
    'last-hello-message': int,
    'last-ok-ping-reply': int,
    'last-ping-reply': int,
    'last-ping-sent': int,
    'master-link-down-time': int,
    'master-port': int,
    'num-other-sentinels': int,
    'num-slaves': int,
    'o-down-time': int,
    'pending-commands': int,
    'link-pending-commands': int,
    'link-refcount': int,
    'parallel-syncs': int,
    'port': int,
    'quorum': int,
    'role-reported-time': int,
    's-down-time': int,
    'slave-priority': int,
    'slave-repl-offset': int,
    'voted-leader-epoch': int,
    'flags': lambda s: frozenset(s.split(',')),  # TODO: make flags enum?
}


def pairs_to_dict_typed(response, type_info):
    """Turn a flat [key, value, ...] reply into a dict, coercing known keys."""
    it = iter(response)
    result = {}
    for key, value in zip(it, it):
        if key in type_info:
            try:
                value = type_info[key](value)
            except (TypeError, ValueError):
                # if for some reason the value can't be coerced, just use
                # the string value
                pass
        result[key] = value
    return result


def parse_sentinel_masters(response):
    """Map master name -> typed state dict."""
    result = {}
    for item in response:
        state = pairs_to_dict_typed(item, SENTINEL_STATE_TYPES)
        result[state['name']] = state
    return result


def parse_sentinel_slaves_and_sentinels(response):
    """Typed state dict for every slave/sentinel entry."""
    return [pairs_to_dict_typed(item, SENTINEL_STATE_TYPES)
            for item in response]


def parse_sentinel_master(response):
    return pairs_to_dict_typed(response, SENTINEL_STATE_TYPES)


def parse_address(value):
    """(host, int(port)) pair, or None passthrough."""
    if value is not None:
        return (value[0], int(value[1]))
import types
import asyncio
import socket
from functools import partial
from collections import deque

from .util import (
    encode_command,
    wait_ok,
    _NOTSET,
    _set_result,
    _set_exception,
    coerced_keys_dict,
    decode,
    parse_url,
    )
from .parser import Reader
from .stream import open_connection, open_unix_connection
from .errors import (
    ConnectionClosedError,
    ConnectionForcedCloseError,
    RedisError,
    ProtocolError,
    ReplyError,
    WatchVariableError,
    ReadOnlyError,
    MaxClientsError
    )
from .pubsub import Channel
from .abc import AbcChannel
from .abc import AbcConnection
from .log import logger


__all__ = ['create_connection', 'RedisConnection']

MAX_CHUNK_SIZE = 65536

_PUBSUB_COMMANDS = (
    'SUBSCRIBE', b'SUBSCRIBE',
    'PSUBSCRIBE', b'PSUBSCRIBE',
    'UNSUBSCRIBE', b'UNSUBSCRIBE',
    'PUNSUBSCRIBE', b'PUNSUBSCRIBE',
    )


async def create_connection(address, *, db=None, password=None, ssl=None,
                            encoding=None, parser=None, loop=None,
                            timeout=None, connection_cls=None):
    """Creates redis connection.

    Opens connection to Redis server specified by address argument.
    Address argument can be one of the following:
    * A tuple representing (host, port) pair for TCP connections;
    * A string representing either Redis URI or unix domain socket path.

    SSL argument is passed through to asyncio.create_connection.
    By default SSL/TLS is not used.

    By default any timeout is applied at the connection stage, however
    you can set a limitted time used trying to open a connection via
    the `timeout` Kw.

    Encoding argument can be used to decode byte-replies to strings.
    By default no decoding is done.

    Parser parameter can be used to pass custom Redis protocol parser class.
    By default hiredis.Reader is used (unless it is missing or platform
    is not CPython).

    Return value is RedisConnection instance or a connection_cls if it is
    given.

    This function is a coroutine.
    """
    assert isinstance(address, (tuple, list, str)), "tuple or str expected"
    if isinstance(address, str):
        logger.debug("Parsing Redis URI %r", address)
        address, options = parse_url(address)
        db = options.setdefault('db', db)
        password = options.setdefault('password', password)
        encoding = options.setdefault('encoding', encoding)
        timeout = options.setdefault('timeout', timeout)
        if 'ssl' in options:
            assert options['ssl'] or (not options['ssl'] and not ssl), (
                "Conflicting ssl options are set", options['ssl'], ssl)
            ssl = ssl or options['ssl']

    if timeout is not None and timeout <= 0:
        raise ValueError("Timeout has to be None or a number greater than 0")

    if connection_cls:
        assert issubclass(connection_cls, AbcConnection),\
                "connection_class does not meet the AbcConnection contract"
        cls = connection_cls
    else:
        cls = RedisConnection

    if loop is None:
        loop = asyncio.get_event_loop()

    if isinstance(address, (list, tuple)):
        host, port = address
        logger.debug("Creating tcp connection to %r", address)
        reader, writer = await asyncio.wait_for(open_connection(
            host, port, limit=MAX_CHUNK_SIZE, ssl=ssl, loop=loop),
            timeout, loop=loop)
        sock = writer.transport.get_extra_info('socket')
        if sock is not None:
            # disable Nagle to keep command latency low
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
            address = sock.getpeername()
        address = tuple(address[:2])
    else:
        logger.debug("Creating unix connection to %r", address)
        reader, writer = await asyncio.wait_for(open_unix_connection(
            address, ssl=ssl, limit=MAX_CHUNK_SIZE, loop=loop),
            timeout, loop=loop)
        sock = writer.transport.get_extra_info('socket')
        if sock is not None:
            address = sock.getpeername()

    conn = cls(reader, writer, encoding=encoding,
               address=address, parser=parser, loop=loop)

    try:
        if password is not None:
            await conn.auth(password)
        if db is not None:
            await conn.select(db)
    except Exception:
        conn.close()
        await conn.wait_closed()
        raise
    return conn


class RedisConnection(AbcConnection):
    """Redis connection."""

    def __init__(self, reader, writer, *, address, encoding=None,
                 parser=None, loop=None):
        if loop is None:
            loop = asyncio.get_event_loop()
        if parser is None:
            parser = Reader
        assert callable(parser), (
            "Parser argument is not callable", parser)
        self._reader = reader
        self._writer = writer
        self._address = address
        self._loop = loop
        self._waiters = deque()
        self._reader.set_parser(
            parser(protocolError=ProtocolError, replyError=ReplyError)
            )
        self._reader_task = asyncio.ensure_future(self._read_data(),
                                                  loop=self._loop)
        self._close_msg = None
        self._db = 0
        self._closing = False
        self._closed = False
        self._close_waiter = loop.create_future()
        self._reader_task.add_done_callback(self._close_waiter.set_result)
        self._in_transaction = None
        self._transaction_error = None  # XXX: never used?
        self._in_pubsub = 0
        self._pubsub_channels = coerced_keys_dict()
        self._pubsub_patterns = coerced_keys_dict()
        self._encoding = encoding
def __repr__(self):
    # FIX: the extraction stripped the angle-bracket template leaving
    # ''.format(self._db); restore the informative repr.
    return '<RedisConnection [db:{}]>'.format(self._db)

async def _read_data(self):
    """Response reader task."""
    last_error = ConnectionClosedError(
        "Connection has been closed by server")
    while not self._reader.at_eof():
        try:
            obj = await self._reader.readobj()
        except asyncio.CancelledError:
            # NOTE: reader can get cancelled from `close()` method only.
            last_error = RuntimeError('this is unexpected')
            break
        except ProtocolError as exc:
            # ProtocolError is fatal
            # so connection must be closed
            if self._in_transaction is not None:
                self._transaction_error = exc
            last_error = exc
            break
        except Exception as exc:
            # NOTE: for QUIT command connection error can be received
            #       before response
            last_error = exc
            break
        else:
            if (obj == b'' or obj is None) and self._reader.at_eof():
                logger.debug("Connection has been closed by server,"
                             " response: %r", obj)
                last_error = ConnectionClosedError("Reader at end of file")
                break

            if isinstance(obj, MaxClientsError):
                last_error = obj
                break
            if self._in_pubsub:
                self._process_pubsub(obj)
            else:
                self._process_data(obj)
    self._closing = True
    self._loop.call_soon(self._do_close, last_error)

def _process_data(self, obj):
    """Processes command results."""
    assert len(self._waiters) > 0, (type(obj), obj)
    waiter, encoding, cb = self._waiters.popleft()
    if isinstance(obj, RedisError):
        if isinstance(obj, ReplyError):
            if obj.args[0].startswith('READONLY'):
                obj = ReadOnlyError(obj.args[0])
        _set_exception(waiter, obj)
        if self._in_transaction is not None:
            self._transaction_error = obj
    else:
        if encoding is not None:
            try:
                obj = decode(obj, encoding)
            except Exception as exc:
                _set_exception(waiter, exc)
                return
        if cb is not None:
            try:
                obj = cb(obj)
            except Exception as exc:
                _set_exception(waiter, exc)
                return
        _set_result(waiter, obj)
        if self._in_transaction is not None:
            self._in_transaction.append((encoding, cb))

def _process_pubsub(self, obj, *, process_waiters=True):
    """Processes pubsub messages."""
    kind, *args, data = obj
    if kind in (b'subscribe', b'unsubscribe'):
        chan, = args
        if process_waiters and self._in_pubsub and self._waiters:
            self._process_data(obj)
        if kind == b'unsubscribe':
            ch = self._pubsub_channels.pop(chan, None)
            if ch:
                ch.close()
        self._in_pubsub = data
    elif kind in (b'psubscribe', b'punsubscribe'):
        chan, = args
        if process_waiters and self._in_pubsub and self._waiters:
            self._process_data(obj)
        if kind == b'punsubscribe':
            ch = self._pubsub_patterns.pop(chan, None)
            if ch:
                ch.close()
        self._in_pubsub = data
    elif kind == b'message':
        chan, = args
        self._pubsub_channels[chan].put_nowait(data)
    elif kind == b'pmessage':
        pattern, chan = args
        self._pubsub_patterns[pattern].put_nowait((chan, data))
    elif kind == b'pong':
        if process_waiters and self._in_pubsub and self._waiters:
            self._process_data(data or b'PONG')
    else:
        logger.warning("Unknown pubsub message received %r", obj)

def execute(self, command, *args, encoding=_NOTSET):
    """Executes redis command and returns Future waiting for the answer.

    Raises:
    * TypeError if any of args can not be encoded as bytes.
    * ReplyError on redis '-ERR' resonses.
    * ProtocolError when response can not be decoded meaning connection
      is broken.
    """
    if self._reader is None or self._reader.at_eof():
        msg = self._close_msg or "Connection closed or corrupted"
        raise ConnectionClosedError(msg)
    if command is None:
        raise TypeError("command must not be None")
    if None in args:
        raise TypeError("args must not contain None")
    command = command.upper().strip()
    is_pubsub = command in _PUBSUB_COMMANDS
    is_ping = command in ('PING', b'PING')
    if self._in_pubsub and not (is_pubsub or is_ping):
        raise RedisError("Connection in SUBSCRIBE mode")
    elif is_pubsub:
        logger.warning("Deprecated. Use `execute_pubsub` method directly")
        return self.execute_pubsub(command, *args)

    if command in ('SELECT', b'SELECT'):
        cb = partial(self._set_db, args=args)
    elif command in ('MULTI', b'MULTI'):
        cb = self._start_transaction
    elif command in ('EXEC', b'EXEC'):
        cb = partial(self._end_transaction, discard=False)
    elif command in ('DISCARD', b'DISCARD'):
        cb = partial(self._end_transaction, discard=True)
    else:
        cb = None
    if encoding is _NOTSET:
        encoding = self._encoding
    fut = self._loop.create_future()
    self._writer.write(encode_command(command, *args))
    self._waiters.append((fut, encoding, cb))
    return fut

def execute_pubsub(self, command, *channels):
    """Executes redis (p)subscribe/(p)unsubscribe commands.

    Returns asyncio.gather coroutine waiting for all channels/patterns
    to receive answers.
    """
    command = command.upper().strip()
    assert command in _PUBSUB_COMMANDS, (
        "Pub/Sub command expected", command)
    if self._reader is None or self._reader.at_eof():
        raise ConnectionClosedError("Connection closed or corrupted")
    if None in set(channels):
        raise TypeError("args must not contain None")
    if not len(channels):
        raise TypeError("No channels/patterns supplied")
    # PSUBSCRIBE / PUNSUBSCRIBE are the 10/12-char commands
    is_pattern = len(command) in (10, 12)
    mkchannel = partial(Channel, is_pattern=is_pattern, loop=self._loop)
    channels = [ch if isinstance(ch, AbcChannel) else mkchannel(ch)
                for ch in channels]
    if not all(ch.is_pattern == is_pattern for ch in channels):
        raise ValueError("Not all channels {} match command {}"
                         .format(channels, command))
    cmd = encode_command(command, *(ch.name for ch in channels))
    res = []
    for ch in channels:
        fut = self._loop.create_future()
        res.append(fut)
        cb = partial(self._update_pubsub, ch=ch)
        self._waiters.append((fut, None, cb))
    self._writer.write(cmd)
    return asyncio.gather(*res, loop=self._loop)

def close(self):
    """Close connection."""
    self._do_close(ConnectionForcedCloseError())

def _do_close(self, exc):
    if self._closed:
        return
    self._closed = True
    self._closing = False
    self._writer.transport.close()
    self._reader_task.cancel()
    self._reader_task = None
    self._writer = None
    self._reader = None
    if exc is not None:
        self._close_msg = str(exc)
    while self._waiters:
        waiter, *spam = self._waiters.popleft()
        logger.debug("Cancelling waiter %r", (waiter, spam))
        if exc is None:
            _set_exception(waiter, ConnectionForcedCloseError())
        else:
            _set_exception(waiter, exc)
    while self._pubsub_channels:
        _, ch = self._pubsub_channels.popitem()
        logger.debug("Closing pubsub channel %r", ch)
        ch.close()
    while self._pubsub_patterns:
        _, ch = self._pubsub_patterns.popitem()
        logger.debug("Closing pubsub pattern %r", ch)
        ch.close()

@property
def closed(self):
    """True if connection is closed."""
    closed = self._closing or self._closed
    if not closed and self._reader and self._reader.at_eof():
        self._closing = closed = True
        self._loop.call_soon(self._do_close, None)
    return closed

async def wait_closed(self):
    """Coroutine waiting until connection is closed."""
    await asyncio.shield(self._close_waiter, loop=self._loop)

@property
def db(self):
    """Currently selected db index."""
    return self._db

@property
def encoding(self):
    """Current set codec or None."""
    return self._encoding

@property
def address(self):
    """Redis server address, either host-port tuple or str."""
    return self._address

def select(self, db):
    """Change the selected database for the current connection."""
    if not isinstance(db, int):
        raise TypeError("DB must be of int type, not {!r}".format(db))
    if db < 0:
        raise ValueError("DB must be greater or equal 0, got {!r}"
                         .format(db))
    fut = self.execute('SELECT', db)
    return wait_ok(fut)

def _set_db(self, ok, args):
    assert ok in {b'OK', 'OK'}, ("Unexpected result of SELECT", ok)
    self._db = args[0]
    return ok

def _start_transaction(self, ok):
    assert self._in_transaction is None, (
        "Connection is already in transaction", self._in_transaction)
    self._in_transaction = deque()
    self._transaction_error = None
    return ok

def _end_transaction(self, obj, discard):
    assert self._in_transaction is not None, (
        "Connection is not in transaction", obj)
    self._transaction_error = None
    recall, self._in_transaction = self._in_transaction, None
    recall.popleft()  # ignore first (its _start_transaction)
    if discard:
        return obj
    assert isinstance(obj, list) or (obj is None and not discard), (
        "Unexpected MULTI/EXEC result", obj, recall)
    # TODO: need to be able to re-try transaction
    if obj is None:
        err = WatchVariableError("WATCH variable has changed")
        obj = [err] * len(recall)
    assert len(obj) == len(recall), (
        "Wrong number of result items in mutli-exec", obj, recall)
    res = []
    for o, (encoding, cb) in zip(obj, recall):
        if not isinstance(o, RedisError):
            try:
                if encoding:
                    o = decode(o, encoding)
                if cb:
                    o = cb(o)
            except Exception as err:
                res.append(err)
                continue
        res.append(o)
    return res

def _update_pubsub(self, obj, *, ch):
    kind, *pattern, channel, subscriptions = obj
    self._in_pubsub, was_in_pubsub = subscriptions, self._in_pubsub
    if kind == b'subscribe' and channel not in self._pubsub_channels:
        self._pubsub_channels[channel] = ch
    elif kind == b'psubscribe' and channel not in self._pubsub_patterns:
        self._pubsub_patterns[channel] = ch
    if not was_in_pubsub:
        self._process_pubsub(obj, process_waiters=False)
    return obj

@property
def in_transaction(self):
    """Set to True when MULTI command was issued."""
    return self._in_transaction is not None

@property
def in_pubsub(self):
    """Indicates that connection is in PUB/SUB mode.

    Provides the number of subscribed channels.
    """
    return self._in_pubsub
""" return self._in_pubsub @property def pubsub_channels(self): """Returns read-only channels dict.""" return types.MappingProxyType(self._pubsub_channels) @property def pubsub_patterns(self): """Returns read-only patterns dict.""" return types.MappingProxyType(self._pubsub_patterns) def auth(self, password): """Authenticate to server.""" fut = self.execute('AUTH', password) return wait_ok(fut) aioredis-1.0.0/aioredis/stream.py0000644000175000017500000000563413203624357017570 0ustar alexeyalexey00000000000000import asyncio __all__ = [ 'open_connection', 'open_unix_connection', 'StreamReader', ] async def open_connection(host=None, port=None, *, limit, loop=None, parser=None, **kwds): # XXX: parser is not used (yet) if loop is None: loop = asyncio.get_event_loop() reader = StreamReader(limit=limit, loop=loop) protocol = asyncio.StreamReaderProtocol(reader, loop=loop) transport, _ = await loop.create_connection( lambda: protocol, host, port, **kwds) writer = asyncio.StreamWriter(transport, protocol, reader, loop) return reader, writer async def open_unix_connection(address, *, limit, loop=None, parser=None, **kwds): # XXX: parser is not used (yet) if loop is None: loop = asyncio.get_event_loop() reader = StreamReader(limit=limit, loop=loop) protocol = asyncio.StreamReaderProtocol(reader, loop=loop) transport, _ = await loop.create_unix_connection( lambda: protocol, address, **kwds) writer = asyncio.StreamWriter(transport, protocol, reader, loop) return reader, writer class StreamReader(asyncio.StreamReader): """ Override the official StreamReader to address the following issue: http://bugs.python.org/issue30861 Also it leverages to get rid of the dobule buffer and get rid of one coroutine step. Data flows from the buffer to the Redis parser directly. 
""" def set_parser(self, parser): # XXX: we'll get AttributeError unless set_parser is called self._parser = parser def feed_data(self, data): assert not self._eof, 'feed_data after feed_eof' if not data: return self._parser.feed(data) self._wakeup_waiter() # TODO: implement pause the read. Its needed # expose the len of the buffer from hiredis # to make it possible. async def readobj(self): """ Return a parsed Redis object or an exception when something wrong happened. """ while True: obj = self._parser.gets() if obj is not False: # TODO: implement resume the read # Return any valid object and the Nil->None # case. When its False there is nothing there # to be parsed and we have to wait for more data. return obj if self._exception: raise self._exception if self._eof: break await self._wait_for_data('readobj') # NOTE: after break we return None which must be handled as b'' async def _read_not_allowed(self, *args, **kwargs): raise RuntimeError('Use readobj') read = _read_not_allowed readline = _read_not_allowed readuntil = _read_not_allowed readexactly = _read_not_allowed aioredis-1.0.0/aioredis/__init__.py0000644000175000017500000000257413203631137020026 0ustar alexeyalexey00000000000000from .connection import RedisConnection, create_connection from .commands import ( Redis, create_redis, create_redis_pool, GeoPoint, GeoMember, ) from .pool import ConnectionsPool, create_pool from .pubsub import Channel from .sentinel import RedisSentinel, create_sentinel from .errors import ( ConnectionClosedError, ConnectionForcedCloseError, MasterNotFoundError, MultiExecError, PipelineError, ProtocolError, ReadOnlyError, RedisError, ReplyError, MaxClientsError, AuthError, ChannelClosedError, WatchVariableError, PoolClosedError, SlaveNotFoundError, MasterReplyError, SlaveReplyError, ) __version__ = '1.0.0' __all__ = [ # Factories 'create_connection', 'create_pool', 'create_redis', 'create_redis_pool', 'create_sentinel', # Classes 'RedisConnection', 'ConnectionsPool', 'Redis', 
"""The module provides connection and connections pool interfaces.

These are intended to be used for implementing custom connection managers.

NOTE: abstract coroutines are declared with ``async def`` instead of the
``@asyncio.coroutine`` decorator, which was deprecated since Python 3.8
and removed in Python 3.11.
"""
import abc

from abc import ABC


__all__ = [
    'AbcConnection',
    'AbcPool',
    'AbcChannel',
    ]


class AbcConnection(ABC):
    """Abstract connection interface."""

    @abc.abstractmethod
    def execute(self, command, *args, **kwargs):
        """Execute redis command."""

    @abc.abstractmethod
    def execute_pubsub(self, command, *args, **kwargs):
        """Execute Redis (p)subscribe/(p)unsubscribe commands."""

    @abc.abstractmethod
    def close(self):
        """Perform connection(s) close and resources cleanup."""

    @abc.abstractmethod
    async def wait_closed(self):
        """
        Coroutine waiting until all resources are closed/released/cleaned up.
        """

    @property
    @abc.abstractmethod
    def closed(self):
        """Flag indicating if connection is closing or already closed."""

    @property
    @abc.abstractmethod
    def db(self):
        """Current selected DB index."""

    @property
    @abc.abstractmethod
    def encoding(self):
        """Current set connection codec."""

    @property
    @abc.abstractmethod
    def in_pubsub(self):
        """Returns number of subscribed channels.

        Can be tested as bool indicating Pub/Sub mode state.
        """

    @property
    @abc.abstractmethod
    def pubsub_channels(self):
        """Read-only channels dict."""

    @property
    @abc.abstractmethod
    def pubsub_patterns(self):
        """Read-only patterns dict."""

    @property
    @abc.abstractmethod
    def address(self):
        """Connection address."""


class AbcPool(AbcConnection):
    """Abstract connections pool interface.

    Inherited from AbcConnection so both have common interface
    for executing Redis commands.
    """

    @abc.abstractmethod
    def get_connection(self):  # TODO: arguments
        """
        Gets free connection from pool in a sync way.

        If no connection available — returns None.
        """

    @abc.abstractmethod
    async def acquire(self):  # TODO: arguments
        """Acquires connection from pool."""

    @abc.abstractmethod
    def release(self, conn):  # TODO: arguments
        """Releases connection to pool.

        :param AbcConnection conn: Owned connection to be released.
        """

    @property
    @abc.abstractmethod
    def address(self):
        """Connection address or None."""


class AbcChannel(ABC):
    """Abstract Pub/Sub Channel interface."""

    @property
    @abc.abstractmethod
    def name(self):
        """Encoded channel name or pattern."""

    @property
    @abc.abstractmethod
    def is_pattern(self):
        """Boolean flag indicating if channel is pattern channel."""

    @property
    @abc.abstractmethod
    def is_active(self):
        """Flag indicating that channel has unreceived messages
        and not marked as closed."""

    @abc.abstractmethod
    async def get(self):
        """Wait and return new message.

        Will raise ``ChannelClosedError`` if channel is not active.
        """

    # wait_message is not required; details of implementation
    # @abc.abstractmethod
    # def wait_message(self):
    #     pass

    @abc.abstractmethod
    def put_nowait(self, data):
        """Send data to channel.

        Called by RedisConnection when new message received.
        For pattern subscriptions data will be a tuple of
        channel name and message itself.
        """

    @abc.abstractmethod
    def close(self):
        """Marks Channel as closed, no more messages will be sent to it.

        Called by RedisConnection when channel is unsubscribed
        or connection is closed.
        """
__all__ = [
    'RedisError',
    'ProtocolError',
    'ReplyError',
    'MaxClientsError',
    'AuthError',
    'PipelineError',
    'MultiExecError',
    'WatchVariableError',
    'ChannelClosedError',
    'ConnectionClosedError',
    'ConnectionForcedCloseError',
    'PoolClosedError',
    'MasterNotFoundError',
    'SlaveNotFoundError',
    'ReadOnlyError',
    ]


class RedisError(Exception):
    """Base exception class for aioredis exceptions."""


class ProtocolError(RedisError):
    """Raised when protocol error occurs."""


class ReplyError(RedisError):
    """Raised for redis error replies (-ERR)."""

    # Subclasses set this to a prefix (str or tuple of str) that the
    # server reply must start with for that subclass to be chosen.
    MATCH_REPLY = None

    def __new__(cls, msg, *args):
        # Dispatch to the first direct subclass whose MATCH_REPLY
        # prefix matches the reply text.
        for subclass in cls.__subclasses__():
            prefix = subclass.MATCH_REPLY
            if msg and prefix and msg.startswith(prefix):
                return subclass(msg, *args)
        return super().__new__(cls, msg, *args)


class MaxClientsError(ReplyError):
    """Raised for redis server when the maximum number of client has been
    reached."""

    MATCH_REPLY = "ERR max number of clients reached"


class AuthError(ReplyError):
    """Raised when authentication errors occurs."""

    MATCH_REPLY = ("NOAUTH ", "ERR invalid password")


class PipelineError(RedisError):
    """Raised if command within pipeline raised error."""

    def __init__(self, errors):
        label = '{} errors:'.format(self.__class__.__name__)
        super().__init__(label, errors)


class MultiExecError(PipelineError):
    """Raised if command within MULTI/EXEC block caused error."""


class WatchVariableError(MultiExecError):
    """Raised if watched variable changed (EXEC returns None)."""


class ChannelClosedError(RedisError):
    """Raised when Pub/Sub channel is unsubscribed and messages queue is
    empty.
    """


class ReadOnlyError(RedisError):
    """Raised from slave when read-only mode is enabled"""


class MasterNotFoundError(RedisError):
    """Raised for sentinel master not found error."""


class SlaveNotFoundError(RedisError):
    """Raised for sentinel slave not found error."""


class MasterReplyError(RedisError):
    """Raised by sentinel client for master error replies."""


class SlaveReplyError(RedisError):
    """Raised by sentinel client for slave error replies."""


class ConnectionClosedError(RedisError):
    """Raised if connection to server was closed."""


class ConnectionForcedCloseError(ConnectionClosedError):
    """Raised if connection was closed with .close() method."""


class PoolClosedError(RedisError):
    """Raised if pool is closed."""
class PyReader:
    """Pure-Python Redis protocol parser that follows hiredis.Reader
    interface (except setmaxbuf/getmaxbuf).
    """

    def __init__(self, protocolError=ProtocolError,
                 replyError=ReplyError, encoding=None):
        """Create a reader.

        :param protocolError: callable producing protocol-level errors;
        :param replyError: callable producing ``-ERR`` reply errors;
        :param encoding: optional encoding used to decode replies.
        """
        # validate in declaration order so error attribution is stable
        for factory in (protocolError, replyError):
            if not callable(factory):
                raise TypeError("Expected a callable")
        self._parser = Parser(protocolError, replyError, encoding)

    def feed(self, data, o=0, l=-1):
        """Feed ``l`` bytes of ``data`` starting at offset ``o``."""
        # NOTE: parameter names mirror the hiredis.Reader interface.
        if l == -1:
            l = len(data) - o
        if o < 0 or l < 0:
            raise ValueError("negative input")
        if o + l > len(data):
            raise ValueError("input is larger than buffer size")
        self._parser.buf.extend(data[o:o + l])

    def gets(self):
        """Get parsed value or False otherwise.

        Error replies are return as replyError exceptions (not raised).
        Protocol errors are raised.
        """
        return self._parser.parse_one()

    def setmaxbuf(self, size):
        """No-op, kept for hiredis.Reader interface compatibility."""

    def getmaxbuf(self):
        """No-op, kept for hiredis.Reader interface compatibility."""
        return 0
class Parser:
    """Generator-based incremental RESP parser.

    Bytes are appended to ``buf`` externally (by :class:`PyReader`);
    ``parse_one`` resumes parsing and returns one complete reply, or
    ``False`` while more input is needed.
    """

    def __init__(self, protocolError, replyError, encoding):
        """
        :param protocolError: callable producing protocol-level errors;
        :param replyError: callable producing ``-ERR`` reply errors;
        :param encoding: optional encoding used to decode replies.
        """
        self.buf = bytearray()
        self.pos = 0
        self.protocolError = protocolError
        self.replyError = replyError
        self.encoding = encoding
        self._err = None
        self._gen = None  # suspended parse generator, if any

    def waitsome(self, size):
        # Keep yielding False until at least `size` bytes (from current
        # position) have been added to buf.
        while len(self.buf) < self.pos + size:
            yield False

    def waitany(self):
        # Wait until at least one more byte arrives.
        yield from self.waitsome(len(self.buf) + 1)

    def readone(self):
        """Consume and return exactly one byte (as a 1-byte slice)."""
        # BUGFIX: was ``self.buf[self.pos:1]`` which only works by
        # accident while ``pos`` happens to be 0; slice one byte at the
        # current position as intended.
        if not self.buf[self.pos:self.pos + 1]:
            yield from self.waitany()
        val = self.buf[self.pos:self.pos + 1]
        self.pos += 1
        return val

    def readline(self, size=None):
        """Read a CRLF-terminated line (of exactly `size` bytes if given).

        Consumed bytes (including the CRLF) are dropped from ``buf``.
        """
        if size is not None:
            if len(self.buf) < size + 2 + self.pos:
                yield from self.waitsome(size + 2)
            offset = self.pos + size
            if self.buf[offset:offset + 2] != b'\r\n':
                raise self.error("Expected b'\r\n'")
        else:
            offset = self.buf.find(b'\r\n', self.pos)
            while offset < 0:
                yield from self.waitany()
                offset = self.buf.find(b'\r\n', self.pos)
        val = self.buf[self.pos:offset]
        self.pos = 0
        del self.buf[:offset + 2]
        return val

    def readint(self):
        """Read a line and parse it as an integer."""
        try:
            return int((yield from self.readline()))
        except ValueError as exc:
            raise self.error(exc)

    def error(self, msg):
        """Record and return a protocol error (sticky for this parser)."""
        self._err = self.protocolError(msg)
        return self._err

    def parse(self, is_bulk=False):
        """Parse one reply; dispatch on the RESP type byte."""
        if self._err is not None:
            raise self._err
        ctl = yield from self.readone()
        if ctl == b'+':
            # simple string
            val = yield from self.readline()
            if self.encoding is not None:
                try:
                    return val.decode(self.encoding)
                except UnicodeDecodeError:
                    pass
            return bytes(val)
        elif ctl == b'-':
            # error reply: returned (not raised) as a replyError instance
            val = yield from self.readline()
            return self.replyError(val.decode('utf-8'))
        elif ctl == b':':
            # integer
            return (yield from self.readint())
        elif ctl == b'$':
            # bulk string; -1 encodes Nil
            val = yield from self.readint()
            if val == -1:
                return None
            val = yield from self.readline(val)
            if self.encoding:
                try:
                    return val.decode(self.encoding)
                except UnicodeDecodeError:
                    pass
            return bytes(val)
        elif ctl == b'*':
            # array; -1 encodes Nil
            val = yield from self.readint()
            if val == -1:
                return None
            bulk_array = []
            error = None
            for _ in range(val):
                try:
                    bulk_array.append(
                        (yield from self.parse(is_bulk=True)))
                except LookupError as err:
                    # remember the first error but keep consuming the
                    # remaining elements so the stream stays in sync
                    if error is None:
                        error = err
            if error is not None:
                raise error
            return bulk_array
        else:
            raise self.error("Invalid first byte: {!r}".format(ctl))

    def parse_one(self):
        """Return one parsed reply, or False if more input is needed."""
        if self._gen is None:
            self._gen = self.parse()
        try:
            self._gen.send(None)
        except StopIteration as exc:
            self._gen = None
            return exc.value
        except Exception:
            self._gen = None
            raise
        else:
            # generator yielded: waiting for more bytes
            return False
class Channel(AbcChannel):
    """Wrapper around asyncio.Queue."""

    # __slots__ doesn't make much sense with inheritance, so none defined

    def __init__(self, name, is_pattern, loop=None):
        self._queue = asyncio.Queue(loop=loop)
        self._name = _converters[type(name)](name)
        self._is_pattern = is_pattern
        self._loop = loop
        self._closed = False
        self._waiter = None

    def __repr__(self):
        return "<{} name:{!r}, is_pattern:{}, qsize:{}>".format(
            type(self).__name__, self._name,
            self._is_pattern, self._queue.qsize())

    @property
    def name(self):
        """Encoded channel name/pattern."""
        return self._name

    @property
    def is_pattern(self):
        """Set to True if channel is subscribed to pattern."""
        return self._is_pattern

    @property
    def is_active(self):
        """Returns True until there are messages in channel or
        connection is subscribed to it.

        Can be used with ``while``:

        >>> ch = conn.pubsub_channels['chan:1']
        >>> while ch.is_active:
        ...     msg = await ch.get()   # may stuck for a long time
        """
        # A closed channel with at most the None terminator queued
        # is considered inactive.
        return not (self._queue.qsize() <= 1 and self._closed)

    async def get(self, *, encoding=None, decoder=None):
        """Coroutine that waits for and returns a message.

        :raises aioredis.ChannelClosedError: If channel is unsubscribed
            and has no messages.
        """
        assert decoder is None or callable(decoder), decoder
        if not self.is_active:
            if self._queue.qsize() == 1:
                # drain the end-of-stream terminator
                residual = self._queue.get_nowait()
                assert residual is None, residual
                return
            raise ChannelClosedError()
        payload = await self._queue.get()
        if payload is None:
            # TODO: maybe we need an explicit marker for "end of stream";
            # currently None may overlap with a possible `decoder` return
            # value, so the user has to check `ch.is_active` to tell
            # EoS apart from payload.
            return
        sender = None
        if self._is_pattern:
            sender, payload = payload
        if encoding is not None:
            payload = payload.decode(encoding)
        if decoder is not None:
            payload = decoder(payload)
        if self._is_pattern:
            return sender, payload
        return payload

    async def get_json(self, encoding='utf-8'):
        """Shortcut to get JSON messages."""
        return (await self.get(encoding=encoding, decoder=json.loads))

    def iter(self, *, encoding=None, decoder=None):
        """Same as the get method, but as a native async iterator.

        Usage example:

        >>> async for msg in ch.iter():
        ...     print(msg)
        """
        return _IterHelper(self,
                           is_active=lambda ch: ch.is_active,
                           encoding=encoding,
                           decoder=decoder)

    async def wait_message(self):
        """Waits for message to become available in channel.

        Possible usage:

        >>> while (await ch.wait_message()):
        ...     msg = await ch.get()
        """
        if not self.is_active:
            return False
        if not self._queue.empty():
            return True
        if self._waiter is None:
            self._waiter = self._loop.create_future()
        await self._waiter
        return self.is_active

    # internal methods

    def put_nowait(self, data):
        # Called by the connection when a new message arrives.
        self._queue.put_nowait(data)
        if self._waiter is not None:
            fut, self._waiter = self._waiter, None
            _set_result(fut, None, self)

    def close(self):
        """Marks channel as inactive.

        Internal method, will be called from connection
        on `unsubscribe` command.
        """
        if not self._closed:
            # enqueue the None terminator so pending get() calls wake up
            self.put_nowait(None)
            self._closed = True


class _IterHelper:
    """Async-iterator adapter shared by Channel.iter and Receiver.iter."""

    __slots__ = ('_ch', '_is_active', '_args', '_kw')

    def __init__(self, ch, is_active, *args, **kw):
        self._ch = ch
        self._is_active = is_active
        self._args = args
        self._kw = kw

    def __aiter__(self):
        return self

    async def __anext__(self):
        if not self._is_active(self._ch):
            raise StopAsyncIteration    # noqa
        msg = await self._ch.get(*self._args, **self._kw)
        if msg is None:
            # end-of-stream terminator
            raise StopAsyncIteration    # noqa
        return msg
class Receiver:
    """Multi-producers, single-consumer Pub/Sub queue.

    Can be used in cases where a single consumer task
    must read messages from several different channels
    (where pattern subscriptions may not work well
    or channels can be added/removed dynamically).

    Example use case:

    >>> from aioredis.pubsub import Receiver
    >>> from aioredis.abc import AbcChannel
    >>> mpsc = Receiver(loop=loop)
    >>> async def reader(mpsc):
    ...     async for channel, msg in mpsc.iter():
    ...         assert isinstance(channel, AbcChannel)
    ...         print("Got {!r} in channel {!r}".format(msg, channel))
    >>> asyncio.ensure_future(reader(mpsc))
    >>> await redis.subscribe(mpsc.channel('channel:1'),
    ...                       mpsc.channel('channel:3'),
    ...                       mpsc.channel('channel:5'))
    >>> await redis.psubscribe(mpsc.pattern('hello'))
    >>> # publishing 'Hello world' into 'hello-channel'
    >>> # will print this message:
    Got b'Hello world' in channel b'hello-channel'
    >>> # when all is done:
    >>> await redis.unsubscribe('channel:1', 'channel:3', 'channel:5')
    >>> await redis.punsubscribe('hello')
    >>> mpsc.stop()
    >>> # any message received after stop() will be ignored.
    """

    def __init__(self, loop=None):
        if loop is None:
            loop = asyncio.get_event_loop()
        self._queue = asyncio.Queue(loop=loop)
        self._refs = {}         # (encoded name, is_pattern) -> _Sender
        self._waiter = None
        self._running = True
        self._loop = loop

    def __repr__(self):
        # BUGFIX: the format string here was empty ('' .format(...)),
        # so __repr__ always returned '' — restored an informative
        # representation.  NOTE(review): exact upstream wording
        # unverified.
        return ('<Receiver is_active:{}, senders:{}, qsize:{}>'
                .format(self.is_active, len(self._refs),
                        self._queue.qsize()))

    def channel(self, name):
        """Create a channel.

        Returns ``_Sender`` object implementing
        :class:`~aioredis.abc.AbcChannel`.
        """
        enc_name = _converters[type(name)](name)
        key = (enc_name, False)
        if key not in self._refs:
            self._refs[key] = _Sender(self, enc_name,
                                      is_pattern=False,
                                      loop=self._loop)
        return self._refs[key]

    def pattern(self, pattern):
        """Create a pattern channel.

        Returns ``_Sender`` object implementing
        :class:`~aioredis.abc.AbcChannel`.
        """
        enc_pattern = _converters[type(pattern)](pattern)
        key = (enc_pattern, True)
        if key not in self._refs:
            self._refs[key] = _Sender(self, enc_pattern,
                                      is_pattern=True,
                                      loop=self._loop)
        return self._refs[key]

    @property
    def channels(self):
        """Read-only channels dict."""
        return types.MappingProxyType({
            ch.name: ch for ch in self._refs.values()
            if not ch.is_pattern})

    @property
    def patterns(self):
        """Read-only patterns dict."""
        return types.MappingProxyType({
            ch.name: ch for ch in self._refs.values()
            if ch.is_pattern})

    async def get(self, *, encoding=None, decoder=None):
        """Wait for and return pub/sub message from one of channels.

        Return value is either:

        * tuple of two elements: channel & message;

        * tuple of three elements: pattern channel, (target channel &
          message);

        * or None in case Receiver is not active or has just been stopped.

        :raises aioredis.ChannelClosedError: If listener is stopped
            and all messages have been received.
        """
        assert decoder is None or callable(decoder), decoder
        if not self.is_active:
            if not self._running:
                raise ChannelClosedError()
            # inactive but still running: nothing to report
            return
        obj = await self._queue.get()
        if obj is EndOfStream:
            return
        ch, msg = obj
        if ch.is_pattern:
            dest_ch, msg = msg
        if encoding is not None:
            msg = msg.decode(encoding)
        if decoder is not None:
            msg = decoder(msg)
        if ch.is_pattern:
            return ch, (dest_ch, msg)
        return ch, msg

    async def wait_message(self):
        """Blocks until new message appear."""
        if not self._queue.empty():
            return True
        if not self._running:
            return False
        if self._waiter is None:
            self._waiter = self._loop.create_future()
        await self._waiter
        return self.is_active

    @property
    def is_active(self):
        """Returns True if listener has any active subscription."""
        if not self._queue.empty():
            return True
        # NOTE: this expression requires at least one subscriber
        # to return True;
        return (self._running and
                any(ch.is_active for ch in self._refs.values()))

    def stop(self):
        """Stop receiving messages.

        All new messages after this call will be ignored,
        so you must call unsubscribe before stopping this listener.
        """
        self._running = False
        self._put_nowait(EndOfStream, sender=None)

    def iter(self, *, encoding=None, decoder=None):
        """Returns async iterator.

        Usage example:

        >>> async for ch, msg in mpsc.iter():
        ...     print(ch, msg)
        """
        return _IterHelper(self,
                           is_active=lambda r: r.is_active or r._running,
                           encoding=encoding,
                           decoder=decoder)

    # internal methods

    def _put_nowait(self, data, *, sender):
        # Entry point used by _Sender instances; drops (and logs)
        # messages arriving after stop().
        if not self._running and data is not EndOfStream:
            logger.warning("Pub/Sub listener message after stop:"
                           " sender: %r, data: %r",
                           sender, data)
            return
        if data is not EndOfStream:
            data = (sender, data)
        self._queue.put_nowait(data)
        if self._waiter is not None:
            fut, self._waiter = self._waiter, None
            _set_result(fut, None, self)

    def _close(self, sender):
        # Drop the sender's registration once it is closed.
        self._refs.pop((sender.name, sender.is_pattern))
class _Sender(AbcChannel):
    """Write-Only Channel.

    Does not allow direct ``.get()`` calls.
    """

    def __init__(self, receiver, name, is_pattern, *, loop):
        self._receiver = receiver
        self._name = _converters[type(name)](name)
        self._is_pattern = is_pattern
        self._loop = loop
        self._closed = False

    def __repr__(self):
        return "<{} name:{!r}, is_pattern:{}, receiver:{!r}>".format(
            type(self).__name__, self._name,
            self._is_pattern, self._receiver)

    @property
    def name(self):
        """Encoded channel name or pattern."""
        return self._name

    @property
    def is_pattern(self):
        """Set to True if channel is subscribed to pattern."""
        return self._is_pattern

    @property
    def is_active(self):
        """True until the sender has been closed."""
        return not self._closed

    async def get(self, *, encoding=None, decoder=None):
        raise RuntimeError("MPSC channel does not allow direct get() calls")

    def put_nowait(self, data):
        # Forward the incoming message to the owning Receiver.
        self._receiver._put_nowait(data, sender=self)

    def close(self):
        # TODO: close() is exclusive so we can not share same _Sender
        # between different connections.
        # This needs to be fixed.
        if self._closed:
            return
        self._closed = True
        self._receiver._close(self)
# NOTE: never put here anything else;
# just this basic types
_converters = {
    bytes: lambda val: val,
    bytearray: lambda val: val,
    str: lambda val: val.encode('utf-8'),
    int: lambda val: str(val).encode('utf-8'),
    float: lambda val: str(val).encode('utf-8'),
    }


def _bytes_len(sized):
    """Return ``len(sized)`` encoded as ASCII bytes."""
    return str(len(sized)).encode('utf-8')


def encode_command(*args):
    """Encodes arguments into redis bulk-strings array.

    Raises TypeError if any of args not of bytearray, bytes, float,
    int, or str type.
    """
    buf = bytearray(b'*' + _bytes_len(args) + b'\r\n')
    for arg in args:
        conv = _converters.get(type(arg))
        if conv is None:
            raise TypeError("Argument {!r} expected to be of bytearray, bytes,"
                            " float, int, or str type".format(arg))
        barg = conv(arg)
        buf += b'$' + _bytes_len(barg) + b'\r\n' + barg + b'\r\n'
    return buf


def decode(obj, encoding):
    """Recursively decode bytes (and lists of bytes) using `encoding`."""
    if isinstance(obj, bytes):
        return obj.decode(encoding)
    if isinstance(obj, list):
        return [decode(item, encoding) for item in obj]
    return obj


async def wait_ok(fut):
    """Await `fut`; map an OK reply to True, pass QUEUED through."""
    reply = await fut
    if reply in (b'QUEUED', 'QUEUED'):
        # inside MULTI/EXEC the real reply arrives later
        return reply
    return reply in (b'OK', 'OK')


async def wait_convert(fut, type_, **kwargs):
    """Await `fut` and convert the reply with `type_` (QUEUED passes)."""
    reply = await fut
    if reply in (b'QUEUED', 'QUEUED'):
        return reply
    return type_(reply, **kwargs)


async def wait_make_dict(fut):
    """Await a flat key/value reply list and fold it into a dict."""
    reply = await fut
    if reply in (b'QUEUED', 'QUEUED'):
        return reply
    it = iter(reply)
    return dict(zip(it, it))


class coerced_keys_dict(dict):
    """dict that coerces non-bytes keys through _converters on lookup."""

    def __getitem__(self, key):
        if not isinstance(key, bytes):
            key = _converters[type(key)](key)
        return dict.__getitem__(self, key)

    def __contains__(self, key):
        if not isinstance(key, bytes):
            key = _converters[type(key)](key)
        return dict.__contains__(self, key)


class _ScanIter:
    """Async iterator driving a cursor-based SCAN-family command."""

    __slots__ = ('_scan', '_cur', '_ret')

    def __init__(self, scan):
        self._scan = scan
        self._cur = b'0'
        self._ret = []

    def __aiter__(self):
        return self

    async def __anext__(self):
        # refill the local buffer until it has items or the cursor ends
        while not self._ret and self._cur:
            self._cur, self._ret = await self._scan(self._cur)
        if not self._cur and not self._ret:
            raise StopAsyncIteration    # noqa
        return self._ret.pop(0)


def _set_result(fut, result, *info):
    """Set `result` on `fut` unless it is already done (then log)."""
    if fut.done():
        logger.debug("Waiter future is already done %r %r", fut, info)
        assert fut.cancelled(), (
            "waiting future is in wrong state", fut, result, info)
    else:
        fut.set_result(result)


def _set_exception(fut, exception):
    """Set `exception` on `fut` unless it is already done (then log)."""
    if fut.done():
        logger.debug("Waiter future is already done %r", fut)
        assert fut.cancelled(), (
            "waiting future is in wrong state", fut, exception)
    else:
        fut.set_exception(exception)
def parse_url(url):
    """Parse Redis connection URI.

    Parse according to IANA specs:
    * https://www.iana.org/assignments/uri-schemes/prov/redis
    * https://www.iana.org/assignments/uri-schemes/prov/rediss

    Also more rules applied:

    * empty scheme is treated as unix socket path,
      no further parsing is done.

    * 'unix://' scheme is treated as unix socket path and parsed.

    * Multiple query parameter values and blank values are considered
      error.

    * DB number specified as path and as query parameter is considered
      error.

    * Password specified in userinfo and as query parameter is
      considered error.
    """
    parts = urlparse(url)

    assert parts.scheme in ('', 'redis', 'rediss', 'unix'), (
        "Unsupported URI scheme", parts.scheme)
    if parts.scheme == '':
        # bare path: treat the whole string as a unix socket path
        return url, {}

    query = {}
    for param, value in parse_qsl(parts.query, keep_blank_values=True):
        assert param not in query, (
            "Multiple parameters are not allowed", param, value)
        assert value, ("Empty parameters are not allowed", param, value)
        query[param] = value

    if parts.scheme == 'unix':
        assert parts.path, ("Empty path is not allowed", url)
        assert not parts.netloc, (
            "Netlocation is not allowed for unix scheme", parts.netloc)
        return parts.path, _parse_uri_options(query, '', parts.password)

    address = (parts.hostname or 'localhost', int(parts.port or 6379))
    path = parts.path
    if path.startswith('/'):
        # strip exactly one leading slash; the rest is the DB number
        path = path[1:]
    options = _parse_uri_options(query, path, parts.password)
    if parts.scheme == 'rediss':
        options['ssl'] = True
    return address, options


def _parse_uri_options(params, path, password):
    """Merge URI path, query and userinfo pieces into an options dict."""

    def parse_db_num(val):
        if not val:
            return None
        assert val.isdecimal(), ("Invalid decimal integer", val)
        assert val == '0' or not val.startswith('0'), (
            "Expected integer without leading zeroes", val)
        return int(val)

    options = {}

    path_db = parse_db_num(path)
    query_db = parse_db_num(params.get('db'))
    assert path_db is None or query_db is None, (
        "Single DB value expected, got path and query", path_db, query_db)
    if path_db is not None:
        options['db'] = path_db
    elif query_db is not None:
        options['db'] = query_db

    query_password = params.get('password')
    assert not password or not query_password, (
        "Single password value is expected, got in net location and query")
    if password:
        options['password'] = password
    elif query_password:
        options['password'] = query_password

    if 'encoding' in params:
        options['encoding'] = params['encoding']
    if 'ssl' in params:
        assert params['ssl'] in ('true', 'false'), (
            "Expected 'ssl' param to be 'true' or 'false' only",
            params['ssl'])
        options['ssl'] = params['ssl'] == 'true'
    if 'timeout' in params:
        options['timeout'] = float(params['timeout'])
    return options
not password2, ( "Single password value is expected, got in net location and query") if password: options['password'] = password elif password2: options['password'] = password2 if 'encoding' in params: options['encoding'] = params['encoding'] if 'ssl' in params: assert params['ssl'] in ('true', 'false'), ( "Expected 'ssl' param to be 'true' or 'false' only", params['ssl']) options['ssl'] = params['ssl'] == 'true' if 'timeout' in params: options['timeout'] = float(params['timeout']) return options aioredis-1.0.0/LICENSE0000644000175000017500000000207713203624357015127 0ustar alexeyalexey00000000000000The MIT License (MIT) Copyright (c) 2014-2017 Alexey Popravka Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. aioredis-1.0.0/README.rst0000644000175000017500000000706213203624357015610 0ustar alexeyalexey00000000000000aioredis ======== asyncio (PEP 3156) Redis client library. .. image:: https://travis-ci.org/aio-libs/aioredis.svg?branch=master :target: https://travis-ci.org/aio-libs/aioredis .. 
image:: https://codecov.io/gh/aio-libs/aioredis/branch/master/graph/badge.svg :target: https://codecov.io/gh/aio-libs/aioredis .. image:: https://ci.appveyor.com/api/projects/status/wngyx6s98o6hsxmt/branch/master?svg=true :target: https://ci.appveyor.com/project/popravich/aioredis Features -------- ================================ ============================== hiredis_ parser Yes Pure-python parser Yes Low-level & High-level APIs Yes Connections Pool Yes Pipelining support Yes Pub/Sub support Yes SSL/TLS support Yes Sentinel support Yes [1]_ Redis Cluster support WIP Trollius (python 2.7) No Tested CPython versions `3.5, 3.6 `_ [2]_ Tested PyPy3 versions `5.9.0 `_ Tested for Redis server `2.6, 2.8, 3.0, 3.2, 4.0 `_ Support for dev Redis server through low-level API ================================ ============================== .. [1] Sentinel support is available in master branch. This feature is not yet stable and may have some issues. .. [2] For Python 3.3, 3.4 support use aioredis v0.3. Documentation ------------- http://aioredis.readthedocs.io/ Usage examples -------------- Simple low-level interface: .. code:: python import asyncio import aioredis loop = asyncio.get_event_loop() async def go(): conn = await aioredis.create_connection( 'redis://localhost', loop=loop) await conn.execute('set', 'my-key', 'value') val = await conn.execute('get', 'my-key') print(val) conn.close() await conn.wait_closed() loop.run_until_complete(go()) # will print 'value' Simple high-level interface: .. code:: python import asyncio import aioredis loop = asyncio.get_event_loop() async def go(): redis = await aioredis.create_redis( 'redis://localhost', loop=loop) await redis.set('my-key', 'value') val = await redis.get('my-key') print(val) redis.close() await redis.wait_closed() loop.run_until_complete(go()) # will print 'value' Connections pool: .. 
code:: python import asyncio import aioredis loop = asyncio.get_event_loop() async def go(): pool = await aioredis.create_pool( 'redis://localhost', minsize=5, maxsize=10, loop=loop) await pool.execute('set', 'my-key', 'value') print(await pool.execute('get', 'my-key')) # graceful shutdown pool.close() await pool.wait_closed() loop.run_until_complete(go()) Requirements ------------ * Python_ 3.5.3+ * hiredis_ .. note:: hiredis is preferred requirement. Pure-python protocol parser is implemented as well and can be used through ``parser`` parameter. Benchmarks ---------- Benchmarks can be found here: https://github.com/popravich/python-redis-benchmark Discussion list --------------- *aio-libs* google group: https://groups.google.com/forum/#!forum/aio-libs Or gitter room: https://gitter.im/aio-libs/Lobby License ------- The aioredis is offered under MIT license. .. _Python: https://www.python.org .. _hiredis: https://pypi.python.org/pypi/hiredis .. _travis: https://travis-ci.org/aio-libs/aioredis aioredis-1.0.0/docs/0000755000175000017500000000000013203634127015040 5ustar alexeyalexey00000000000000aioredis-1.0.0/docs/releases.rst0000664000175000017500000000021213012410141017354 0ustar alexeyalexey00000000000000Releases ======== Recent ------ .. include:: ../CHANGES.txt :start-line: 2 ---- Historical ---------- .. include:: ../HISTORY.rst aioredis-1.0.0/docs/_build/0000755000175000017500000000000013203634127016276 5ustar alexeyalexey00000000000000aioredis-1.0.0/docs/_build/man/0000755000175000017500000000000013203634127017051 5ustar alexeyalexey00000000000000aioredis-1.0.0/docs/_build/man/aioredis.10000644000175000017500000037653113203634127020751 0ustar alexeyalexey00000000000000.\" Man page generated from reStructuredText. . .TH "AIOREDIS" "1" "Nov 17, 2017" "1.0" "aioredis" .SH NAME aioredis \- aioredis Documentation . .nr rst2man-indent-level 0 . 
.de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .sp asyncio (\fI\%PEP 3156\fP) Redis client library. .sp The library is intended to provide simple and clear interface to Redis based on asyncio\&. .SH FEATURES .TS center; |l|l|. _ T{ hiredis parser T} T{ Yes T} _ T{ Pure\-python parser T} T{ Yes T} _ T{ Low\-level & High\-level APIs T} T{ Yes T} _ T{ Connections Pool T} T{ Yes T} _ T{ Pipelining support T} T{ Yes T} _ T{ Pub/Sub support T} T{ Yes T} _ T{ Sentinel support T} T{ Yes [1] T} _ T{ Redis Cluster support T} T{ WIP T} _ T{ Trollius (python 2.7) T} T{ No T} _ T{ Tested CPython versions T} T{ \fI\%3.5, 3.6\fP [2] T} _ T{ Tested PyPy3 versions T} T{ \fI\%5.9.0\fP T} _ T{ Tested for Redis server T} T{ \fI\%2.6, 2.8, 3.0, 3.2, 4.0\fP T} _ T{ Support for dev Redis server T} T{ through low\-level API T} _ .TE .IP [1] 5 Sentinel support is available in master branch. This feature is not yet stable and may have some issues. .IP [2] 5 For Python 3.3, 3.4 support use aioredis v0.3. 
.SH INSTALLATION .sp The easiest way to install aioredis is by using the package on PyPi: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C pip install aioredis .ft P .fi .UNINDENT .UNINDENT .SH REQUIREMENTS .INDENT 0.0 .IP \(bu 2 Python 3.5.3+ .IP \(bu 2 hiredis .UNINDENT .SH BENCHMARKS .sp Benchmarks can be found here: \fI\%https://github.com/popravich/python\-redis\-benchmark\fP .SH CONTRIBUTE .INDENT 0.0 .IP \(bu 2 Issue Tracker: \fI\%https://github.com/aio\-libs/aioredis/issues\fP .IP \(bu 2 Source Code: \fI\%https://github.com/aio\-libs/aioredis\fP .IP \(bu 2 Contributor’s guide: devel .UNINDENT .sp Feel free to file an issue or make pull request if you find any bugs or have some suggestions for library improvement. .SH LICENSE .sp The aioredis is offered under \fI\%MIT license\fP\&. .sp .ce ---- .ce 0 .sp .SH GETTING STARTED .SS Commands Pipelining .sp Commands pipelining is built\-in. .sp Every command is sent to transport at\-once (ofcourse if no \fBTypeError\fP/\fBValueError\fP was raised) .sp When you making a call with \fBawait\fP / \fByield from\fP you will be waiting result, and then gather results. 
.sp Simple example show both cases (\fBget source code\fP): .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C # No pipelining; async def wait_each_command(): val = await redis.get(\(aqfoo\(aq) # wait until \(gaval\(ga is available cnt = await redis.incr(\(aqbar\(aq) # wait until \(gacnt\(ga is available return val, cnt # Sending multiple commands and then gathering results async def pipelined(): fut1 = redis.get(\(aqfoo\(aq) # issue command and return future fut2 = redis.incr(\(aqbar\(aq) # issue command and return future # block until results are available val, cnt = await asyncio.gather(fut1, fut2) return val, cnt .ft P .fi .UNINDENT .UNINDENT .sp \fBNOTE:\fP .INDENT 0.0 .INDENT 3.5 For convenience \fBaioredis\fP provides \fBpipeline()\fP method allowing to execute bulk of commands as one (\fBget source code\fP): .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C # Explicit pipeline async def explicit_pipeline(): pipe = redis.pipeline() fut1 = pipe.get(\(aqfoo\(aq) fut2 = pipe.incr(\(aqbar\(aq) result = await pipe.execute() val, cnt = await asyncio.gather(fut1, fut2) assert result == [val, cnt] return val, cnt .ft P .fi .UNINDENT .UNINDENT .UNINDENT .UNINDENT .UNINDENT .UNINDENT .SS Multi/Exec transactions .sp \fBaioredis\fP provides several ways for executing transactions: .INDENT 0.0 .IP \(bu 2 when using raw connection you can issue \fBMulti\fP/\fBExec\fP commands manually; .IP \(bu 2 when using \fBaioredis.Redis\fP instance you can use \fBmulti_exec()\fP transaction pipeline. .UNINDENT .sp \fBmulti_exec()\fP method creates and returns new \fBMultiExec\fP object which is used for buffering commands and then executing them inside MULTI/EXEC block. 
.sp Here is a simple example (\fBget source code\fP): .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C async def transaction(): tr = redis.multi_exec() future1 = tr.set(\(aqfoo\(aq, \(aq123\(aq) future2 = tr.set(\(aqbar\(aq, \(aq321\(aq) result = await tr.execute() assert result == await asyncio.gather(future1, future2) return result .ft P .fi .UNINDENT .UNINDENT .sp As you can notice \fBawait\fP is \fBonly\fP used at line 5 with \fBtr.execute\fP and \fBnot with\fP \fBtr.set(...)\fP calls. .sp \fBWARNING:\fP .INDENT 0.0 .INDENT 3.5 It is very important not to \fBawait\fP buffered command (ie \fBtr.set(\(aqfoo\(aq, \(aq123\(aq)\fP) as it will block forever. .sp The following code will block forever: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C tr = redis.multi_exec() await tr.incr(\(aqfoo\(aq) # that\(aqs all. we\(aqve stuck! .ft P .fi .UNINDENT .UNINDENT .UNINDENT .UNINDENT .SS Pub/Sub mode .sp \fBaioredis\fP provides support for Redis Publish/Subscribe messaging. .sp To switch connection to subscribe mode you must execute \fBsubscribe\fP command by yield’ing from \fBsubscribe()\fP it returns a list of \fBChannel\fP objects representing subscribed channels. .sp As soon as connection is switched to subscribed mode the channel will receive and store messages (the \fBChannel\fP object is basically a wrapper around \fI\%asyncio.Queue\fP). To read messages from channel you need to use \fBget()\fP or \fBget_json()\fP coroutines. .sp \fBNOTE:\fP .INDENT 0.0 .INDENT 3.5 In Pub/Sub mode redis connection can only receive messages or issue (P)SUBSCRIBE / (P)UNSUBSCRIBE commands. 
.UNINDENT .UNINDENT .sp Pub/Sub example (\fBget source code\fP): .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C sub = await aioredis.create_redis( \(aqredis://localhost\(aq) ch1, ch2 = await sub.subscribe(\(aqchannel:1\(aq, \(aqchannel:2\(aq) assert isinstance(ch1, aioredis.Channel) assert isinstance(ch2, aioredis.Channel) async def async_reader(channel): while await channel.wait_message(): msg = await channel.get(encoding=\(aqutf\-8\(aq) # ... process message ... print("message in {}: {}".format(channel.name, msg)) tsk1 = asyncio.ensure_future(async_reader(ch1)) # Or alternatively: async def async_reader2(channel): while True: msg = await channel.get(encoding=\(aqutf\-8\(aq) if msg is None: break # ... process message ... print("message in {}: {}".format(channel.name, msg)) tsk2 = asyncio.ensure_future(async_reader2(ch2)) .ft P .fi .UNINDENT .UNINDENT .sp Pub/Sub example (\fBget source code\fP): .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C async def reader(channel): while (await channel.wait_message()): msg = await channel.get(encoding=\(aqutf\-8\(aq) # ... process message ... print("message in {}: {}".format(channel.name, msg)) if msg == STOPWORD: return with await pool as conn: await conn.execute_pubsub(\(aqsubscribe\(aq, \(aqchannel:1\(aq) channel = conn.pubsub_channels[\(aqchannel:1\(aq] await reader(channel) # wait for reader to complete await conn.execute_pubsub(\(aqunsubscribe\(aq, \(aqchannel:1\(aq) # Explicit connection usage conn = await pool.acquire() try: await conn.execute_pubsub(\(aqsubscribe\(aq, \(aqchannel:1\(aq) channel = conn.pubsub_channels[\(aqchannel:1\(aq] await reader(channel) # wait for reader to complete await conn.execute_pubsub(\(aqunsubscribe\(aq, \(aqchannel:1\(aq) finally: pool.release(conn) .ft P .fi .UNINDENT .UNINDENT .SS Python 3.5 \fBasync with\fP / \fBasync for\fP support .sp \fBaioredis\fP is compatible with \fI\%PEP 492\fP\&. 
.sp \fBPool\fP can be used with \fI\%async with\fP (\fBget source code\fP): .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C pool = await aioredis.create_pool( \(aqredis://localhost\(aq) async with pool.get() as conn: value = await conn.execute(\(aqget\(aq, \(aqmy\-key\(aq) print(\(aqraw value:\(aq, value) .ft P .fi .UNINDENT .UNINDENT .sp It also can be used with \fBawait\fP: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C pool = await aioredis.create_pool( \(aqredis://localhost\(aq) # This is exactly the same as: # with (yield from pool) as conn: with (await pool) as conn: value = await conn.execute(\(aqget\(aq, \(aqmy\-key\(aq) print(\(aqraw value:\(aq, value) .ft P .fi .UNINDENT .UNINDENT .sp New \fBscan\fP\-family commands added with support of \fI\%async for\fP (\fBget source code\fP): .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C redis = await aioredis.create_redis( \(aqredis://localhost\(aq) async for key in redis.iscan(match=\(aqsomething*\(aq): print(\(aqMatched:\(aq, key) async for name, val in redis.ihscan(key, match=\(aqsomething*\(aq): print(\(aqMatched:\(aq, name, \(aq\->\(aq, val) async for val in redis.isscan(key, match=\(aqsomething*\(aq): print(\(aqMatched:\(aq, val) async for val, score in redis.izscan(key, match=\(aqsomething*\(aq): print(\(aqMatched:\(aq, val, \(aq:\(aq, score) .ft P .fi .UNINDENT .UNINDENT .SS SSL/TLS support .sp Though Redis server \fI\%does not support data encryption\fP it is still possible to setup Redis server behind SSL proxy. For such cases \fBaioredis\fP library support secure connections through \fI\%asyncio\fP SSL support. See \fI\%BaseEventLoop.create_connection\fP for details. 
.SH MIGRATING FROM V0.3 TO V1.0 .SS API changes and backward incompatible changes: .INDENT 0.0 .IP \(bu 2 \fI\%aioredis.create_pool\fP .IP \(bu 2 \fI\%aioredis.create_reconnecting_redis\fP .IP \(bu 2 \fI\%aioredis.Redis\fP .IP \(bu 2 \fI\%Blocking operations and connection sharing\fP .IP \(bu 2 \fI\%Sorted set commands return values\fP .IP \(bu 2 \fI\%Hash hscan command now returns list of tuples\fP .UNINDENT .sp .ce ---- .ce 0 .sp .SS aioredis.create_pool .sp \fBcreate_pool()\fP now returns \fBConnectionsPool\fP instead of \fBRedisPool\fP\&. .sp This means that pool now operates with \fBRedisConnection\fP objects and not \fBRedis\fP\&. .TS center; |l|l|. _ T{ v0.3 T} T{ .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C pool = await aioredis.create_pool((\(aqlocalhost\(aq, 6379)) with await pool as redis: # calling methods of Redis class await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) .ft P .fi .UNINDENT .UNINDENT T} _ T{ v1.0 T} T{ .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C pool = await aioredis.create_pool((\(aqlocalhost\(aq, 6379)) with await pool as conn: # calling conn.lpush will raise AttributeError exception await conn.execute(\(aqlpush\(aq, \(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) .ft P .fi .UNINDENT .UNINDENT T} _ .TE .SS aioredis.create_reconnecting_redis .sp \fBcreate_reconnecting_redis()\fP has been dropped. .sp \fBcreate_redis_pool()\fP can be used instead of former function. .TS center; |l|l|. 
_ T{ v0.3 T} T{ .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C redis = await aioredis.create_reconnecting_redis( (\(aqlocalhost\(aq, 6379)) await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) .ft P .fi .UNINDENT .UNINDENT T} _ T{ v1.0 T} T{ .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C redis = await aioredis.create_redis_pool( (\(aqlocalhost\(aq, 6379)) await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) .ft P .fi .UNINDENT .UNINDENT T} _ .TE .sp \fBcreate_redis_pool\fP returns \fBRedis\fP initialized with \fBConnectionsPool\fP which is responsible for reconnecting to server. .sp Also \fBcreate_reconnecting_redis\fP was patching the \fBRedisConnection\fP and breaking \fBclosed\fP property (it was always \fBTrue\fP). .SS aioredis.Redis .sp \fBRedis\fP class now operates with objects implementing \fBaioredis.abc.AbcConnection\fP interface. \fBRedisConnection\fP and \fBConnectionsPool\fP are both implementing \fBAbcConnection\fP so it is become possible to use same API when working with either single connection or connections pool. .TS center; |l|l|. _ T{ v0.3 T} T{ .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) pool = await aioredis.create_pool((\(aqlocalhost\(aq, 6379)) redis = await pool.acquire() # get Redis object await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) .ft P .fi .UNINDENT .UNINDENT T} _ T{ v1.0 T} T{ .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) redis = await aioredis.create_redis_pool((\(aqlocalhost\(aq, 6379)) await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) .ft P .fi .UNINDENT .UNINDENT T} _ .TE .SS Blocking operations and connection sharing .sp Current implementation of \fBConnectionsPool\fP by default \fBexecute every command on random connection\fP\&. 
The \fIPros\fP of this is that it allows implementing \fBAbcConnection\fP interface and hide pool inside \fBRedis\fP class, and also keep pipelining feature (like RedisConnection.execute). The \fICons\fP of this is that \fBdifferent tasks may use same connection and block it\fP with some long\-running command. .sp We can call it \fBShared Mode\fP — commands are sent to random connections in pool without need to lock [connection]: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C redis = await aioredis.create_redis_pool( (\(aqlocalhost\(aq, 6379), minsize=1, maxsize=1) async def task(): # Shared mode await redis.set(\(aqkey\(aq, \(aqval\(aq) asyncio.ensure_future(task()) asyncio.ensure_future(task()) # Both tasks will send commands through same connection # without acquiring (locking) it first. .ft P .fi .UNINDENT .UNINDENT .sp Blocking operations (like \fBblpop\fP, \fBbrpop\fP or long\-running LUA scripts) in \fBshared mode\fP will block connection and thus may lead to whole program malfunction. .sp This \fIblocking\fP issue can be easily solved by using exclusive connection for such operations: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C redis = await aioredis.create_redis_pool( (\(aqlocalhost\(aq, 6379), minsize=1, maxsize=1) async def task(): # Exclusive mode with await redis as r: await r.set(\(aqkey\(aq, \(aqval\(aq) asyncio.ensure_future(task()) asyncio.ensure_future(task()) # Both tasks will first acquire connection. .ft P .fi .UNINDENT .UNINDENT .sp We can call this \fBExclusive Mode\fP — context manager is used to acquire (lock) exclusive connection from pool and send all commands through it. 
.sp \fBNOTE:\fP .INDENT 0.0 .INDENT 3.5 This technique is similar to v0.3 pool usage: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C # in aioredis v0.3 pool = await aioredis.create_pool((\(aqlocalhost\(aq, 6379)) with await pool as redis: # Redis is bound to exclusive connection redis.set(\(aqkey\(aq, \(aqval\(aq) .ft P .fi .UNINDENT .UNINDENT .UNINDENT .UNINDENT .SS Sorted set commands return values .sp Sorted set commands (like \fBzrange\fP, \fBzrevrange\fP and others) that accept \fBwithscores\fP argument now \fBreturn list of tuples\fP instead of plain list. .TS center; |l|l|. _ T{ v0.3 T} T{ .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) await redis.zadd(\(aqzset\-key\(aq, 1, \(aqone\(aq, 2, \(aqtwo\(aq) res = await redis.zrange(\(aqzset\-key\(aq, withscores=True) assert res == [b\(aqone\(aq, 1, b\(aqtwo\(aq, 2] # not the easiest way to make a dict it = iter(res) assert dict(zip(it, it)) == {b\(aqone\(aq: 1, b\(aqtwo\(aq: 2} .ft P .fi .UNINDENT .UNINDENT T} _ T{ v1.0 T} T{ .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) await redis.zadd(\(aqzset\-key\(aq, 1, \(aqone\(aq, 2, \(aqtwo\(aq) res = await redis.zrange(\(aqzset\-key\(aq, withscores=True) assert res == [(b\(aqone\(aq, 1), (b\(aqtwo\(aq, 2)] # now it\(aqs easier to make a dict of it assert dict(res) == {b\(aqone\(aq: 1, b\(aqtwo\(aq: 2} .ft P .fi .UNINDENT .UNINDENT T} _ .TE .SS Hash \fBhscan\fP command now returns list of tuples .sp \fBhscan\fP updated to return a list of tuples instead of plain mixed key/value list. .TS center; |l|l|. 
_ T{ v0.3 T} T{ .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) await redis.hmset(\(aqhash\(aq, \(aqone\(aq, 1, \(aqtwo\(aq, 2) cur, data = await redis.hscan(\(aqhash\(aq) assert data == [b\(aqone\(aq, b\(aq1\(aq, b\(aqtwo\(aq, b\(aq2\(aq] # not the easiest way to make a dict it = iter(data) assert dict(zip(it, it)) == {b\(aqone\(aq: b\(aq1\(aq, b\(aqtwo\(aq: b\(aq2\(aq} .ft P .fi .UNINDENT .UNINDENT T} _ T{ v1.0 T} T{ .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) await redis.hmset(\(aqhash\(aq, \(aqone\(aq, 1, \(aqtwo\(aq, 2) cur, data = await redis.hscan(\(aqhash\(aq) assert data == [(b\(aqone\(aq, b\(aq1\(aq), (b\(aqtwo\(aq, b\(aq2\(aq)] # now it\(aqs easier to make a dict of it assert dict(data) == {b\(aqone\(aq: b\(aq1\(aq, b\(aqtwo\(aq: b\(aq2\(aq} .ft P .fi .UNINDENT .UNINDENT T} _ .TE .SH AIOREDIS — API REFERENCE .SS Connection .sp Redis Connection is the core function of the library. Connection instances can be used as is or through \fI\%pool\fP or \fI\%high\-level API\fP\&. 
.sp Connection usage is as simple as: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C import asyncio import aioredis async def connect_uri(): conn = await aioredis\&.create_connection( \(aqredis://localhost/0\(aq) val = await conn\&.execute(\(aqGET\(aq, \(aqmy\-key\(aq) async def connect_tcp(): conn = await aioredis\&.create_connection( (\(aqlocalhost\(aq, 6379)) val = await conn\&.execute(\(aqGET\(aq, \(aqmy\-key\(aq) async def connect_unixsocket(): conn = await aioredis\&.create_connection( \(aq/path/to/redis/socket\(aq) # or uri \(aqunix:///path/to/redis/socket?db=1\(aq val = await conn\&.execute(\(aqGET\(aq, \(aqmy\-key\(aq) asyncio\&.get_event_loop()\&.run_until_complete(connect_tcp()) asyncio\&.get_event_loop()\&.run_until_complete(connect_unixsocket()) .ft P .fi .UNINDENT .UNINDENT .INDENT 0.0 .TP .B coroutine aioredis.create_connection(address, *, db=0, password=None, ssl=None, encoding=None, parser=None, loop=None, timeout=None) Creates Redis connection. .sp Changed in version v0.3.1: \fBtimeout\fP argument added. .sp Changed in version v1.0: \fBparser\fP argument added. .INDENT 7.0 .TP .B Parameters .INDENT 7.0 .IP \(bu 2 \fBaddress\fP (\fI\%tuple\fP\fI or \fP\fI\%str\fP) – .sp An address where to connect. Can be one of the following: .INDENT 2.0 .IP \(bu 2 a Redis URI — \fB"redis://host:6379/0?encoding=utf\-8"\fP; .IP \(bu 2 a (host, port) tuple — \fB(\(aqlocalhost\(aq, 6379)\fP; .IP \(bu 2 or a unix domain socket path string — \fB"/path/to/redis.sock"\fP\&. .UNINDENT .IP \(bu 2 \fBdb\fP (\fI\%int\fP) – Redis database index to switch to when connected. .IP \(bu 2 \fBpassword\fP (\fI\%str\fP\fI or \fP\fI\%None\fP) – Password to use if redis server instance requires authorization. .IP \(bu 2 \fBssl\fP (\fI\%ssl.SSLContext\fP or True or None) – SSL context that is passed through to \fBasyncio.BaseEventLoop.create_connection()\fP\&. .IP \(bu 2 \fBencoding\fP (\fI\%str\fP\fI or \fP\fI\%None\fP) – Codec to use for response decoding. 
.IP \(bu 2 \fBparser\fP (\fI\%callable\fP\fI or \fP\fI\%None\fP) – Protocol parser class. Can be used to set custom protocol reader; expected same interface as \fBhiredis.Reader\fP\&. .IP \(bu 2 \fBloop\fP (\fI\%EventLoop\fP) – An optional \fIevent loop\fP instance (uses \fI\%asyncio.get_event_loop()\fP if not specified). .IP \(bu 2 \fBtimeout\fP (\fIfloat greater than 0\fP\fI or \fP\fI\%None\fP) – Max time to open a connection, otherwise raise \fI\%asyncio.TimeoutError\fP exception. \fBNone\fP by default .UNINDENT .TP .B Returns \fI\%RedisConnection\fP instance. .UNINDENT .UNINDENT .INDENT 0.0 .TP .B class aioredis.RedisConnection Bases: \fBabc.AbcConnection\fP .sp Redis connection interface. .INDENT 7.0 .TP .B address Redis server address; either IP\-port tuple or unix socket str (\fIread\-only\fP). IP is either IPv4 or IPv6 depending on resolved host part in initial address. .sp New in version v0.2.8. .UNINDENT .INDENT 7.0 .TP .B db Current database index (\fIread\-only\fP). .UNINDENT .INDENT 7.0 .TP .B encoding Current codec for response decoding (\fIread\-only\fP). .UNINDENT .INDENT 7.0 .TP .B closed Set to \fBTrue\fP if connection is closed (\fIread\-only\fP). .UNINDENT .INDENT 7.0 .TP .B in_transaction Set to \fBTrue\fP when MULTI command was issued (\fIread\-only\fP). .UNINDENT .INDENT 7.0 .TP .B pubsub_channels \fIRead\-only\fP dict with subscribed channels. Keys are bytes, values are \fI\%Channel\fP instances. .UNINDENT .INDENT 7.0 .TP .B pubsub_patterns \fIRead\-only\fP dict with subscribed patterns. Keys are bytes, values are \fI\%Channel\fP instances. .UNINDENT .INDENT 7.0 .TP .B in_pubsub Indicates that connection is in PUB/SUB mode. Provides the number of subscribed channels. \fIRead\-only\fP\&. .UNINDENT .INDENT 7.0 .TP .B execute(command, *args, encoding=_NOTSET) Execute Redis command. .sp The method is \fBnot a coroutine\fP itself but instead it writes to underlying transport and returns a \fI\%asyncio.Future\fP waiting for result. 
.INDENT 7.0 .TP .B Parameters .INDENT 7.0 .IP \(bu 2 \fBcommand\fP (\fI\%str\fP\fI, \fP\fI\%bytes\fP\fI, \fP\fI\%bytearray\fP) – Command to execute .IP \(bu 2 \fBencoding\fP (\fI\%str\fP\fI or \fP\fI\%None\fP) – Keyword\-only argument for overriding response decoding. By default will use connection\-wide encoding. May be set to None to skip response decoding. .UNINDENT .TP .B Raises .INDENT 7.0 .IP \(bu 2 \fI\%TypeError\fP – When any of arguments is None or can not be encoded as bytes. .IP \(bu 2 \fBaioredis.ReplyError\fP – For redis error replies. .IP \(bu 2 \fBaioredis.ProtocolError\fP – When response can not be decoded and/or connection is broken. .UNINDENT .TP .B Returns Returns bytes or int reply (or str if encoding was set) .UNINDENT .UNINDENT .INDENT 7.0 .TP .B execute_pubsub(command, *channels_or_patterns) Method to execute Pub/Sub commands. The method is not a coroutine itself but returns a \fI\%asyncio.gather()\fP coroutine. Method also accept \fI\%aioredis.Channel\fP instances as command arguments: .INDENT 7.0 .INDENT 3.5 .sp .nf .ft C >>> ch1 = Channel(\(aqA\(aq, is_pattern=False, loop=loop) >>> await conn.execute_pubsub(\(aqsubscribe\(aq, ch1) [[b\(aqsubscribe\(aq, b\(aqA\(aq, 1]] .ft P .fi .UNINDENT .UNINDENT .sp Changed in version v0.3: The method accept \fI\%Channel\fP instances. .INDENT 7.0 .TP .B Parameters .INDENT 7.0 .IP \(bu 2 \fBcommand\fP (\fI\%str\fP\fI, \fP\fI\%bytes\fP\fI, \fP\fI\%bytearray\fP) – One of the following Pub/Sub commands: \fBsubscribe\fP, \fBunsubscribe\fP, \fBpsubscribe\fP, \fBpunsubscribe\fP\&. .IP \(bu 2 \fB*channels_or_patterns\fP – Channels or patterns to subscribe connection to or unsubscribe from. At least one channel/pattern is required. 
.UNINDENT .TP .B Returns Returns a list of subscribe/unsubscribe messages, ex: .INDENT 7.0 .INDENT 3.5 .sp .nf .ft C >>> await conn.execute_pubsub(\(aqsubscribe\(aq, \(aqA\(aq, \(aqB\(aq) [[b\(aqsubscribe\(aq, b\(aqA\(aq, 1], [b\(aqsubscribe\(aq, b\(aqB\(aq, 2]] .ft P .fi .UNINDENT .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B close() Closes connection. .sp Mark connection as closed and schedule cleanup procedure. .sp All pending commands will be canceled with \fI\%ConnectionForcedCloseError\fP\&. .UNINDENT .INDENT 7.0 .TP .B wait_closed() Coroutine waiting for connection to get closed. .UNINDENT .INDENT 7.0 .TP .B select(db) Changes current db index to new one. .INDENT 7.0 .TP .B Parameters \fBdb\fP (\fI\%int\fP) – New redis database index. .TP .B Raises .INDENT 7.0 .IP \(bu 2 \fI\%TypeError\fP – When \fBdb\fP parameter is not int. .IP \(bu 2 \fI\%ValueError\fP – When \fBdb\fP parameter is less than 0. .UNINDENT .TP .B Return True Always returns True or raises exception. .UNINDENT .UNINDENT .INDENT 7.0 .TP .B auth(password) Send AUTH command. .INDENT 7.0 .TP .B Parameters \fBpassword\fP (\fI\%str\fP) – Plain\-text password .TP .B Return bool True if redis replied with ‘OK’. .UNINDENT .UNINDENT .UNINDENT .sp .ce ---- .ce 0 .sp .SS Connections Pool .sp The library provides connections pool. The basic usage is as follows: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C import aioredis async def sample_pool(): pool = await aioredis\&.create_pool(\(aqredis://localhost\(aq) val = await pool\&.execute(\(aqget\(aq, \(aqmy\-key\(aq) .ft P .fi .UNINDENT .UNINDENT .INDENT 0.0 .TP .B aioredis.create_pool(address, *, db=0, password=None, ssl=None, encoding=None, minsize=1, maxsize=10, parser=None, loop=None, create_connection_timeout=None, pool_cls=None, connection_cls=None) A \fI\%coroutine\fP that instantiates a pool of \fI\%RedisConnection\fP\&. .sp Changed in version v0.2.7: \fBminsize\fP default value changed from 10 to 1. 
.sp Changed in version v0.2.8: Disallow arbitrary ConnectionsPool maxsize. .sp Deprecated since version v0.2.9: \fIcommands_factory\fP argument is deprecated and will be removed in \fIv1.0\fP\&. .sp Changed in version v0.3.2: \fBcreate_connection_timeout\fP argument added. .sp New in version v1.0: \fBparser\fP, \fBpool_cls\fP and \fBconnection_cls\fP arguments added. .INDENT 7.0 .TP .B Parameters .INDENT 7.0 .IP \(bu 2 \fBaddress\fP (\fI\%tuple\fP\fI or \fP\fI\%str\fP) – .sp An address where to connect. Can be one of the following: .INDENT 2.0 .IP \(bu 2 a Redis URI — \fB"redis://host:6379/0?encoding=utf\-8"\fP; .IP \(bu 2 a (host, port) tuple — \fB(\(aqlocalhost\(aq, 6379)\fP; .IP \(bu 2 or a unix domain socket path string — \fB"/path/to/redis.sock"\fP\&. .UNINDENT .IP \(bu 2 \fBdb\fP (\fI\%int\fP) – Redis database index to switch to when connected. .IP \(bu 2 \fBpassword\fP (\fI\%str\fP\fI or \fP\fI\%None\fP) – Password to use if redis server instance requires authorization. .IP \(bu 2 \fBssl\fP (\fI\%ssl.SSLContext\fP or True or None) – SSL context that is passed through to \fBasyncio.BaseEventLoop.create_connection()\fP\&. .IP \(bu 2 \fBencoding\fP (\fI\%str\fP\fI or \fP\fI\%None\fP) – Codec to use for response decoding. .IP \(bu 2 \fBminsize\fP (\fI\%int\fP) – Minimum number of free connections to create in pool. \fB1\fP by default. .IP \(bu 2 \fBmaxsize\fP (\fI\%int\fP) – Maximum number of connections to keep in pool. \fB10\fP by default. Must be greater than \fB0\fP\&. \fBNone\fP is disallowed. .IP \(bu 2 \fBparser\fP (\fI\%callable\fP\fI or \fP\fI\%None\fP) – Protocol parser class. Can be used to set custom protocol reader; expected same interface as \fBhiredis.Reader\fP\&. .IP \(bu 2 \fBloop\fP (\fI\%EventLoop\fP) – An optional \fIevent loop\fP instance (uses \fI\%asyncio.get_event_loop()\fP if not specified). 
.IP \(bu 2 \fBcreate_connection_timeout\fP (\fIfloat greater than 0\fP\fI or \fP\fI\%None\fP) – Max time to open a connection, otherwise raise an \fI\%asyncio.TimeoutError\fP\&. \fBNone\fP by default. .IP \(bu 2 \fBpool_cls\fP (\fIaioredis.abc.AbcPool\fP) – Can be used to instantiate custom pool class. This argument \fBmust be\fP a subclass of \fBAbcPool\fP\&. .IP \(bu 2 \fBconnection_cls\fP (\fIaioredis.abc.AbcConnection\fP) – Can be used to make pool instantiate custom connection classes. This argument \fBmust be\fP a subclass of \fBAbcConnection\fP\&. .UNINDENT .TP .B Returns \fI\%ConnectionsPool\fP instance. .UNINDENT .UNINDENT .INDENT 0.0 .TP .B class aioredis.ConnectionsPool Bases: \fBabc.AbcPool\fP .sp Redis connections pool. .INDENT 7.0 .TP .B minsize A minimum size of the pool (\fIread\-only\fP). .UNINDENT .INDENT 7.0 .TP .B maxsize A maximum size of the pool (\fIread\-only\fP). .UNINDENT .INDENT 7.0 .TP .B size Current pool size — number of free and used connections (\fIread\-only\fP). .UNINDENT .INDENT 7.0 .TP .B freesize Current number of free connections (\fIread\-only\fP). .UNINDENT .INDENT 7.0 .TP .B db Currently selected db index (\fIread\-only\fP). .UNINDENT .INDENT 7.0 .TP .B encoding Current codec for response decoding (\fIread\-only\fP). .UNINDENT .INDENT 7.0 .TP .B closed \fBTrue\fP if pool is closed. .sp New in version v0.2.8. .UNINDENT .INDENT 7.0 .TP .B execute(command, *args, **kwargs) Execute Redis command in a free connection and return \fI\%asyncio.Future\fP waiting for result. .sp This method tries to pick a free connection from pool and send command through it at once (keeping pipelining feature provided by \fI\%aioredis.RedisConnection.execute()\fP). If no connection is found — returns coroutine waiting for free connection to execute command. .sp New in version v1.0. .UNINDENT .INDENT 7.0 .TP .B execute_pubsub(command, *channels) Execute Redis (p)subscribe/(p)unsubscribe command. 
.sp \fBConnectionsPool\fP picks separate free connection for pub/sub and uses it until pool is closed or connection is disconnected (unsubscribing from all channels/pattern will leave connection locked for pub/sub use). .sp There is no auto\-reconnect for Pub/Sub connection as this will hide from user messages loss. .sp Has similar to \fI\%execute()\fP behavior, ie: tries to pick free connection from pool and switch it to pub/sub mode; or fallback to coroutine waiting for free connection and repeating operation. .sp New in version v1.0. .UNINDENT .INDENT 7.0 .TP .B get_connection(command, args=()) Gets free connection from pool returning tuple of (connection, address). .sp If no free connection is found – None is returned in place of connection. .INDENT 7.0 .TP .B Return type tuple(\fI\%RedisConnection\fP or None, str) .UNINDENT .sp New in version v1.0. .UNINDENT .INDENT 7.0 .TP .B coroutine clear() Closes and removes all free connections in the pool. .UNINDENT .INDENT 7.0 .TP .B coroutine select(db) Changes db index for all free connections in the pool. .INDENT 7.0 .TP .B Parameters \fBdb\fP (\fI\%int\fP) – New database index. .UNINDENT .UNINDENT .INDENT 7.0 .TP .B coroutine acquire(command=None, args=()) Acquires a connection from \fIfree pool\fP\&. Creates new connection if needed. .INDENT 7.0 .TP .B Parameters .INDENT 7.0 .IP \(bu 2 \fBcommand\fP – reserved for future. .IP \(bu 2 \fBargs\fP – reserved for future. .UNINDENT .TP .B Raises \fBaioredis.PoolClosedError\fP – if pool is already closed .UNINDENT .UNINDENT .INDENT 7.0 .TP .B release(conn) Returns used connection back into pool. .sp When returned connection has db index that differs from one in pool the connection will be dropped. When queue of free connections is full the connection will be dropped. .sp \fBNOTE:\fP .INDENT 7.0 .INDENT 3.5 This method is \fBnot a coroutine\fP\&. .UNINDENT .UNINDENT .INDENT 7.0 .TP .B Parameters \fBconn\fP (\fIaioredis.RedisConnection\fP) – A RedisConnection instance. 
.UNINDENT .UNINDENT .INDENT 7.0 .TP .B close() Close all free and in\-progress connections and mark pool as closed. .sp New in version v0.2.8. .UNINDENT .INDENT 7.0 .TP .B coroutine wait_closed() Wait until pool gets closed (when all connections are closed). .sp New in version v0.2.8. .UNINDENT .UNINDENT .sp .ce ---- .ce 0 .sp .SS Pub/Sub Channel object .sp \fIChannel\fP object is a wrapper around queue for storing received pub/sub messages. .INDENT 0.0 .TP .B class aioredis.Channel(name, is_pattern, loop=None) Bases: \fBabc.AbcChannel\fP .sp Object representing Pub/Sub messages queue. It’s basically a wrapper around \fI\%asyncio.Queue\fP\&. .INDENT 7.0 .TP .B name Holds encoded channel/pattern name. .UNINDENT .INDENT 7.0 .TP .B is_pattern Set to True for pattern channels. .UNINDENT .INDENT 7.0 .TP .B is_active Set to True if there are messages in queue and connection is still subscribed to this channel. .UNINDENT .INDENT 7.0 .TP .B coroutine get(*, encoding=None, decoder=None) Coroutine that waits for and returns a message. .sp Return value is message received or None signifying that channel has been unsubscribed and no more messages will be received. .INDENT 7.0 .TP .B Parameters .INDENT 7.0 .IP \(bu 2 \fBencoding\fP (\fI\%str\fP) – If not None used to decode resulting bytes message. .IP \(bu 2 \fBdecoder\fP (\fI\%callable\fP) – If specified used to decode message, ex. \fI\%json.loads()\fP .UNINDENT .TP .B Raises \fBaioredis.ChannelClosedError\fP – If channel is unsubscribed and has no more messages. .UNINDENT .UNINDENT .INDENT 7.0 .TP .B get_json(*, encoding="utf\-8") Shortcut to \fBget(encoding="utf\-8", decoder=json.loads)\fP .UNINDENT .INDENT 7.0 .TP .B coroutine wait_message() Waits for message to become available in channel. .sp Main idea is to use it in loops: .sp .nf .ft C >>> ch = redis.channels[\(aqchannel:1\(aq] >>> while await ch.wait_message(): \&... 
msg = await ch.get() .ft P .fi .UNINDENT .INDENT 7.0 .TP .B coroutine async\-for iter() Same as \fI\%get()\fP method but it is a native coroutine. .sp Usage example: .INDENT 7.0 .INDENT 3.5 .sp .nf .ft C >>> async for msg in ch.iter(): \&... print(msg) .ft P .fi .UNINDENT .UNINDENT .sp New in version 0.2.5: Available for Python 3.5 only .UNINDENT .UNINDENT .sp .ce ---- .ce 0 .sp .SS Exceptions .INDENT 0.0 .TP .B exception aioredis.RedisError .INDENT 7.0 .TP .B Bases \fI\%Exception\fP .UNINDENT .sp Base exception class for aioredis exceptions. .UNINDENT .INDENT 0.0 .TP .B exception aioredis.ProtocolError .INDENT 7.0 .TP .B Bases \fI\%RedisError\fP .UNINDENT .sp Raised when protocol error occurs. When this type of exception is raised connection must be considered broken and must be closed. .UNINDENT .INDENT 0.0 .TP .B exception aioredis.ReplyError .INDENT 7.0 .TP .B Bases \fI\%RedisError\fP .UNINDENT .sp Raised for Redis error replies\&. .UNINDENT .INDENT 0.0 .TP .B exception aioredis.MaxClientsError .INDENT 7.0 .TP .B Bases \fI\%ReplyError\fP .UNINDENT .sp Raised when maximum number of clients has been reached (Redis server configured value). .UNINDENT .INDENT 0.0 .TP .B exception aioredis.AuthError .INDENT 7.0 .TP .B Bases \fI\%ReplyError\fP .UNINDENT .sp Raised when authentication errors occur. .UNINDENT .INDENT 0.0 .TP .B exception aioredis.ConnectionClosedError .INDENT 7.0 .TP .B Bases \fI\%RedisError\fP .UNINDENT .sp Raised if connection to server was lost/closed. .UNINDENT .INDENT 0.0 .TP .B exception aioredis.ConnectionForcedCloseError .INDENT 7.0 .TP .B Bases \fI\%ConnectionClosedError\fP .UNINDENT .sp Raised if connection was closed with \fI\%RedisConnection.close()\fP method. .UNINDENT .INDENT 0.0 .TP .B exception aioredis.PipelineError .INDENT 7.0 .TP .B Bases \fI\%RedisError\fP .UNINDENT .sp Raised from \fBpipeline()\fP if any pipelined command raised error. 
.UNINDENT .INDENT 0.0 .TP .B exception aioredis.MultiExecError .INDENT 7.0 .TP .B Bases \fI\%PipelineError\fP .UNINDENT .sp Same as \fI\%PipelineError\fP but raised when executing multi_exec block. .UNINDENT .INDENT 0.0 .TP .B exception aioredis.WatchVariableError .INDENT 7.0 .TP .B Bases \fI\%MultiExecError\fP .UNINDENT .sp Raised if watched variable changed (EXEC returns None). Subclass of \fI\%MultiExecError\fP\&. .UNINDENT .INDENT 0.0 .TP .B exception aioredis.ChannelClosedError .INDENT 7.0 .TP .B Bases \fI\%RedisError\fP .UNINDENT .sp Raised from \fI\%aioredis.Channel.get()\fP when Pub/Sub channel is unsubscribed and messages queue is empty. .UNINDENT .INDENT 0.0 .TP .B exception aioredis.PoolClosedError .INDENT 7.0 .TP .B Bases \fI\%RedisError\fP .UNINDENT .sp Raised from \fI\%aioredis.ConnectionsPool.acquire()\fP when pool is already closed. .UNINDENT .INDENT 0.0 .TP .B exception aioredis.ReadOnlyError .INDENT 7.0 .TP .B Bases \fI\%RedisError\fP .UNINDENT .sp Raised from slave when read\-only mode is enabled. .UNINDENT .INDENT 0.0 .TP .B exception aioredis.MasterNotFoundError .INDENT 7.0 .TP .B Bases \fI\%RedisError\fP .UNINDENT .sp Raised by Sentinel client if it can not find requested master. .UNINDENT .INDENT 0.0 .TP .B exception aioredis.SlaveNotFoundError .INDENT 7.0 .TP .B Bases \fI\%RedisError\fP .UNINDENT .sp Raised by Sentinel client if it can not find requested slave. .UNINDENT .INDENT 0.0 .TP .B exception aioredis.MasterReplyError .INDENT 7.0 .TP .B Bases \fI\%RedisError\fP .UNINDENT .sp Raised if establishing connection to master failed with \fBRedisError\fP, for instance because of required or wrong authentication. .UNINDENT .INDENT 0.0 .TP .B exception aioredis.SlaveReplyError .INDENT 7.0 .TP .B Bases \fI\%RedisError\fP .UNINDENT .sp Raised if establishing connection to slave failed with \fBRedisError\fP, for instance because of required or wrong authentication. 
.UNINDENT .SS Exceptions Hierarchy .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C Exception RedisError ProtocolError ReplyError MaxClientsError AuthError PipelineError MultiExecError WatchVariableError ChannelClosedError ConnectionClosedError ConnectionForcedCloseError PoolClosedError ReadOnlyError MasterNotFoundError SlaveNotFoundError MasterReplyError SlaveReplyError .ft P .fi .UNINDENT .UNINDENT .sp .ce ---- .ce 0 .sp .SS Commands Interface .sp The library provides high\-level API implementing simple interface to Redis commands. .sp The usage is as simple as: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C import aioredis # Create Redis client bound to single non\-reconnecting connection. async def single_connection(): redis = await aioredis\&.create_redis( \(aqredis://localhost\(aq) val = await redis\&.get(\(aqmy\-key\(aq) # Create Redis client bound to connections pool. async def pool_of_connections(): redis = await aioredis\&.create_redis_pool( \(aqredis://localhost\(aq) val = await redis\&.get(\(aqmy\-key\(aq) # we can also use pub/sub as underlying pool # has several free connections: ch1, ch2 = await redis\&.subscribe(\(aqchan:1\(aq, \(aqchan:2\(aq) # publish using free connection await redis\&.publish(\(aqchan:1\(aq, \(aqHello\(aq) await ch1\&.get() .ft P .fi .UNINDENT .UNINDENT .sp For commands reference — see commands mixins reference\&. .INDENT 0.0 .TP .B coroutine aioredis.create_redis(address, *, db=0, password=None, ssl=None, encoding=None, commands_factory=Redis, parser=None, timeout=None, connection_cls=None, loop=None) This \fI\%coroutine\fP creates high\-level Redis interface instance bound to single Redis connection (without auto\-reconnect). .sp New in version v1.0: \fBparser\fP, \fBtimeout\fP and \fBconnection_cls\fP arguments added. .sp See also \fI\%RedisConnection\fP for parameters description. .INDENT 7.0 .TP .B Parameters .INDENT 7.0 .IP \(bu 2 \fBaddress\fP (\fI\%tuple\fP\fI or \fP\fI\%str\fP) – An address where to connect. 
Can be a (host, port) tuple, unix domain socket path string or a Redis URI string. .IP \(bu 2 \fBdb\fP (\fI\%int\fP) – Redis database index to switch to when connected. .IP \(bu 2 \fBpassword\fP (\fI\%str\fP\fI or \fP\fI\%bytes\fP\fI or \fP\fI\%None\fP) – Password to use if Redis server instance requires authorization. .IP \(bu 2 \fBssl\fP (\fI\%ssl.SSLContext\fP or True or None) – SSL context that is passed through to \fBasyncio.BaseEventLoop.create_connection()\fP\&. .IP \(bu 2 \fBencoding\fP (\fI\%str\fP\fI or \fP\fI\%None\fP) – Codec to use for response decoding. .IP \(bu 2 \fBcommands_factory\fP (\fI\%callable\fP) – A factory accepting single parameter – object implementing \fBAbcConnection\fP and returning an instance providing high\-level interface to Redis. \fBRedis\fP by default. .IP \(bu 2 \fBparser\fP (\fI\%callable\fP\fI or \fP\fI\%None\fP) – Protocol parser class. Can be used to set custom protocol reader; expected same interface as \fBhiredis.Reader\fP\&. .IP \(bu 2 \fBtimeout\fP (\fIfloat greater than 0\fP\fI or \fP\fI\%None\fP) – Max time to open a connection, otherwise raise \fI\%asyncio.TimeoutError\fP exception. \fBNone\fP by default .IP \(bu 2 \fBconnection_cls\fP (\fIaioredis.abc.AbcConnection\fP) – Can be used to instantiate custom connection class. This argument \fBmust be\fP a subclass of \fBAbcConnection\fP\&. .IP \(bu 2 \fBloop\fP (\fI\%EventLoop\fP) – An optional \fIevent loop\fP instance (uses \fI\%asyncio.get_event_loop()\fP if not specified). .UNINDENT .TP .B Returns Redis client (result of \fBcommands_factory\fP call), \fBRedis\fP by default. .UNINDENT .UNINDENT .INDENT 0.0 .TP .B coroutine aioredis.create_redis_pool(address, *, db=0, password=None, ssl=None, encoding=None, commands_factory=Redis, minsize=1, maxsize=10, parser=None, timeout=None, pool_cls=None, connection_cls=None, loop=None) This \fI\%coroutine\fP create high\-level Redis client instance bound to connections pool (this allows auto\-reconnect and simple pub/sub use). 
.sp See also \fI\%ConnectionsPool\fP for parameters description. .sp Changed in version v1.0: \fBparser\fP, \fBtimeout\fP, \fBpool_cls\fP and \fBconnection_cls\fP arguments added. .INDENT 7.0 .TP .B Parameters .INDENT 7.0 .IP \(bu 2 \fBaddress\fP (\fI\%tuple\fP\fI or \fP\fI\%str\fP) – An address where to connect. Can be a (host, port) tuple, unix domain socket path string or a Redis URI string. .IP \(bu 2 \fBdb\fP (\fI\%int\fP) – Redis database index to switch to when connected. .IP \(bu 2 \fBpassword\fP (\fI\%str\fP\fI or \fP\fI\%bytes\fP\fI or \fP\fI\%None\fP) – Password to use if Redis server instance requires authorization. .IP \(bu 2 \fBssl\fP (\fI\%ssl.SSLContext\fP or True or None) – SSL context that is passed through to \fBasyncio.BaseEventLoop.create_connection()\fP\&. .IP \(bu 2 \fBencoding\fP (\fI\%str\fP\fI or \fP\fI\%None\fP) – Codec to use for response decoding. .IP \(bu 2 \fBcommands_factory\fP (\fI\%callable\fP) – A factory accepting single parameter – object implementing \fBAbcConnection\fP interface and returning an instance providing high\-level interface to Redis. \fBRedis\fP by default. .IP \(bu 2 \fBminsize\fP (\fI\%int\fP) – Minimum number of connections to initialize and keep in pool. Default is 1. .IP \(bu 2 \fBmaxsize\fP (\fI\%int\fP) – Maximum number of connections that can be created in pool. Default is 10. .IP \(bu 2 \fBparser\fP (\fI\%callable\fP\fI or \fP\fI\%None\fP) – Protocol parser class. Can be used to set custom protocol reader; expected same interface as \fBhiredis.Reader\fP\&. .IP \(bu 2 \fBtimeout\fP (\fIfloat greater than 0\fP\fI or \fP\fI\%None\fP) – Max time to open a connection, otherwise raise \fI\%asyncio.TimeoutError\fP exception. \fBNone\fP by default .IP \(bu 2 \fBpool_cls\fP (\fIaioredis.abc.AbcPool\fP) – Can be used to instantiate custom pool class. This argument \fBmust be\fP a subclass of \fBAbcPool\fP\&. 
.IP \(bu 2 \fBconnection_cls\fP (\fIaioredis.abc.AbcConnection\fP) – Can be used to make pool instantiate custom connection classes. This argument \fBmust be\fP a subclass of \fBAbcConnection\fP\&. .IP \(bu 2 \fBloop\fP (\fI\%EventLoop\fP) – An optional \fIevent loop\fP instance (uses \fI\%asyncio.get_event_loop()\fP if not specified). .UNINDENT .TP .B Returns Redis client (result of \fBcommands_factory\fP call), \fBRedis\fP by default. .UNINDENT .UNINDENT .SH AIOREDIS.REDIS — COMMANDS MIXINS REFERENCE .sp This section contains reference for mixins implementing Redis commands. .sp Descriptions are taken from \fBdocstrings\fP so may not contain proper markup. .INDENT 0.0 .TP .B class aioredis.Redis(pool_or_conn) High\-level Redis interface. .sp Gathers in one place Redis commands implemented in mixins. .sp For commands details see: \fI\%http://redis.io/commands/#connection\fP .INDENT 7.0 .TP .B Parameters \fBpool_or_conn\fP (\fBAbcConnection\fP) – Can be either \fBRedisConnection\fP or \fBConnectionsPool\fP\&. .UNINDENT .INDENT 7.0 .TP .B address Redis connection address (if applicable). .UNINDENT .INDENT 7.0 .TP .B auth(password) Authenticate to server. .sp This method wraps call to \fBaioredis.RedisConnection.auth()\fP .UNINDENT .INDENT 7.0 .TP .B close() Close client connections. .UNINDENT .INDENT 7.0 .TP .B closed True if connection is closed. .UNINDENT .INDENT 7.0 .TP .B connection Either \fBaioredis.RedisConnection\fP, or \fBaioredis.ConnectionsPool\fP instance. .UNINDENT .INDENT 7.0 .TP .B db Currently selected db index. .UNINDENT .INDENT 7.0 .TP .B echo(message, *, encoding=) Echo the given string. .UNINDENT .INDENT 7.0 .TP .B encoding Current set codec or None. .UNINDENT .INDENT 7.0 .TP .B in_transaction Set to True when MULTI command was issued. .UNINDENT .INDENT 7.0 .TP .B ping(message=, *, encoding=) Ping the server. .sp Accept optional echo message. .UNINDENT .INDENT 7.0 .TP .B quit() Close the connection. 
.UNINDENT .INDENT 7.0 .TP .B select(db) Change the selected database for the current connection. .sp This method wraps call to \fBaioredis.RedisConnection.select()\fP .UNINDENT .INDENT 7.0 .TP .B coroutine wait_closed() Coroutine waiting until underlying connections are closed. .UNINDENT .UNINDENT .SS Generic commands .INDENT 0.0 .TP .B class aioredis.commands.GenericCommandsMixin Generic commands mixin. .sp For commands details see: \fI\%http://redis.io/commands/#generic\fP .INDENT 7.0 .TP .B delete(key, *keys) Delete a key. .UNINDENT .INDENT 7.0 .TP .B dump(key) Dump a key. .UNINDENT .INDENT 7.0 .TP .B exists(key, *keys) Check if key(s) exists. .sp Changed in version v0.2.9: Accept multiple keys; \fBreturn\fP type \fBchanged\fP from bool to int. .UNINDENT .INDENT 7.0 .TP .B expire(key, timeout) Set a timeout on key. .sp if timeout is float it will be multiplied by 1000 coerced to int and passed to \fIpexpire\fP method. .sp Otherwise raises TypeError if timeout argument is not int. .UNINDENT .INDENT 7.0 .TP .B expireat(key, timestamp) Set expire timestamp on a key. .sp if timeout is float it will be multiplied by 1000 coerced to int and passed to \fIpexpireat\fP method. .sp Otherwise raises TypeError if timestamp argument is not int. .UNINDENT .INDENT 7.0 .TP .B iscan(*, match=None, count=None) Incrementally iterate the keys space using async for. .sp Usage example: .sp .nf .ft C >>> async for key in redis.iscan(match=\(aqsomething*\(aq): \&... print(\(aqMatched:\(aq, key) .ft P .fi .UNINDENT .INDENT 7.0 .TP .B keys(pattern, *, encoding=) Returns all keys matching pattern. .UNINDENT .INDENT 7.0 .TP .B migrate(host, port, key, dest_db, timeout, *, copy=False, replace=False) Atomically transfer a key from a Redis instance to another one. .UNINDENT .INDENT 7.0 .TP .B migrate_keys(host, port, keys, dest_db, timeout, *, copy=False, replace=False) Atomically transfer keys from one Redis instance to another one. .sp Keys argument must be list/tuple of keys to migrate. 
.UNINDENT .INDENT 7.0 .TP .B move(key, db) Move key from currently selected database to specified destination. .INDENT 7.0 .TP .B Raises .INDENT 7.0 .IP \(bu 2 \fI\%TypeError\fP – if db is not int .IP \(bu 2 \fI\%ValueError\fP – if db is less than 0 .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B object_encoding(key) Returns the kind of internal representation used in order to store the value associated with a key (OBJECT ENCODING). .UNINDENT .INDENT 7.0 .TP .B object_idletime(key) Returns the number of seconds since the object is not requested by read or write operations (OBJECT IDLETIME). .UNINDENT .INDENT 7.0 .TP .B object_refcount(key) Returns the number of references of the value associated with the specified key (OBJECT REFCOUNT). .UNINDENT .INDENT 7.0 .TP .B persist(key) Remove the existing timeout on key. .UNINDENT .INDENT 7.0 .TP .B pexpire(key, timeout) Set a milliseconds timeout on key. .INDENT 7.0 .TP .B Raises \fI\%TypeError\fP – if timeout is not int .UNINDENT .UNINDENT .INDENT 7.0 .TP .B pexpireat(key, timestamp) Set expire timestamp on key, timestamp in milliseconds. .INDENT 7.0 .TP .B Raises \fI\%TypeError\fP – if timestamp is not int .UNINDENT .UNINDENT .INDENT 7.0 .TP .B pttl(key) Returns time\-to\-live for a key, in milliseconds. .sp Special return values (starting with Redis 2.8): .INDENT 7.0 .IP \(bu 2 command returns \-2 if the key does not exist. .IP \(bu 2 command returns \-1 if the key exists but has no associated expire. .UNINDENT .UNINDENT .INDENT 7.0 .TP .B randomkey(*, encoding=) Return a random key from the currently selected database. .UNINDENT .INDENT 7.0 .TP .B rename(key, newkey) Renames key to newkey. .INDENT 7.0 .TP .B Raises \fI\%ValueError\fP – if key == newkey .UNINDENT .UNINDENT .INDENT 7.0 .TP .B renamenx(key, newkey) Renames key to newkey only if newkey does not exist. 
.INDENT 7.0 .TP .B Raises \fI\%ValueError\fP – if key == newkey .UNINDENT .UNINDENT .INDENT 7.0 .TP .B restore(key, ttl, value) Creates a key associated with a value that is obtained via DUMP. .UNINDENT .INDENT 7.0 .TP .B scan(cursor=0, match=None, count=None) Incrementally iterate the keys space. .sp Usage example: .sp .nf .ft C >>> match = \(aqsomething*\(aq >>> cur = b\(aq0\(aq >>> while cur: \&... cur, keys = await redis.scan(cur, match=match) \&... for key in keys: \&... print(\(aqMatched:\(aq, key) .ft P .fi .UNINDENT .INDENT 7.0 .TP .B sort(key, *get_patterns, by=None, offset=None, count=None, asc=None, alpha=False, store=None) Sort the elements in a list, set or sorted set. .UNINDENT .INDENT 7.0 .TP .B ttl(key) Returns time\-to\-live for a key, in seconds. .sp Special return values (starting with Redis 2.8): * command returns \-2 if the key does not exist. * command returns \-1 if the key exists but has no associated expire. .UNINDENT .INDENT 7.0 .TP .B type(key) Returns the string representation of the value’s type stored at key. .UNINDENT .UNINDENT .SS Geo commands .sp New in version v0.3.0. .INDENT 0.0 .TP .B class aioredis.commands.GeoCommandsMixin Geo commands mixin. .sp For commands details see: \fI\%http://redis.io/commands#geo\fP .INDENT 7.0 .TP .B geoadd(key, longitude, latitude, member, *args, **kwargs) Add one or more geospatial items in the geospatial index represented using a sorted set. .INDENT 7.0 .TP .B Return type \fI\%int\fP .UNINDENT .UNINDENT .INDENT 7.0 .TP .B geodist(key, member1, member2, unit=\(aqm\(aq) Returns the distance between two members of a geospatial index. .INDENT 7.0 .TP .B Return type \fI\%list\fP[\fI\%float\fP or \fI\%None\fP] .UNINDENT .UNINDENT .INDENT 7.0 .TP .B geohash(key, member, *members, **kwargs) Returns members of a geospatial index as standard geohash strings. 
.INDENT 7.0 .TP .B Return type \fI\%list\fP[\fI\%str\fP or \fI\%bytes\fP or \fI\%None\fP] .UNINDENT .UNINDENT .INDENT 7.0 .TP .B geopos(key, member, *members, **kwargs) Returns longitude and latitude of members of a geospatial index. .INDENT 7.0 .TP .B Return type \fI\%list\fP[GeoPoint or \fI\%None\fP] .UNINDENT .UNINDENT .INDENT 7.0 .TP .B georadius(key, longitude, latitude, radius, unit=\(aqm\(aq, *, with_dist=False, with_hash=False, with_coord=False, count=None, sort=None, encoding=) Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a point. .sp Return value follows Redis convention: .INDENT 7.0 .IP \(bu 2 if none of \fBWITH*\fP flags are set – list of strings returned: .sp .nf .ft C >>> await redis.georadius(\(aqSicily\(aq, 15, 37, 200, \(aqkm\(aq) [b"Palermo", b"Catania"] .ft P .fi .IP \(bu 2 if any flag (or all) is set – list of named tuples returned: .sp .nf .ft C >>> await redis.georadius(\(aqSicily\(aq, 15, 37, 200, \(aqkm\(aq, \&... with_dist=True) [GeoMember(name=b"Palermo", dist=190.4424, hash=None, coord=None), GeoMember(name=b"Catania", dist=56.4413, hash=None, coord=None)] .ft P .fi .UNINDENT .INDENT 7.0 .TP .B Raises .INDENT 7.0 .IP \(bu 2 \fI\%TypeError\fP – radius is not float or int .IP \(bu 2 \fI\%TypeError\fP – count is not int .IP \(bu 2 \fI\%ValueError\fP – if unit not equal \fBm\fP, \fBkm\fP, \fBmi\fP or \fBft\fP .IP \(bu 2 \fI\%ValueError\fP – if sort not equal \fBASC\fP or \fBDESC\fP .UNINDENT .TP .B Return type \fI\%list\fP[\fI\%str\fP] or \fI\%list\fP[GeoMember] .UNINDENT .UNINDENT .INDENT 7.0 .TP .B georadiusbymember(key, member, radius, unit=\(aqm\(aq, *, with_dist=False, with_hash=False, with_coord=False, count=None, sort=None, encoding=) Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a member. 
.sp Return value follows Redis convention: .INDENT 7.0 .IP \(bu 2 if none of \fBWITH*\fP flags are set – list of strings returned: .sp .nf .ft C >>> await redis.georadiusbymember(\(aqSicily\(aq, \(aqPalermo\(aq, 200, \(aqkm\(aq) [b"Palermo", b"Catania"] .ft P .fi .IP \(bu 2 if any flag (or all) is set – list of named tuples returned: .sp .nf .ft C >>> await redis.georadiusbymember(\(aqSicily\(aq, \(aqPalermo\(aq, 200, \(aqkm\(aq, \&... with_dist=True) [GeoMember(name=b"Palermo", dist=190.4424, hash=None, coord=None), GeoMember(name=b"Catania", dist=56.4413, hash=None, coord=None)] .ft P .fi .UNINDENT .INDENT 7.0 .TP .B Raises .INDENT 7.0 .IP \(bu 2 \fI\%TypeError\fP – radius is not float or int .IP \(bu 2 \fI\%TypeError\fP – count is not int .IP \(bu 2 \fI\%ValueError\fP – if unit not equal \fBm\fP, \fBkm\fP, \fBmi\fP or \fBft\fP .IP \(bu 2 \fI\%ValueError\fP – if sort not equal \fBASC\fP or \fBDESC\fP .UNINDENT .TP .B Return type \fI\%list\fP[\fI\%str\fP] or \fI\%list\fP[GeoMember] .UNINDENT .UNINDENT .UNINDENT .SS Geo commands result wrappers .INDENT 0.0 .TP .B class aioredis.commands.GeoPoint(longitude, latitude) Bases: \fI\%tuple\fP .sp Named tuple representing result returned by \fBGEOPOS\fP and \fBGEORADIUS\fP commands. .INDENT 7.0 .TP .B Parameters .INDENT 7.0 .IP \(bu 2 \fBlongitude\fP (\fI\%float\fP) – longitude value. .IP \(bu 2 \fBlatitude\fP (\fI\%float\fP) – latitude value. .UNINDENT .UNINDENT .UNINDENT .INDENT 0.0 .TP .B class aioredis.commands.GeoMember(member, dist, hash, coord) Bases: \fI\%tuple\fP .sp Named tuple representing result returned by \fBGEORADIUS\fP and \fBGEORADIUSBYMEMBER\fP commands. .INDENT 7.0 .TP .B Parameters .INDENT 7.0 .IP \(bu 2 \fBmember\fP (\fI\%str\fP\fI or \fP\fI\%bytes\fP) – Value of geo sorted set item; .IP \(bu 2 \fBdist\fP (\fI\%None\fP\fI or \fP\fI\%float\fP) – Distance in units passed to call. \fBNone\fP if \fBwith_dist\fP was not set in \fI\%georadius()\fP call. 
.IP \(bu 2 \fBhash\fP (\fI\%None\fP\fI or \fP\fI\%int\fP) – Geo\-hash represented as number. \fBNone\fP if \fBwith_hash\fP was not in \fI\%georadius()\fP call. .IP \(bu 2 \fBcoord\fP (\fI\%None\fP\fI or \fP\fIGeoPoint\fP) – Coordinate of geospatial index member. \fBNone\fP if \fBwith_coord\fP was not set in \fI\%georadius()\fP call. .UNINDENT .UNINDENT .UNINDENT .SS Strings commands .INDENT 0.0 .TP .B class aioredis.commands.StringCommandsMixin String commands mixin. .sp For commands details see: \fI\%http://redis.io/commands/#string\fP .INDENT 7.0 .TP .B append(key, value) Append a value to key. .UNINDENT .INDENT 7.0 .TP .B bitcount(key, start=None, end=None) Count set bits in a string. .INDENT 7.0 .TP .B Raises \fI\%TypeError\fP – if only start or end specified. .UNINDENT .UNINDENT .INDENT 7.0 .TP .B bitop_and(dest, key, *keys) Perform bitwise AND operations between strings. .UNINDENT .INDENT 7.0 .TP .B bitop_not(dest, key) Perform bitwise NOT operations between strings. .UNINDENT .INDENT 7.0 .TP .B bitop_or(dest, key, *keys) Perform bitwise OR operations between strings. .UNINDENT .INDENT 7.0 .TP .B bitop_xor(dest, key, *keys) Perform bitwise XOR operations between strings. .UNINDENT .INDENT 7.0 .TP .B bitpos(key, bit, start=None, end=None) Find first bit set or clear in a string. .INDENT 7.0 .TP .B Raises \fI\%ValueError\fP – if bit is not 0 or 1 .UNINDENT .UNINDENT .INDENT 7.0 .TP .B decr(key) Decrement the integer value of a key by one. .UNINDENT .INDENT 7.0 .TP .B decrby(key, decrement) Decrement the integer value of a key by the given number. .INDENT 7.0 .TP .B Raises \fI\%TypeError\fP – if decrement is not int .UNINDENT .UNINDENT .INDENT 7.0 .TP .B get(key, *, encoding=) Get the value of a key. .UNINDENT .INDENT 7.0 .TP .B getbit(key, offset) Returns the bit value at offset in the string value stored at key. 
.INDENT 7.0 .TP .B Raises .INDENT 7.0 .IP \(bu 2 \fI\%TypeError\fP – if offset is not int .IP \(bu 2 \fI\%ValueError\fP – if offset is less than 0 .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B getrange(key, start, end, *, encoding=) Get a substring of the string stored at a key. .INDENT 7.0 .TP .B Raises \fI\%TypeError\fP – if start or end is not int .UNINDENT .UNINDENT .INDENT 7.0 .TP .B getset(key, value, *, encoding=) Set the string value of a key and return its old value. .UNINDENT .INDENT 7.0 .TP .B incr(key) Increment the integer value of a key by one. .UNINDENT .INDENT 7.0 .TP .B incrby(key, increment) Increment the integer value of a key by the given amount. .INDENT 7.0 .TP .B Raises \fI\%TypeError\fP – if increment is not int .UNINDENT .UNINDENT .INDENT 7.0 .TP .B incrbyfloat(key, increment) Increment the float value of a key by the given amount. .INDENT 7.0 .TP .B Raises \fI\%TypeError\fP – if increment is not int .UNINDENT .UNINDENT .INDENT 7.0 .TP .B mget(key, *keys, encoding=) Get the values of all the given keys. .UNINDENT .INDENT 7.0 .TP .B mset(key, value, *pairs) Set multiple keys to multiple values. .INDENT 7.0 .TP .B Raises \fI\%TypeError\fP – if len of pairs is not an even number .UNINDENT .UNINDENT .INDENT 7.0 .TP .B msetnx(key, value, *pairs) Set multiple keys to multiple values, only if none of the keys exist. .INDENT 7.0 .TP .B Raises \fI\%TypeError\fP – if len of pairs is not an even number .UNINDENT .UNINDENT .INDENT 7.0 .TP .B psetex(key, milliseconds, value) Set the value and expiration in milliseconds of a key. .INDENT 7.0 .TP .B Raises \fI\%TypeError\fP – if milliseconds is not int .UNINDENT .UNINDENT .INDENT 7.0 .TP .B set(key, value, *, expire=0, pexpire=0, exist=None) Set the string value of a key. .INDENT 7.0 .TP .B Raises \fI\%TypeError\fP – if expire or pexpire is not int .UNINDENT .UNINDENT .INDENT 7.0 .TP .B setbit(key, offset, value) Sets or clears the bit at offset in the string value stored at key. 
.INDENT 7.0 .TP .B Raises .INDENT 7.0 .IP \(bu 2 \fI\%TypeError\fP – if offset is not int .IP \(bu 2 \fI\%ValueError\fP – if offset is less than 0 or value is not 0 or 1 .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B setex(key, seconds, value) Set the value and expiration of a key. .sp If seconds is float it will be multiplied by 1000 coerced to int and passed to \fIpsetex\fP method. .INDENT 7.0 .TP .B Raises \fI\%TypeError\fP – if seconds is neither int nor float .UNINDENT .UNINDENT .INDENT 7.0 .TP .B setnx(key, value) Set the value of a key, only if the key does not exist. .UNINDENT .INDENT 7.0 .TP .B setrange(key, offset, value) Overwrite part of a string at key starting at the specified offset. .INDENT 7.0 .TP .B Raises .INDENT 7.0 .IP \(bu 2 \fI\%TypeError\fP – if offset is not int .IP \(bu 2 \fI\%ValueError\fP – if offset is less than 0 .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B strlen(key) Get the length of the value stored in a key. .UNINDENT .UNINDENT .SS Hash commands .INDENT 0.0 .TP .B class aioredis.commands.HashCommandsMixin Hash commands mixin. .sp For commands details see: \fI\%http://redis.io/commands#hash\fP .INDENT 7.0 .TP .B hdel(key, field, *fields) Delete one or more hash fields. .UNINDENT .INDENT 7.0 .TP .B hexists(key, field) Determine if hash field exists. .UNINDENT .INDENT 7.0 .TP .B hget(key, field, *, encoding=) Get the value of a hash field. .UNINDENT .INDENT 7.0 .TP .B hgetall(key, *, encoding=) Get all the fields and values in a hash. .UNINDENT .INDENT 7.0 .TP .B hincrby(key, field, increment=1) Increment the integer value of a hash field by the given number. .UNINDENT .INDENT 7.0 .TP .B hincrbyfloat(key, field, increment=1.0) Increment the float value of a hash field by the given number. .UNINDENT .INDENT 7.0 .TP .B hkeys(key, *, encoding=) Get all the fields in a hash. .UNINDENT .INDENT 7.0 .TP .B hlen(key) Get the number of fields in a hash. 
.UNINDENT .INDENT 7.0 .TP .B hmget(key, field, *fields, encoding=) Get the values of all the given fields. .UNINDENT .INDENT 7.0 .TP .B hmset(key, field, value, *pairs) Set multiple hash fields to multiple values. .UNINDENT .INDENT 7.0 .TP .B hmset_dict(key, *args, **kwargs) Set multiple hash fields to multiple values. .sp dict can be passed as first positional argument: .sp .nf .ft C >>> await redis.hmset_dict( \&... \(aqkey\(aq, {\(aqfield1\(aq: \(aqvalue1\(aq, \(aqfield2\(aq: \(aqvalue2\(aq}) .ft P .fi .sp or keyword arguments can be used: .sp .nf .ft C >>> await redis.hmset_dict( \&... \(aqkey\(aq, field1=\(aqvalue1\(aq, field2=\(aqvalue2\(aq) .ft P .fi .sp or dict argument can be mixed with kwargs: .sp .nf .ft C >>> await redis.hmset_dict( \&... \(aqkey\(aq, {\(aqfield1\(aq: \(aqvalue1\(aq}, field2=\(aqvalue2\(aq) .ft P .fi .sp \fBNOTE:\fP .INDENT 7.0 .INDENT 3.5 \fBdict\fP and \fBkwargs\fP not get mixed into single dictionary, if both specified and both have same key(s) – \fBkwargs\fP will win: .sp .nf .ft C >>> await redis.hmset_dict(\(aqkey\(aq, {\(aqfoo\(aq: \(aqbar\(aq}, foo=\(aqbaz\(aq) >>> await redis.hget(\(aqkey\(aq, \(aqfoo\(aq, encoding=\(aqutf\-8\(aq) \(aqbaz\(aq .ft P .fi .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B hscan(key, cursor=0, match=None, count=None) Incrementally iterate hash fields and associated values. .UNINDENT .INDENT 7.0 .TP .B hset(key, field, value) Set the string value of a hash field. .UNINDENT .INDENT 7.0 .TP .B hsetnx(key, field, value) Set the value of a hash field, only if the field does not exist. .UNINDENT .INDENT 7.0 .TP .B hstrlen(key, field) Get the length of the value of a hash field. .UNINDENT .INDENT 7.0 .TP .B hvals(key, *, encoding=) Get all the values in a hash. .UNINDENT .INDENT 7.0 .TP .B ihscan(key, *, match=None, count=None) Incrementally iterate sorted set items using async for. .sp Usage example: .sp .nf .ft C >>> async for name, val in redis.ihscan(key, match=\(aqsomething*\(aq): \&... 
print(\(aqMatched:\(aq, name, \(aq\->\(aq, val) .ft P .fi .UNINDENT .UNINDENT .SS List commands .INDENT 0.0 .TP .B class aioredis.commands.ListCommandsMixin List commands mixin. .sp For commands details see: \fI\%http://redis.io/commands#list\fP .INDENT 7.0 .TP .B blpop(key, *keys, timeout=0, encoding=) Remove and get the first element in a list, or block until one is available. .INDENT 7.0 .TP .B Raises .INDENT 7.0 .IP \(bu 2 \fI\%TypeError\fP – if timeout is not int .IP \(bu 2 \fI\%ValueError\fP – if timeout is less than 0 .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B brpop(key, *keys, timeout=0, encoding=) Remove and get the last element in a list, or block until one is available. .INDENT 7.0 .TP .B Raises .INDENT 7.0 .IP \(bu 2 \fI\%TypeError\fP – if timeout is not int .IP \(bu 2 \fI\%ValueError\fP – if timeout is less than 0 .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B brpoplpush(sourcekey, destkey, timeout=0, encoding=) Remove and get the last element in a list, or block until one is available. .INDENT 7.0 .TP .B Raises .INDENT 7.0 .IP \(bu 2 \fI\%TypeError\fP – if timeout is not int .IP \(bu 2 \fI\%ValueError\fP – if timeout is less than 0 .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B lindex(key, index, *, encoding=) Get an element from a list by its index. .INDENT 7.0 .TP .B Raises \fI\%TypeError\fP – if index is not int .UNINDENT .UNINDENT .INDENT 7.0 .TP .B linsert(key, pivot, value, before=False) Inserts value in the list stored at key either before or after the reference value pivot. .UNINDENT .INDENT 7.0 .TP .B llen(key) Returns the length of the list stored at key. .UNINDENT .INDENT 7.0 .TP .B lpop(key, *, encoding=) Removes and returns the first element of the list stored at key. .UNINDENT .INDENT 7.0 .TP .B lpush(key, value, *values) Insert all the specified values at the head of the list stored at key. 
.UNINDENT .INDENT 7.0 .TP .B lpushx(key, value) Inserts value at the head of the list stored at key, only if key already exists and holds a list. .UNINDENT .INDENT 7.0 .TP .B lrange(key, start, stop, *, encoding=) Returns the specified elements of the list stored at key. .INDENT 7.0 .TP .B Raises \fI\%TypeError\fP – if start or stop is not int .UNINDENT .UNINDENT .INDENT 7.0 .TP .B lrem(key, count, value) Removes the first count occurrences of elements equal to value from the list stored at key. .INDENT 7.0 .TP .B Raises \fI\%TypeError\fP – if count is not int .UNINDENT .UNINDENT .INDENT 7.0 .TP .B lset(key, index, value) Sets the list element at index to value. .INDENT 7.0 .TP .B Raises \fI\%TypeError\fP – if index is not int .UNINDENT .UNINDENT .INDENT 7.0 .TP .B ltrim(key, start, stop) Trim an existing list so that it will contain only the specified range of elements specified. .INDENT 7.0 .TP .B Raises \fI\%TypeError\fP – if start or stop is not int .UNINDENT .UNINDENT .INDENT 7.0 .TP .B rpop(key, *, encoding=) Removes and returns the last element of the list stored at key. .UNINDENT .INDENT 7.0 .TP .B rpoplpush(sourcekey, destkey, *, encoding=) Atomically returns and removes the last element (tail) of the list stored at source, and pushes the element at the first element (head) of the list stored at destination. .UNINDENT .INDENT 7.0 .TP .B rpush(key, value, *values) Insert all the specified values at the tail of the list stored at key. .UNINDENT .INDENT 7.0 .TP .B rpushx(key, value) Inserts value at the tail of the list stored at key, only if key already exists and holds a list. .UNINDENT .UNINDENT .SS Set commands .INDENT 0.0 .TP .B class aioredis.commands.SetCommandsMixin Set commands mixin. .sp For commands details see: \fI\%http://redis.io/commands#set\fP .INDENT 7.0 .TP .B isscan(key, *, match=None, count=None) Incrementally iterate set elements using async for. 
.sp Usage example: .sp .nf .ft C >>> async for val in redis.isscan(key, match=\(aqsomething*\(aq): \&... print(\(aqMatched:\(aq, val) .ft P .fi .UNINDENT .INDENT 7.0 .TP .B sadd(key, member, *members) Add one or more members to a set. .UNINDENT .INDENT 7.0 .TP .B scard(key) Get the number of members in a set. .UNINDENT .INDENT 7.0 .TP .B sdiff(key, *keys) Subtract multiple sets. .UNINDENT .INDENT 7.0 .TP .B sdiffstore(destkey, key, *keys) Subtract multiple sets and store the resulting set in a key. .UNINDENT .INDENT 7.0 .TP .B sinter(key, *keys) Intersect multiple sets. .UNINDENT .INDENT 7.0 .TP .B sinterstore(destkey, key, *keys) Intersect multiple sets and store the resulting set in a key. .UNINDENT .INDENT 7.0 .TP .B sismember(key, member) Determine if a given value is a member of a set. .UNINDENT .INDENT 7.0 .TP .B smembers(key, *, encoding=) Get all the members in a set. .UNINDENT .INDENT 7.0 .TP .B smove(sourcekey, destkey, member) Move a member from one set to another. .UNINDENT .INDENT 7.0 .TP .B spop(key, *, encoding=) Remove and return a random member from a set. .UNINDENT .INDENT 7.0 .TP .B srandmember(key, count=None, *, encoding=) Get one or multiple random members from a set. .UNINDENT .INDENT 7.0 .TP .B srem(key, member, *members) Remove one or more members from a set. .UNINDENT .INDENT 7.0 .TP .B sscan(key, cursor=0, match=None, count=None) Incrementally iterate Set elements. .UNINDENT .INDENT 7.0 .TP .B sunion(key, *keys) Add multiple sets. .UNINDENT .INDENT 7.0 .TP .B sunionstore(destkey, key, *keys) Add multiple sets and store the resulting set in a key. .UNINDENT .UNINDENT .SS Sorted Set commands .INDENT 0.0 .TP .B class aioredis.commands.SortedSetCommandsMixin Sorted Sets commands mixin. .sp For commands details see: \fI\%http://redis.io/commands/#sorted_set\fP .INDENT 7.0 .TP .B izscan(key, *, match=None, count=None) Incrementally iterate sorted set items using async for. 
.sp Usage example: .sp .nf .ft C >>> async for val, score in redis.izscan(key, match=\(aqsomething*\(aq): \&... print(\(aqMatched:\(aq, val, \(aq:\(aq, score) .ft P .fi .UNINDENT .INDENT 7.0 .TP .B zadd(key, score, member, *pairs, exist=None) Add one or more members to a sorted set or update its score. .INDENT 7.0 .TP .B Raises .INDENT 7.0 .IP \(bu 2 \fI\%TypeError\fP – score not int or float .IP \(bu 2 \fI\%TypeError\fP – length of pairs is not even number .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B zcard(key) Get the number of members in a sorted set. .UNINDENT .INDENT 7.0 .TP .B zcount(key, min=\-inf, max=inf, *, exclude=None) Count the members in a sorted set with scores within the given values. .INDENT 7.0 .TP .B Raises .INDENT 7.0 .IP \(bu 2 \fI\%TypeError\fP – min or max is not float or int .IP \(bu 2 \fI\%ValueError\fP – if min is greater than max .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B zincrby(key, increment, member) Increment the score of a member in a sorted set. .INDENT 7.0 .TP .B Raises \fI\%TypeError\fP – increment is not float or int .UNINDENT .UNINDENT .INDENT 7.0 .TP .B zinterstore(destkey, key, *keys, with_weights=False, aggregate=None) Intersect multiple sorted sets and store result in a new key. .INDENT 7.0 .TP .B Parameters \fBwith_weights\fP (\fI\%bool\fP) – when set to true each key must be a tuple in form of (key, weight) .UNINDENT .UNINDENT .INDENT 7.0 .TP .B zlexcount(key, min=b\(aq\-\(aq, max=b\(aq+\(aq, include_min=True, include_max=True) Count the number of members in a sorted set between a given lexicographical range. .INDENT 7.0 .TP .B Raises .INDENT 7.0 .IP \(bu 2 \fI\%TypeError\fP – if min is not bytes .IP \(bu 2 \fI\%TypeError\fP – if max is not bytes .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B zrange(key, start=0, stop=\-1, withscores=False, encoding=) Return a range of members in a sorted set, by index. 
.INDENT 7.0 .TP .B Raises .INDENT 7.0 .IP \(bu 2 \fI\%TypeError\fP – if start is not int .IP \(bu 2 \fI\%TypeError\fP – if stop is not int .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B zrangebylex(key, min=b\(aq\-\(aq, max=b\(aq+\(aq, include_min=True, include_max=True, offset=None, count=None, encoding=) Return a range of members in a sorted set, by lexicographical range. .INDENT 7.0 .TP .B Raises .INDENT 7.0 .IP \(bu 2 \fI\%TypeError\fP – if min is not bytes .IP \(bu 2 \fI\%TypeError\fP – if max is not bytes .IP \(bu 2 \fI\%TypeError\fP – if both offset and count are not specified .IP \(bu 2 \fI\%TypeError\fP – if offset is not bytes .IP \(bu 2 \fI\%TypeError\fP – if count is not bytes .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B zrangebyscore(key, min=\-inf, max=inf, withscores=False, offset=None, count=None, *, exclude=None, encoding=) Return a range of members in a sorted set, by score. .INDENT 7.0 .TP .B Raises .INDENT 7.0 .IP \(bu 2 \fI\%TypeError\fP – if min or max is not float or int .IP \(bu 2 \fI\%TypeError\fP – if both offset and count are not specified .IP \(bu 2 \fI\%TypeError\fP – if offset is not int .IP \(bu 2 \fI\%TypeError\fP – if count is not int .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B zrank(key, member) Determine the index of a member in a sorted set. .UNINDENT .INDENT 7.0 .TP .B zrem(key, member, *members) Remove one or more members from a sorted set. .UNINDENT .INDENT 7.0 .TP .B zremrangebylex(key, min=b\(aq\-\(aq, max=b\(aq+\(aq, include_min=True, include_max=True) Remove all members in a sorted set between the given lexicographical range. .INDENT 7.0 .TP .B Raises .INDENT 7.0 .IP \(bu 2 \fI\%TypeError\fP – if min is not bytes .IP \(bu 2 \fI\%TypeError\fP – if max is not bytes .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B zremrangebyrank(key, start, stop) Remove all members in a sorted set within the given indexes. 
.INDENT 7.0 .TP .B Raises .INDENT 7.0 .IP \(bu 2 \fI\%TypeError\fP – if start is not int .IP \(bu 2 \fI\%TypeError\fP – if stop is not int .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B zremrangebyscore(key, min=\-inf, max=inf, *, exclude=None) Remove all members in a sorted set within the given scores. .INDENT 7.0 .TP .B Raises \fI\%TypeError\fP – if min or max is not int or float .UNINDENT .UNINDENT .INDENT 7.0 .TP .B zrevrange(key, start, stop, withscores=False, encoding=) Return a range of members in a sorted set, by index, with scores ordered from high to low. .INDENT 7.0 .TP .B Raises \fI\%TypeError\fP – if start or stop is not int .UNINDENT .UNINDENT .INDENT 7.0 .TP .B zrevrangebylex(key, min=b\(aq\-\(aq, max=b\(aq+\(aq, include_min=True, include_max=True, offset=None, count=None, encoding=) Return a range of members in a sorted set, by lexicographical range from high to low. .INDENT 7.0 .TP .B Raises .INDENT 7.0 .IP \(bu 2 \fI\%TypeError\fP – if min is not bytes .IP \(bu 2 \fI\%TypeError\fP – if max is not bytes .IP \(bu 2 \fI\%TypeError\fP – if both offset and count are not specified .IP \(bu 2 \fI\%TypeError\fP – if offset is not bytes .IP \(bu 2 \fI\%TypeError\fP – if count is not bytes .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B zrevrangebyscore(key, max=inf, min=\-inf, *, exclude=None, withscores=False, offset=None, count=None, encoding=) Return a range of members in a sorted set, by score, with scores ordered from high to low. .INDENT 7.0 .TP .B Raises .INDENT 7.0 .IP \(bu 2 \fI\%TypeError\fP – if min or max is not float or int .IP \(bu 2 \fI\%TypeError\fP – if both offset and count are not specified .IP \(bu 2 \fI\%TypeError\fP – if offset is not int .IP \(bu 2 \fI\%TypeError\fP – if count is not int .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B zrevrank(key, member) Determine the index of a member in a sorted set, with scores ordered from high to low. 
.UNINDENT .INDENT 7.0 .TP .B zscan(key, cursor=0, match=None, count=None) Incrementally iterate sorted set elements and associated scores. .UNINDENT .INDENT 7.0 .TP .B zscore(key, member) Get the score associated with the given member in a sorted set. .UNINDENT .INDENT 7.0 .TP .B zunionstore(destkey, key, *keys, with_weights=False, aggregate=None) Add multiple sorted sets and store result in a new key. .UNINDENT .UNINDENT .SS Server commands .INDENT 0.0 .TP .B class aioredis.commands.ServerCommandsMixin Server commands mixin. .sp For commands details see: \fI\%http://redis.io/commands/#server\fP .INDENT 7.0 .TP .B bgrewriteaof() Asynchronously rewrite the append\-only file. .UNINDENT .INDENT 7.0 .TP .B bgsave() Asynchronously save the dataset to disk. .UNINDENT .INDENT 7.0 .TP .B client_getname(encoding=) Get the current connection name. .UNINDENT .INDENT 7.0 .TP .B client_kill() Kill the connection of a client. .sp \fBWARNING:\fP .INDENT 7.0 .INDENT 3.5 Not Implemented .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B client_list() Get the list of client connections. .sp Returns list of ClientInfo named tuples. .UNINDENT .INDENT 7.0 .TP .B client_pause(timeout) Stop processing commands from clients for \fItimeout\fP milliseconds. .INDENT 7.0 .TP .B Raises .INDENT 7.0 .IP \(bu 2 \fI\%TypeError\fP – if timeout is not int .IP \(bu 2 \fI\%ValueError\fP – if timeout is less than 0 .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B client_setname(name) Set the current connection name. .UNINDENT .INDENT 7.0 .TP .B command() Get array of Redis commands. .UNINDENT .INDENT 7.0 .TP .B command_count() Get total number of Redis commands. .UNINDENT .INDENT 7.0 .TP .B command_getkeys(command, *args, encoding=\(aqutf\-8\(aq) Extract keys given a full Redis command. .UNINDENT .INDENT 7.0 .TP .B command_info(command, *commands) Get array of specific Redis command details. .UNINDENT .INDENT 7.0 .TP .B config_get(parameter=\(aq*\(aq) Get the value of a configuration parameter(s). 
.sp If called without argument will return all parameters. .INDENT 7.0 .TP .B Raises \fI\%TypeError\fP – if parameter is not string .UNINDENT .UNINDENT .INDENT 7.0 .TP .B config_resetstat() Reset the stats returned by INFO. .UNINDENT .INDENT 7.0 .TP .B config_rewrite() Rewrite the configuration file with the in memory configuration. .UNINDENT .INDENT 7.0 .TP .B config_set(parameter, value) Set a configuration parameter to the given value. .UNINDENT .INDENT 7.0 .TP .B dbsize() Return the number of keys in the selected database. .UNINDENT .INDENT 7.0 .TP .B debug_object(key) Get debugging information about a key. .UNINDENT .INDENT 7.0 .TP .B debug_segfault(key) Make the server crash. .UNINDENT .INDENT 7.0 .TP .B debug_sleep(timeout) Suspend connection for timeout seconds. .UNINDENT .INDENT 7.0 .TP .B flushall() Remove all keys from all databases. .UNINDENT .INDENT 7.0 .TP .B flushdb() Remove all keys from the current database. .UNINDENT .INDENT 7.0 .TP .B info(section=\(aqdefault\(aq) Get information and statistics about the server. .sp If called without argument will return default set of sections. For available sections, see \fI\%http://redis.io/commands/INFO\fP .INDENT 7.0 .TP .B Raises \fI\%ValueError\fP – if section is invalid .UNINDENT .UNINDENT .INDENT 7.0 .TP .B lastsave() Get the UNIX time stamp of the last successful save to disk. .UNINDENT .INDENT 7.0 .TP .B monitor() Listen for all requests received by the server in real time. .sp \fBWARNING:\fP .INDENT 7.0 .INDENT 3.5 Will not be implemented for now. .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B role() Return the role of the server instance. .sp Returns named tuples describing role of the instance. For fields information see \fI\%http://redis.io/commands/role#output\-format\fP .UNINDENT .INDENT 7.0 .TP .B save() Synchronously save the dataset to disk. .UNINDENT .INDENT 7.0 .TP .B shutdown(save=None) Synchronously save the dataset to disk and then shut down the server. 
.UNINDENT .INDENT 7.0 .TP .B slaveof(host=, port=None) Make the server a slave of another instance, or promote it as master. .sp Calling \fBslaveof(None)\fP will send \fBSLAVEOF NO ONE\fP\&. .sp Changed in version v0.2.6: \fBslaveof()\fP form deprecated in favour of explicit \fBslaveof(None)\fP\&. .UNINDENT .INDENT 7.0 .TP .B slowlog_get(length=None) Returns the Redis slow queries log. .UNINDENT .INDENT 7.0 .TP .B slowlog_len() Returns length of Redis slow queries log. .UNINDENT .INDENT 7.0 .TP .B slowlog_reset() Resets Redis slow queries log. .UNINDENT .INDENT 7.0 .TP .B sync() Redis\-server internal command used for replication. .UNINDENT .INDENT 7.0 .TP .B time() Return current server time. .UNINDENT .UNINDENT .SS HyperLogLog commands .INDENT 0.0 .TP .B class aioredis.commands.HyperLogLogCommandsMixin HyperLogLog commands mixin. .sp For commands details see: \fI\%http://redis.io/commands#hyperloglog\fP .INDENT 7.0 .TP .B pfadd(key, value, *values) Adds the specified elements to the specified HyperLogLog. .UNINDENT .INDENT 7.0 .TP .B pfcount(key, *keys) Return the approximated cardinality of the set(s) observed by the HyperLogLog at key(s). .UNINDENT .INDENT 7.0 .TP .B pfmerge(destkey, sourcekey, *sourcekeys) Merge N different HyperLogLogs into a single one. .UNINDENT .UNINDENT .SS Transaction commands .INDENT 0.0 .TP .B class aioredis.commands.TransactionsCommandsMixin Transaction commands mixin. .sp For commands details see: \fI\%http://redis.io/commands/#transactions\fP .sp Transactions HOWTO: .sp .nf .ft C >>> tr = redis.multi_exec() >>> result_future1 = tr.incr(\(aqfoo\(aq) >>> result_future2 = tr.incr(\(aqbar\(aq) >>> try: \&... result = await tr.execute() \&... except MultiExecError: \&... pass # check what happened >>> result1 = await result_future1 >>> result2 = await result_future2 >>> assert result == [result1, result2] .ft P .fi .INDENT 7.0 .TP .B multi_exec() Returns MULTI/EXEC pipeline wrapper. 
.sp Usage: .sp .nf .ft C >>> tr = redis.multi_exec() >>> fut1 = tr.incr(\(aqfoo\(aq) # NO \(gaawait\(ga as it will block forever! >>> fut2 = tr.incr(\(aqbar\(aq) >>> result = await tr.execute() >>> result [1, 1] >>> await asyncio.gather(fut1, fut2) [1, 1] .ft P .fi .UNINDENT .INDENT 7.0 .TP .B pipeline() Returns \fI\%Pipeline\fP object to execute bulk of commands. .sp It is provided for convenience. Commands can be pipelined without it. .sp Example: .sp .nf .ft C >>> pipe = redis.pipeline() >>> fut1 = pipe.incr(\(aqfoo\(aq) # NO \(gaawait\(ga as it will block forever! >>> fut2 = pipe.incr(\(aqbar\(aq) >>> result = await pipe.execute() >>> result [1, 1] >>> await asyncio.gather(fut1, fut2) [1, 1] >>> # >>> # The same can be done without pipeline: >>> # >>> fut1 = redis.incr(\(aqfoo\(aq) # the \(aqINCR foo\(aq command already sent >>> fut2 = redis.incr(\(aqbar\(aq) >>> await asyncio.gather(fut1, fut2) [2, 2] .ft P .fi .UNINDENT .INDENT 7.0 .TP .B unwatch() Forget about all watched keys. .UNINDENT .INDENT 7.0 .TP .B watch(key, *keys) Watch the given keys to determine execution of the MULTI/EXEC block. .UNINDENT .UNINDENT .INDENT 0.0 .TP .B class aioredis.commands.Pipeline(connection, commands_factory=lambda conn: conn, *, loop=None) Commands pipeline. .sp Buffers commands for execution in bulk. .sp This class implements \fI__getattr__\fP method allowing to call methods on instance created with \fBcommands_factory\fP\&. .INDENT 7.0 .TP .B Parameters .INDENT 7.0 .IP \(bu 2 \fBconnection\fP (\fIaioredis.RedisConnection\fP) – Redis connection .IP \(bu 2 \fBcommands_factory\fP (\fI\%callable\fP) – Commands factory to get methods from. .IP \(bu 2 \fBloop\fP (\fI\%EventLoop\fP) – An optional \fIevent loop\fP instance (uses \fI\%asyncio.get_event_loop()\fP if not specified). .UNINDENT .UNINDENT .INDENT 7.0 .TP .B coroutine execute(*, return_exceptions=False) Executes all buffered commands and returns result. 
.sp Any exception that is raised by any command is caught and raised later when processing results. .sp If \fBreturn_exceptions\fP is set to \fBTrue\fP then all collected errors are returned in resulting list otherwise single \fBaioredis.PipelineError\fP exception is raised (containing all collected errors). .INDENT 7.0 .TP .B Parameters \fBreturn_exceptions\fP (\fI\%bool\fP) – Raise or return exceptions. .TP .B Raises \fBaioredis.PipelineError\fP – Raised when any command caused an error. .UNINDENT .UNINDENT .UNINDENT .INDENT 0.0 .TP .B class aioredis.commands.MultiExec(connection, commands_factory=lambda conn: conn, *, loop=None) Bases: \fI\%Pipeline\fP\&. .sp Multi/Exec pipeline wrapper. .sp See \fI\%Pipeline\fP for parameters description. .INDENT 7.0 .TP .B coroutine execute(*, return_exceptions=False) Executes all buffered commands and returns result. .sp see \fI\%Pipeline.execute()\fP for details. .INDENT 7.0 .TP .B Parameters \fBreturn_exceptions\fP (\fI\%bool\fP) – Raise or return exceptions. .TP .B Raises .INDENT 7.0 .IP \(bu 2 \fBaioredis.MultiExecError\fP – Raised instead of \fBaioredis.PipelineError\fP .IP \(bu 2 \fBaioredis.WatchVariableError\fP – If watched variable is changed .UNINDENT .UNINDENT .UNINDENT .UNINDENT .SS Scripting commands .INDENT 0.0 .TP .B class aioredis.commands.ScriptingCommandsMixin Scripting commands mixin. .sp For commands details see: \fI\%http://redis.io/commands#scripting\fP .INDENT 7.0 .TP .B eval(script, keys=[], args=[]) Execute a Lua script server side. .UNINDENT .INDENT 7.0 .TP .B evalsha(digest, keys=[], args=[]) Execute a Lua script server side by its SHA1 digest. .UNINDENT .INDENT 7.0 .TP .B script_exists(digest, *digests) Check existence of scripts in the script cache. .UNINDENT .INDENT 7.0 .TP .B script_flush() Remove all the scripts from the script cache. .UNINDENT .INDENT 7.0 .TP .B script_kill() Kill the script currently in execution. 
.UNINDENT .INDENT 7.0 .TP .B script_load(script) Load the specified Lua script into the script cache. .UNINDENT .UNINDENT .SS Server commands .INDENT 0.0 .TP .B class aioredis.commands.ServerCommandsMixin Server commands mixin. .sp For commands details see: \fI\%http://redis.io/commands/#server\fP .INDENT 7.0 .TP .B bgrewriteaof() Asynchronously rewrite the append\-only file. .UNINDENT .INDENT 7.0 .TP .B bgsave() Asynchronously save the dataset to disk. .UNINDENT .INDENT 7.0 .TP .B client_getname(encoding=) Get the current connection name. .UNINDENT .INDENT 7.0 .TP .B client_kill() Kill the connection of a client. .sp \fBWARNING:\fP .INDENT 7.0 .INDENT 3.5 Not Implemented .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B client_list() Get the list of client connections. .sp Returns list of ClientInfo named tuples. .UNINDENT .INDENT 7.0 .TP .B client_pause(timeout) Stop processing commands from clients for \fItimeout\fP milliseconds. .INDENT 7.0 .TP .B Raises .INDENT 7.0 .IP \(bu 2 \fI\%TypeError\fP – if timeout is not int .IP \(bu 2 \fI\%ValueError\fP – if timeout is less than 0 .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B client_setname(name) Set the current connection name. .UNINDENT .INDENT 7.0 .TP .B command() Get array of Redis commands. .UNINDENT .INDENT 7.0 .TP .B command_count() Get total number of Redis commands. .UNINDENT .INDENT 7.0 .TP .B command_getkeys(command, *args, encoding=\(aqutf\-8\(aq) Extract keys given a full Redis command. .UNINDENT .INDENT 7.0 .TP .B command_info(command, *commands) Get array of specific Redis command details. .UNINDENT .INDENT 7.0 .TP .B config_get(parameter=\(aq*\(aq) Get the value of a configuration parameter(s). .sp If called without argument will return all parameters. .INDENT 7.0 .TP .B Raises \fI\%TypeError\fP – if parameter is not string .UNINDENT .UNINDENT .INDENT 7.0 .TP .B config_resetstat() Reset the stats returned by INFO. 
.UNINDENT .INDENT 7.0 .TP .B config_rewrite() Rewrite the configuration file with the in memory configuration. .UNINDENT .INDENT 7.0 .TP .B config_set(parameter, value) Set a configuration parameter to the given value. .UNINDENT .INDENT 7.0 .TP .B dbsize() Return the number of keys in the selected database. .UNINDENT .INDENT 7.0 .TP .B debug_object(key) Get debugging information about a key. .UNINDENT .INDENT 7.0 .TP .B debug_segfault(key) Make the server crash. .UNINDENT .INDENT 7.0 .TP .B debug_sleep(timeout) Suspend connection for timeout seconds. .UNINDENT .INDENT 7.0 .TP .B flushall() Remove all keys from all databases. .UNINDENT .INDENT 7.0 .TP .B flushdb() Remove all keys from the current database. .UNINDENT .INDENT 7.0 .TP .B info(section=\(aqdefault\(aq) Get information and statistics about the server. .sp If called without argument will return default set of sections. For available sections, see \fI\%http://redis.io/commands/INFO\fP .INDENT 7.0 .TP .B Raises \fI\%ValueError\fP – if section is invalid .UNINDENT .UNINDENT .INDENT 7.0 .TP .B lastsave() Get the UNIX time stamp of the last successful save to disk. .UNINDENT .INDENT 7.0 .TP .B monitor() Listen for all requests received by the server in real time. .sp \fBWARNING:\fP .INDENT 7.0 .INDENT 3.5 Will not be implemented for now. .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B role() Return the role of the server instance. .sp Returns named tuples describing role of the instance. For fields information see \fI\%http://redis.io/commands/role#output\-format\fP .UNINDENT .INDENT 7.0 .TP .B save() Synchronously save the dataset to disk. .UNINDENT .INDENT 7.0 .TP .B shutdown(save=None) Synchronously save the dataset to disk and then shut down the server. .UNINDENT .INDENT 7.0 .TP .B slaveof(host=, port=None) Make the server a slave of another instance, or promote it as master. .sp Calling \fBslaveof(None)\fP will send \fBSLAVEOF NO ONE\fP\&. 
.sp Changed in version v0.2.6: \fBslaveof()\fP form deprecated in favour of explicit \fBslaveof(None)\fP\&. .UNINDENT .INDENT 7.0 .TP .B slowlog_get(length=None) Returns the Redis slow queries log. .UNINDENT .INDENT 7.0 .TP .B slowlog_len() Returns length of Redis slow queries log. .UNINDENT .INDENT 7.0 .TP .B slowlog_reset() Resets Redis slow queries log. .UNINDENT .INDENT 7.0 .TP .B sync() Redis\-server internal command used for replication. .UNINDENT .INDENT 7.0 .TP .B time() Return current server time. .UNINDENT .UNINDENT .SS Pub/Sub commands .sp Also see aioredis.Channel\&. .INDENT 0.0 .TP .B class aioredis.commands.PubSubCommandsMixin Pub/Sub commands mixin. .sp For commands details see: \fI\%http://redis.io/commands/#pubsub\fP .INDENT 7.0 .TP .B channels Returns read\-only channels dict. .sp See \fBpubsub_channels\fP .UNINDENT .INDENT 7.0 .TP .B in_pubsub Indicates that connection is in PUB/SUB mode. .sp Provides the number of subscribed channels. .UNINDENT .INDENT 7.0 .TP .B patterns Returns read\-only patterns dict. .sp See \fBpubsub_patterns\fP .UNINDENT .INDENT 7.0 .TP .B psubscribe(pattern, *patterns) Switch connection to Pub/Sub mode and subscribe to specified patterns. .sp Arguments can be instances of \fBChannel\fP\&. .sp Returns \fI\%asyncio.gather()\fP coroutine which when done will return a list of subscribed \fBChannel\fP objects with \fBis_pattern\fP property set to \fBTrue\fP\&. .UNINDENT .INDENT 7.0 .TP .B publish(channel, message) Post a message to channel. .UNINDENT .INDENT 7.0 .TP .B publish_json(channel, obj) Post a JSON\-encoded message to channel. .UNINDENT .INDENT 7.0 .TP .B pubsub_channels(pattern=None) Lists the currently active channels. .UNINDENT .INDENT 7.0 .TP .B pubsub_numpat() Returns the number of subscriptions to patterns. .UNINDENT .INDENT 7.0 .TP .B pubsub_numsub(*channels) Returns the number of subscribers for the specified channels. 
.UNINDENT .INDENT 7.0 .TP .B punsubscribe(pattern, *patterns) Unsubscribe from specific patterns. .sp Arguments can be instances of \fBChannel\fP\&. .UNINDENT .INDENT 7.0 .TP .B subscribe(channel, *channels) Switch connection to Pub/Sub mode and subscribe to specified channels. .sp Arguments can be instances of \fBChannel\fP\&. .sp Returns \fI\%asyncio.gather()\fP coroutine which when done will return a list of \fBChannel\fP objects. .UNINDENT .INDENT 7.0 .TP .B unsubscribe(channel, *channels) Unsubscribe from specific channels. .sp Arguments can be instances of \fBChannel\fP\&. .UNINDENT .UNINDENT .SS Cluster commands .sp \fBWARNING:\fP .INDENT 0.0 .INDENT 3.5 Current release (1.0.0) of the library \fBdoes not support\fP \fI\%Redis Cluster\fP in a full manner. It provides only several API methods which may be changed in future. .UNINDENT .UNINDENT .SH AIOREDIS.ABC — INTERFACES REFERENCE .sp This module defines several abstract classes that must be used when implementing custom connection managers or other features. .INDENT 0.0 .TP .B class aioredis.abc.AbcConnection Bases: \fI\%abc.ABC\fP .sp Abstract connection interface. .INDENT 7.0 .TP .B address Connection address. .UNINDENT .INDENT 7.0 .TP .B close() Perform connection(s) close and resources cleanup. .UNINDENT .INDENT 7.0 .TP .B closed Flag indicating if connection is closing or already closed. .UNINDENT .INDENT 7.0 .TP .B db Current selected DB index. .UNINDENT .INDENT 7.0 .TP .B encoding Current set connection codec. .UNINDENT .INDENT 7.0 .TP .B execute(command, *args, **kwargs) Execute redis command. .UNINDENT .INDENT 7.0 .TP .B execute_pubsub(command, *args, **kwargs) Execute Redis (p)subscribe/(p)unsubscribe commands. .UNINDENT .INDENT 7.0 .TP .B in_pubsub Returns number of subscribed channels. .sp Can be tested as bool indicating Pub/Sub mode state. .UNINDENT .INDENT 7.0 .TP .B pubsub_channels Read\-only channels dict. .UNINDENT .INDENT 7.0 .TP .B pubsub_patterns Read\-only patterns dict. 
.UNINDENT .INDENT 7.0 .TP .B coroutine wait_closed() Coroutine waiting until all resources are closed/released/cleaned up. .UNINDENT .UNINDENT .INDENT 0.0 .TP .B class aioredis.abc.AbcPool Bases: \fI\%aioredis.abc.AbcConnection\fP .sp Abstract connections pool interface. .sp Inherited from AbcConnection so both have common interface for executing Redis commands. .INDENT 7.0 .TP .B coroutine acquire() Acquires connection from pool. .UNINDENT .INDENT 7.0 .TP .B address Connection address or None. .UNINDENT .INDENT 7.0 .TP .B get_connection() Gets free connection from pool in a sync way. .sp If no connection available — returns None. .UNINDENT .INDENT 7.0 .TP .B release(conn) Releases connection to pool. .INDENT 7.0 .TP .B Parameters \fBconn\fP (\fIAbcConnection\fP) – Owned connection to be released. .UNINDENT .UNINDENT .UNINDENT .INDENT 0.0 .TP .B class aioredis.abc.AbcChannel Bases: \fI\%abc.ABC\fP .sp Abstract Pub/Sub Channel interface. .INDENT 7.0 .TP .B close() Marks Channel as closed, no more messages will be sent to it. .sp Called by RedisConnection when channel is unsubscribed or connection is closed. .UNINDENT .INDENT 7.0 .TP .B coroutine get() Wait and return new message. .sp Will raise \fBChannelClosedError\fP if channel is not active. .UNINDENT .INDENT 7.0 .TP .B is_active Flag indicating that channel has unreceived messages and not marked as closed. .UNINDENT .INDENT 7.0 .TP .B is_pattern Boolean flag indicating if channel is pattern channel. .UNINDENT .INDENT 7.0 .TP .B name Encoded channel name or pattern. .UNINDENT .INDENT 7.0 .TP .B put_nowait(data) Send data to channel. .sp Called by RedisConnection when new message received. For pattern subscriptions data will be a tuple of channel name and message itself. .UNINDENT .UNINDENT .SH AIOREDIS.PUBSUB — PUB/SUB TOOLS REFERENCE .sp Module provides a Pub/Sub listener interface implementing multi\-producers, single\-consumer queue pattern. 
.INDENT 0.0 .TP .B class aioredis.pubsub.Receiver(loop=None) Multi\-producers, single\-consumer Pub/Sub queue. .sp Can be used in cases where a single consumer task must read messages from several different channels (where pattern subscriptions may not work well or channels can be added/removed dynamically). .sp Example use case: .sp .nf .ft C >>> from aioredis.pubsub import Receiver >>> from aioredis.abc import AbcChannel >>> mpsc = Receiver(loop=loop) >>> async def reader(mpsc): \&... async for channel, msg in mpsc.iter(): \&... assert isinstance(channel, AbcChannel) \&... print("Got {!r} in channel {!r}".format(msg, channel)) >>> asyncio.ensure_future(reader(mpsc)) >>> await redis.subscribe(mpsc.channel(\(aqchannel:1\(aq), \&... mpsc.channel(\(aqchannel:3\(aq), \&... mpsc.channel(\(aqchannel:5\(aq)) >>> await redis.psubscribe(mpsc.pattern(\(aqhello\(aq)) >>> # publishing \(aqHello world\(aq into \(aqhello\-channel\(aq >>> # will print this message: Got b\(aqHello world\(aq in channel b\(aqhello\-channel\(aq >>> # when all is done: >>> await redis.unsubscribe(\(aqchannel:1\(aq, \(aqchannel:3\(aq, \(aqchannel:5\(aq) >>> await redis.punsubscribe(\(aqhello\(aq) >>> mpsc.stop() >>> # any message received after stop() will be ignored. .ft P .fi .sp To do: few words regarding exclusive channel usage. .INDENT 7.0 .TP .B channel(name) Create a channel. .sp Returns \fB_Sender\fP object implementing \fBAbcChannel\fP\&. .UNINDENT .INDENT 7.0 .TP .B channels Read\-only channels dict. .UNINDENT .INDENT 7.0 .TP .B coroutine get(*, encoding=None, decoder=None) Wait for and return pub/sub message from one of channels. .sp Return value is either: .INDENT 7.0 .IP \(bu 2 tuple of two elements: channel & message; .IP \(bu 2 tuple of three elements: pattern channel, (target channel & message); .IP \(bu 2 or None in case Receiver is not active or has just been stopped. 
.UNINDENT .INDENT 7.0 .TP .B Raises \fBaioredis.ChannelClosedError\fP – If listener is stopped and all messages have been received. .UNINDENT .UNINDENT .INDENT 7.0 .TP .B is_active Returns True if listener has any active subscription. .UNINDENT .INDENT 7.0 .TP .B iter(*, encoding=None, decoder=None) Returns async iterator. .sp Usage example: .sp .nf .ft C >>> async for ch, msg in mpsc.iter(): \&... print(ch, msg) .ft P .fi .UNINDENT .INDENT 7.0 .TP .B pattern(pattern) Create a pattern channel. .sp Returns \fB_Sender\fP object implementing \fBAbcChannel\fP\&. .UNINDENT .INDENT 7.0 .TP .B patterns Read\-only patterns dict. .UNINDENT .INDENT 7.0 .TP .B stop() Stop receiving messages. .sp All new messages after this call will be ignored, so you must call unsubscribe before stopping this listener. .UNINDENT .INDENT 7.0 .TP .B coroutine wait_message() Blocks until a new message appears. .UNINDENT .UNINDENT .INDENT 0.0 .TP .B class aioredis.pubsub._Sender(receiver, name, is_pattern, *, loop) Write\-Only Channel. .sp Does not allow direct \fB\&.get()\fP calls. .sp Bases: \fBaioredis.abc.AbcChannel\fP .sp \fBNot to be used directly\fP, returned by \fI\%Receiver.channel()\fP or \fI\%Receiver.pattern()\fP calls. .UNINDENT .SH AIOREDIS.SENTINEL — SENTINEL CLIENT REFERENCE .sp This section contains reference for Redis Sentinel client. 
.sp Sample usage: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C import aioredis sentinel = await aioredis\&.create_sentinel( [(\(aqsentinel.host1\(aq, 26379), (\(aqsentinel.host2\(aq, 26379)]) redis = sentinel\&.master_for(\(aqmymaster\(aq) assert await redis\&.set(\(aqkey\(aq, \(aqvalue\(aq) assert await redis\&.get(\(aqkey\(aq, encoding=\(aqutf\-8\(aq) == \(aqvalue\(aq # redis client will reconnect/reconfigure automatically # by sentinel client instance .ft P .fi .UNINDENT .UNINDENT .SS \fBRedisSentinel\fP .INDENT 0.0 .TP .B coroutine aioredis.sentinel.create_sentinel(sentinels, *, db=None, password=None, encoding=None, minsize=1, maxsize=10, ssl=None, parser=None, loop=None) Creates Redis Sentinel client. .INDENT 7.0 .TP .B Parameters .INDENT 7.0 .IP \(bu 2 \fBsentinels\fP (\fI\%list\fP\fI[\fP\fI\%tuple\fP\fI]\fP) – A list of Sentinel node addresses. .IP \(bu 2 \fBdb\fP (\fI\%int\fP) – Redis database index to select for every master/slave connections. .IP \(bu 2 \fBpassword\fP (\fI\%str\fP\fI or \fP\fI\%bytes\fP\fI or \fP\fI\%None\fP) – Password to use if Redis server instance requires authorization. .IP \(bu 2 \fBencoding\fP (\fI\%str\fP\fI or \fP\fI\%None\fP) – Codec to use for response decoding. .IP \(bu 2 \fBminsize\fP (\fI\%int\fP) – Minimum number of connections (to master or slave) to initialize and keep in pool. Default is 1. .IP \(bu 2 \fBmaxsize\fP (\fI\%int\fP) – Maximum number of connections (to master or slave) that can be created in pool. Default is 10. .IP \(bu 2 \fBssl\fP (\fI\%ssl.SSLContext\fP or True or None) – SSL context that is passed through to \fBasyncio.BaseEventLoop.create_connection()\fP\&. .IP \(bu 2 \fBparser\fP (\fI\%callable\fP\fI or \fP\fI\%None\fP) – Protocol parser class. Can be used to set custom protocol reader; expected same interface as \fBhiredis.Reader\fP\&. .IP \(bu 2 \fBloop\fP (\fI\%EventLoop\fP) – An optional \fIevent loop\fP instance (uses \fI\%asyncio.get_event_loop()\fP if not specified). 
.UNINDENT .TP .B Return type RedisSentinel .UNINDENT .UNINDENT .INDENT 0.0 .TP .B class aioredis.sentinel.RedisSentinel Redis Sentinel client. .sp The class provides interface to Redis Sentinel commands as well as few methods to acquire managed Redis clients, see below. .INDENT 7.0 .TP .B closed \fBTrue\fP if client is closed. .UNINDENT .INDENT 7.0 .TP .B master_for(name) Get \fBRedis\fP client to named master. The client is instantiated with special connections pool which is controlled by \fI\%SentinelPool\fP\&. \fBThis method is not a coroutine.\fP .INDENT 7.0 .TP .B Parameters \fBname\fP (\fI\%str\fP) – Service name. .TP .B Return type aioredis.Redis .UNINDENT .UNINDENT .INDENT 7.0 .TP .B slave_for(name) Get \fBRedis\fP client to named slave. The client is instantiated with special connections pool which is controlled by \fI\%SentinelPool\fP\&. \fBThis method is not a coroutine.\fP .INDENT 7.0 .TP .B Parameters \fBname\fP (\fI\%str\fP) – Service name. .TP .B Return type aioredis.Redis .UNINDENT .UNINDENT .INDENT 7.0 .TP .B execute(command, *args, **kwargs) Execute Sentinel command. Every command is prefixed with \fBSENTINEL\fP automatically. .INDENT 7.0 .TP .B Return type \fI\%asyncio.Future\fP .UNINDENT .UNINDENT .INDENT 7.0 .TP .B coroutine ping() Send PING to Sentinel instance. Currently the ping command will be sent to first sentinel in pool, this may change in future. .UNINDENT .INDENT 7.0 .TP .B master(name) Returns a dictionary containing the specified master’s state. Please refer to Redis documentation for more info on returned data. .INDENT 7.0 .TP .B Return type \fI\%asyncio.Future\fP .UNINDENT .UNINDENT .INDENT 7.0 .TP .B master_address(name) Returns a \fB(host, port)\fP pair for the given service name. .INDENT 7.0 .TP .B Return type \fI\%asyncio.Future\fP .UNINDENT .UNINDENT .INDENT 7.0 .TP .B masters() Returns a list of dictionaries containing all masters’ states. 
.INDENT 7.0 .TP .B Return type \fI\%asyncio.Future\fP .UNINDENT .UNINDENT .INDENT 7.0 .TP .B slaves(name) Returns a list of slaves for the given service name. .INDENT 7.0 .TP .B Return type \fI\%asyncio.Future\fP .UNINDENT .UNINDENT .INDENT 7.0 .TP .B sentinels(name) Returns a list of Sentinels for the given service name. .INDENT 7.0 .TP .B Return type \fI\%asyncio.Future\fP .UNINDENT .UNINDENT .INDENT 7.0 .TP .B monitor(name, ip, port, quorum) Add a new master to be monitored by this Sentinel. .INDENT 7.0 .TP .B Parameters .INDENT 7.0 .IP \(bu 2 \fBname\fP (\fI\%str\fP) – Service name. .IP \(bu 2 \fBip\fP (\fI\%str\fP) – New node’s IP address. .IP \(bu 2 \fBport\fP (\fI\%int\fP) – Node’s TCP port. .IP \(bu 2 \fBquorum\fP (\fI\%int\fP) – Sentinel quorum. .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B remove(name) Remove a master from Sentinel’s monitoring. .INDENT 7.0 .TP .B Parameters \fBname\fP (\fI\%str\fP) – Service name .UNINDENT .UNINDENT .INDENT 7.0 .TP .B set(name, option, value) Set Sentinel monitoring parameter for a given master. Please refer to Redis documentation for more info on options. .INDENT 7.0 .TP .B Parameters .INDENT 7.0 .IP \(bu 2 \fBname\fP (\fI\%str\fP) – Master’s name. .IP \(bu 2 \fBoption\fP (\fI\%str\fP) – Monitoring option name. .IP \(bu 2 \fBvalue\fP (\fI\%str\fP) – Monitoring option value. .UNINDENT .UNINDENT .UNINDENT .INDENT 7.0 .TP .B failover(name) Force a failover of a named master. .INDENT 7.0 .TP .B Parameters \fBname\fP (\fI\%str\fP) – Master’s name. .UNINDENT .UNINDENT .INDENT 7.0 .TP .B check_quorum(name) Check if the current Sentinel configuration is able to reach the quorum needed to failover a master, and the majority needed to authorize the failover. .INDENT 7.0 .TP .B Parameters \fBname\fP (\fI\%str\fP) – Master’s name. .UNINDENT .UNINDENT .INDENT 7.0 .TP .B close() Close all opened connections. .UNINDENT .INDENT 7.0 .TP .B coroutine wait_closed() Wait until all connections are closed. 
.UNINDENT .UNINDENT .SS \fBSentinelPool\fP .sp \fBWARNING:\fP .INDENT 0.0 .INDENT 3.5 This API has not yet stabilized and may change in future releases. .UNINDENT .UNINDENT .INDENT 0.0 .TP .B coroutine aioredis.sentinel.create_sentinel_pool(sentinels, *, db=None, password=None, encoding=None, minsize=1, maxsize=10, ssl=None, parser=None, loop=None) Creates Sentinel connections pool. .UNINDENT .INDENT 0.0 .TP .B class aioredis.sentinel.SentinelPool Sentinel connections pool. .sp This pool manages both sentinel connections and Redis master/slave connections. .INDENT 7.0 .TP .B closed \fBTrue\fP if pool and all connections are closed. .UNINDENT .INDENT 7.0 .TP .B master_for(name) Returns a managed connections pool for requested service name. .INDENT 7.0 .TP .B Parameters \fBname\fP (\fI\%str\fP) – Service name. .TP .B Return type \fBManagedPool\fP .UNINDENT .UNINDENT .INDENT 7.0 .TP .B slave_for(name) Returns a managed connections pool for requested service name. .INDENT 7.0 .TP .B Parameters \fBname\fP (\fI\%str\fP) – Service name. .TP .B Return type \fBManagedPool\fP .UNINDENT .UNINDENT .INDENT 7.0 .TP .B execute(command, *args, **kwargs) Execute Sentinel command. .UNINDENT .INDENT 7.0 .TP .B coroutine discover(timeout=0.2) Discover Sentinels and all monitored services within given timeout. .sp This will reset internal state of this pool. .UNINDENT .INDENT 7.0 .TP .B coroutine discover_master(service, timeout) Perform named master discovery. .INDENT 7.0 .TP .B Parameters .INDENT 7.0 .IP \(bu 2 \fBservice\fP (\fI\%str\fP) – Service name. .IP \(bu 2 \fBtimeout\fP (\fI\%float\fP) – Operation timeout .UNINDENT .TP .B Return type aioredis.RedisConnection .UNINDENT .UNINDENT .INDENT 7.0 .TP .B coroutine discover_slave(service, timeout) Perform slave discovery. .INDENT 7.0 .TP .B Parameters .INDENT 7.0 .IP \(bu 2 \fBservice\fP (\fI\%str\fP) – Service name. 
.IP \(bu 2 \fBtimeout\fP (\fI\%float\fP) – Operation timeout .UNINDENT .TP .B Return type aioredis.RedisConnection .UNINDENT .UNINDENT .INDENT 7.0 .TP .B close() Close all controlled connections (both to sentinel and redis). .UNINDENT .INDENT 7.0 .TP .B coroutine wait_closed() Wait until pool gets closed. .UNINDENT .UNINDENT .SH EXAMPLES OF AIOREDIS USAGE .sp Below is a list of examples from \fI\%aioredis/examples\fP (see for more). .sp Every example is a correct python program that can be executed. .SS Low\-level connection usage example .sp \fBget source code\fP .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C import asyncio import aioredis async def main(): conn = await aioredis.create_connection( \(aqredis://localhost\(aq, encoding=\(aqutf\-8\(aq) ok = await conn.execute(\(aqset\(aq, \(aqmy\-key\(aq, \(aqsome value\(aq) assert ok == \(aqOK\(aq, ok str_value = await conn.execute(\(aqget\(aq, \(aqmy\-key\(aq) raw_value = await conn.execute(\(aqget\(aq, \(aqmy\-key\(aq, encoding=None) assert str_value == \(aqsome value\(aq assert raw_value == b\(aqsome value\(aq print(\(aqstr value:\(aq, str_value) print(\(aqraw value:\(aq, raw_value) # optionally close connection conn.close() await conn.wait_closed() if __name__ == \(aq__main__\(aq: asyncio.get_event_loop().run_until_complete(main()) .ft P .fi .UNINDENT .UNINDENT .SS Connections pool example .sp \fBget source code\fP .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C import asyncio import aioredis async def main(): pool = await aioredis.create_pool( \(aqredis://localhost\(aq, minsize=5, maxsize=10) with await pool as conn: # low\-level redis connection await conn.execute(\(aqset\(aq, \(aqmy\-key\(aq, \(aqvalue\(aq) val = await conn.execute(\(aqget\(aq, \(aqmy\-key\(aq) print(\(aqraw value:\(aq, val) pool.close() await pool.wait_closed() # closing all open connections if __name__ == \(aq__main__\(aq: asyncio.get_event_loop().run_until_complete(main()) .ft P .fi .UNINDENT .UNINDENT .SS Commands example .sp \fBget source code\fP .INDENT 
0.0 .INDENT 3.5 .sp .nf .ft C import asyncio import aioredis async def main(): # Redis client bound to single connection (no auto reconnection). redis = await aioredis.create_redis( \(aqredis://localhost\(aq) await redis.set(\(aqmy\-key\(aq, \(aqvalue\(aq) val = await redis.get(\(aqmy\-key\(aq) print(val) # gracefully closing underlying connection redis.close() await redis.wait_closed() async def redis_pool(): # Redis client bound to pool of connections (auto\-reconnecting). redis = await aioredis.create_redis_pool( \(aqredis://localhost\(aq) await redis.set(\(aqmy\-key\(aq, \(aqvalue\(aq) val = await redis.get(\(aqmy\-key\(aq) print(val) # gracefully closing underlying connection redis.close() await redis.wait_closed() if __name__ == \(aq__main__\(aq: asyncio.get_event_loop().run_until_complete(main()) asyncio.get_event_loop().run_until_complete(redis_pool()) .ft P .fi .UNINDENT .UNINDENT .SS Transaction example .sp \fBget source code\fP .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C import asyncio import aioredis async def main(): redis = await aioredis.create_redis( \(aqredis://localhost\(aq) await redis.delete(\(aqfoo\(aq, \(aqbar\(aq) tr = redis.multi_exec() fut1 = tr.incr(\(aqfoo\(aq) fut2 = tr.incr(\(aqbar\(aq) res = await tr.execute() res2 = await asyncio.gather(fut1, fut2) print(res) assert res == res2 redis.close() await redis.wait_closed() if __name__ == \(aq__main__\(aq: asyncio.get_event_loop().run_until_complete(main()) .ft P .fi .UNINDENT .UNINDENT .SS Pub/Sub example .sp \fBget source code\fP .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C import asyncio import aioredis async def reader(ch): while (await ch.wait_message()): msg = await ch.get_json() print("Got Message:", msg) async def main(): pub = await aioredis.create_redis( \(aqredis://localhost\(aq) sub = await aioredis.create_redis( \(aqredis://localhost\(aq) res = await sub.subscribe(\(aqchan:1\(aq) ch1 = res[0] tsk = asyncio.ensure_future(reader(ch1)) res = await pub.publish_json(\(aqchan:1\(aq, ["Hello", 
"world"]) assert res == 1 await sub.unsubscribe(\(aqchan:1\(aq) await tsk sub.close() pub.close() if __name__ == \(aq__main__\(aq: asyncio.get_event_loop().run_until_complete(main()) .ft P .fi .UNINDENT .UNINDENT .SS Scan command example .sp \fBget source code\fP .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C import asyncio import aioredis async def main(): """Scan command example.""" redis = await aioredis.create_redis( \(aqredis://localhost\(aq) await redis.mset(\(aqkey:1\(aq, \(aqvalue1\(aq, \(aqkey:2\(aq, \(aqvalue2\(aq) cur = b\(aq0\(aq # set initial cursor to 0 while cur: cur, keys = await redis.scan(cur, match=\(aqkey:*\(aq) print("Iteration results:", keys) redis.close() await redis.wait_closed() if __name__ == \(aq__main__\(aq: import os if \(aqredis_version:2.6\(aq not in os.environ.get(\(aqREDIS_VERSION\(aq, \(aq\(aq): asyncio.get_event_loop().run_until_complete(main()) .ft P .fi .UNINDENT .UNINDENT .SS Sentinel client .sp \fBget source code\fP .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C import asyncio import aioredis async def main(): sentinel_client = await aioredis.create_sentinel( [(\(aqlocalhost\(aq, 26379)]) master_redis = sentinel_client.master_for(\(aqmymaster\(aq) info = await master_redis.role() print("Master role:", info) assert info.role == \(aqmaster\(aq sentinel_client.close() await sentinel_client.wait_closed() if __name__ == \(aq__main__\(aq: asyncio.get_event_loop().run_until_complete(main()) .ft P .fi .UNINDENT .UNINDENT .SH CONTRIBUTING .sp To start contributing you must read all the following. 
.sp First you must fork/clone repo from \fI\%github\fP: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C $ git clone git@github.com:aio\-libs/aioredis.git .ft P .fi .UNINDENT .UNINDENT .sp Next, you should install all python dependencies, it is as easy as running single command: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C $ make devel .ft P .fi .UNINDENT .UNINDENT .sp this command will install: .INDENT 0.0 .IP \(bu 2 \fBsphinx\fP for building documentation; .IP \(bu 2 \fBpytest\fP for running tests; .IP \(bu 2 \fBflake8\fP for code linting; .IP \(bu 2 and few other packages. .UNINDENT .SS Code style .sp Code \fBmust\fP be pep8 compliant. .sp You can check it with following command: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C $ make flake .ft P .fi .UNINDENT .UNINDENT .SS Running tests .sp You can run tests in any of the following ways: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C # will run tests in a verbose mode $ make test # or $ py.test # will run tests with coverage report $ make cov # or $ py.test \-\-cov .ft P .fi .UNINDENT .UNINDENT .SS SSL tests .sp Running SSL tests requires following additional programs to be installed: .INDENT 0.0 .IP \(bu 2 \fBopenssl\fP – to generate test key and certificate; .IP \(bu 2 \fBsocat\fP – to make SSL proxy; .UNINDENT .sp To install these on Ubuntu and generate test key & certificate run: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C $ sudo apt\-get install socat openssl $ make certificate .ft P .fi .UNINDENT .UNINDENT .SS Different Redis server versions .sp To run tests against different redises use \fB\-\-redis\-server\fP command line option: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C $ py.test \-\-redis\-server=/path/to/custom/redis\-server .ft P .fi .UNINDENT .UNINDENT .SS UVLoop .sp To run tests with uvloop: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C $ pip install uvloop $ py.test \-\-uvloop .ft P .fi .UNINDENT .UNINDENT .sp \fBNOTE:\fP .INDENT 0.0 .INDENT 3.5 Until Python 3.5.2 EventLoop has no \fBcreate_future\fP method so aioredis won’t benefit from uvloop’s 
futures. .UNINDENT .UNINDENT .SS Writing tests .sp \fBaioredis\fP uses pytest tool. .sp Tests are located under \fB/tests\fP directory. .sp Pure Python 3.5 tests (ie the ones using \fBasync\fP/\fBawait\fP syntax) must be prefixed with \fBpy35_\fP, for instance see: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C tests/py35_generic_commands_tests.py tests/py35_pool_test.py .ft P .fi .UNINDENT .UNINDENT .SS Fixtures .sp There is a number of fixtures that can be used to write tests: .INDENT 0.0 .TP .B loop Current event loop used for test. This is a function\-scope fixture. Using this fixture will always create new event loop and set global one to None. .INDENT 7.0 .INDENT 3.5 .sp .nf .ft C def test_with_loop(loop): @asyncio.coroutine def do_something(): pass loop.run_until_complete(do_something()) .ft P .fi .UNINDENT .UNINDENT .UNINDENT .INDENT 0.0 .TP .B unused_port() Finds and returns free TCP port. .INDENT 7.0 .INDENT 3.5 .sp .nf .ft C def test_bind(unused_port): port = unused_port() assert 1024 < port <= 65535 .ft P .fi .UNINDENT .UNINDENT .UNINDENT .INDENT 0.0 .TP .B coroutine create_connection(*args, **kw) Wrapper around \fBaioredis.create_connection()\fP\&. Only difference is that it registers connection to be closed after test case, so you should not be worried about unclosed connections. .UNINDENT .INDENT 0.0 .TP .B coroutine create_redis(*args, **kw) Wrapper around \fBaioredis.create_redis()\fP\&. .UNINDENT .INDENT 0.0 .TP .B coroutine create_pool(*args, **kw) Wrapper around \fBaioredis.create_pool()\fP\&. .UNINDENT .INDENT 0.0 .TP .B redis Redis client instance. .UNINDENT .INDENT 0.0 .TP .B pool RedisPool instance. .UNINDENT .INDENT 0.0 .TP .B server Redis server instance info. Namedtuple with following properties: .INDENT 7.0 .INDENT 3.5 .INDENT 0.0 .TP .B name server instance name. .TP .B port Bind port. .TP .B unixsocket Bind unixsocket path. .TP .B version Redis server version tuple. 
.UNINDENT .UNINDENT .UNINDENT .UNINDENT .INDENT 0.0 .TP .B serverB Second predefined Redis server instance info. .UNINDENT .INDENT 0.0 .TP .B start_server(name) Start Redis server instance. Redis instances are cached by name. .INDENT 7.0 .TP .B Returns server info tuple, see \fI\%server\fP\&. .TP .B Return type \fI\%tuple\fP .UNINDENT .UNINDENT .INDENT 0.0 .TP .B ssl_proxy(unsecure_port) Start SSL proxy. .INDENT 7.0 .TP .B Parameters \fBunsecure_port\fP (\fI\%int\fP) – Redis server instance port .TP .B Returns secure_port and ssl_context pair .TP .B Return type \fI\%tuple\fP .UNINDENT .UNINDENT .SS Helpers .sp \fBaioredis\fP also updates pytest’s namespace with several helpers. .INDENT 0.0 .TP .B pytest.redis_version(*version, reason) Marks test with minimum redis version to run. .sp Example: .INDENT 7.0 .INDENT 3.5 .sp .nf .ft C @pytest.redis_version(3, 2, 0, reason="HSTRLEN new in redis 3.2.0") def test_hstrlen(redis): pass .ft P .fi .UNINDENT .UNINDENT .UNINDENT .INDENT 0.0 .TP .B pytest.logs(logger, level=None) Adopted version of \fI\%unittest.TestCase.assertLogs()\fP, see it for details. .sp Example: .INDENT 7.0 .INDENT 3.5 .sp .nf .ft C def test_logs(create_connection, server): with pytest.logs(\(aqaioredis\(aq, \(aqDEBUG\(aq) as cm: conn = yield from create_connection(server.tcp_address) assert cm.output[0].startswith( \(aqDEBUG:aioredis:Creating tcp connection\(aq) .ft P .fi .UNINDENT .UNINDENT .UNINDENT .INDENT 0.0 .TP .B pytest.assert_almost_equal(first, second, places=None, msg=None, delta=None) Adopted version of \fI\%unittest.TestCase.assertAlmostEqual()\fP\&. .UNINDENT .INDENT 0.0 .TP .B pytest.raises_regex(exc_type, message) Adopted version of \fI\%unittest.TestCase.assertRaisesRegex()\fP\&. 
.UNINDENT .SH RELEASES .SS Recent .SS 1.0.0 (2017\-11\-17) .sp \fBNEW\fP: .INDENT 0.0 .IP \(bu 2 \fBImportant!\fP Drop Python 3.3, 3.4 support; (see \fI\%#321\fP, \fI\%#323\fP and \fI\%#326\fP); .IP \(bu 2 \fBImportant!\fP Connections pool has been refactored; now \fBcreate_redis\fP function will yield \fBRedis\fP instance instead of \fBRedisPool\fP (see \fI\%#129\fP); .IP \(bu 2 \fBImportant!\fP Change sorted set commands reply format: return list of tuples instead of plain list for commands accepting \fBwithscores\fP argument (see \fI\%#334\fP); .IP \(bu 2 \fBImportant!\fP Change \fBhscan\fP command reply format: return list of tuples instead of mixed key\-value list (see \fI\%#335\fP); .IP \(bu 2 Implement Redis URI support as supported \fBaddress\fP argument value (see \fI\%#322\fP); .IP \(bu 2 Dropped \fBcreate_reconnecting_redis\fP, \fBcreate_redis_pool\fP should be used instead; .IP \(bu 2 Implement custom \fBStreamReader\fP (see \fI\%#273\fP); .IP \(bu 2 Implement Sentinel support (see \fI\%#181\fP); .IP \(bu 2 Implement pure\-python parser (see \fI\%#212\fP); .IP \(bu 2 Add \fBmigrate_keys\fP command (see \fI\%#187\fP); .IP \(bu 2 Add \fBzrevrangebylex\fP command (see \fI\%#201\fP); .IP \(bu 2 Add \fBcommand\fP, \fBcommand_count\fP, \fBcommand_getkeys\fP and \fBcommand_info\fP commands (see \fI\%#229\fP); .IP \(bu 2 Add \fBping\fP support in pubsub connection (see \fI\%#264\fP); .IP \(bu 2 Add \fBexist\fP parameter to \fBzadd\fP command (see \fI\%#288\fP); .IP \(bu 2 Add \fBMaxClientsError\fP and implement \fBReplyError\fP specialization (see \fI\%#325\fP); .IP \(bu 2 Add \fBencoding\fP parameter to sorted set commands (see \fI\%#289\fP); .UNINDENT .sp \fBFIX\fP: .INDENT 0.0 .IP \(bu 2 Fix \fBCancelledError\fP in \fBconn._reader_task\fP (see \fI\%#301\fP); .IP \(bu 2 Fix pending commands cancellation with \fBCancelledError\fP, use explicit exception instead of calling \fBcancel()\fP method (see \fI\%#316\fP); .IP \(bu 2 Correct error message on Sentinel 
discovery of master/slave with password (see \fI\%#327\fP); .IP \(bu 2 Fix \fBbytearray\fP support as command argument (see \fI\%#329\fP); .IP \(bu 2 Fix critical bug in patched asyncio.Lock (see \fI\%#256\fP); .IP \(bu 2 Fix Multi/Exec transaction canceled error (see \fI\%#225\fP); .IP \(bu 2 Add missing arguments to \fBcreate_redis\fP and \fBcreate_redis_pool\fP; .IP \(bu 2 Fix deprecation warning (see \fI\%#191\fP); .IP \(bu 2 Make correct \fB__aiter__()\fP (see \fI\%#192\fP); .IP \(bu 2 Backward compatibility fix for \fBwith (yield from pool) as conn:\fP (see \fI\%#205\fP); .IP \(bu 2 Fixed pubsub receiver stop() (see \fI\%#211\fP); .UNINDENT .sp \fBMISC\fP: .INDENT 0.0 .IP \(bu 2 Multiple test fixes; .IP \(bu 2 Add PyPy3 to build matrix; .IP \(bu 2 Update dependencies versions; .IP \(bu 2 Add missing Python 3.6 classifier; .UNINDENT .SS 0.3.5 (2017\-11\-08) .sp \fBFIX\fP: .INDENT 0.0 .IP \(bu 2 Fix for indistinguishable futures cancellation with \fBasyncio.CancelledError\fP (see \fI\%#316\fP), cherry\-picked from master; .UNINDENT .SS 0.3.4 (2017\-10\-25) .sp \fBFIX\fP: .INDENT 0.0 .IP \(bu 2 Fix time command result decoding when using connection\-wide encoding setting (see \fI\%#266\fP); .UNINDENT .SS 0.3.3 (2017\-06\-30) .sp \fBFIX\fP: .INDENT 0.0 .IP \(bu 2 Critical bug fixed in patched asyncio.Lock (see \fI\%#256\fP); .UNINDENT .SS 0.3.2 (2017\-06\-21) .sp \fBNEW\fP: .INDENT 0.0 .IP \(bu 2 Added \fBzrevrangebylex\fP command (see \fI\%#201\fP), cherry\-picked from master; .IP \(bu 2 Add connection timeout (see \fI\%#221\fP), cherry\-picked from master; .UNINDENT .sp \fBFIX\fP: .INDENT 0.0 .IP \(bu 2 Fixed pool close warning (see \fI\%#239\fP and \fI\%#236\fP), cherry\-picked from master; .IP \(bu 2 Fixed asyncio Lock deadlock issue (see \fI\%#231\fP and \fI\%#241\fP); .UNINDENT .SS 0.3.1 (2017\-05\-09) .sp \fBFIX\fP: .INDENT 0.0 .IP \(bu 2 Fix pubsub Receiver missing iter() method (see \fI\%#203\fP); .UNINDENT .SS 0.3.0 (2017\-01\-11) .sp \fBNEW\fP: .INDENT 
0.0 .IP \(bu 2 Pub/Sub connection commands accept \fBChannel\fP instances (see \fI\%#168\fP); .IP \(bu 2 Implement new Pub/Sub MPSC (multi\-producers, single\-consumer) Queue – \fBaioredis.pubsub.Receiver\fP (see \fI\%#176\fP); .IP \(bu 2 Add \fBaioredis.abc\fP module providing abstract base classes defining interface for basic lib components; (see \fI\%#176\fP); .IP \(bu 2 Implement Geo commands support (see \fI\%#177\fP and \fI\%#179\fP); .UNINDENT .sp \fBFIX\fP: .INDENT 0.0 .IP \(bu 2 Minor tests fixes; .UNINDENT .sp \fBMISC\fP: .INDENT 0.0 .IP \(bu 2 Update examples and docs to use \fBasync\fP/\fBawait\fP syntax also keeping \fByield from\fP examples for history (see \fI\%#173\fP); .IP \(bu 2 Reflow Travis CI configuration; add Python 3.6 section (see \fI\%#170\fP); .IP \(bu 2 Add AppVeyor integration to run tests on Windows (see \fI\%#180\fP); .IP \(bu 2 Update multiple development requirements; .UNINDENT .sp .ce ---- .ce 0 .sp .SS Historical .SS 0.2.9 (2016\-10\-24) .sp \fBNEW\fP: .INDENT 0.0 .IP \(bu 2 Allow multiple keys in \fBEXISTS\fP command (see \fI\%#156\fP and \fI\%#157\fP); .UNINDENT .sp \fBFIX\fP: .INDENT 0.0 .IP \(bu 2 Close RedisPool when connection to Redis failed (see \fI\%#136\fP); .IP \(bu 2 Add simple \fBINFO\fP command argument validation (see \fI\%#140\fP); .IP \(bu 2 Remove invalid uses of \fBnext()\fP .UNINDENT .sp \fBMISC\fP: .INDENT 0.0 .IP \(bu 2 Update devel.rst docs; update Pub/Sub Channel docs (cross\-refs); .IP \(bu 2 Update MANIFEST.in to include docs, examples and tests in source bundle; .UNINDENT .SS 0.2.8 (2016\-07\-22) .sp \fBNEW\fP: .INDENT 0.0 .IP \(bu 2 Add \fBhmset_dict\fP command (see \fI\%#130\fP); .IP \(bu 2 Add \fBRedisConnection.address\fP property; .IP \(bu 2 RedisPool \fBminsize\fP/\fBmaxsize\fP must not be \fBNone\fP; .IP \(bu 2 Implement \fBclose()\fP/\fBwait_closed()\fP/\fBclosed\fP interface for pool (see \fI\%#128\fP); .UNINDENT .sp \fBFIX\fP: .INDENT 0.0 .IP \(bu 2 Add test for \fBhstrlen\fP; .IP \(bu 2 Test 
fixes .UNINDENT .sp \fBMISC\fP: .INDENT 0.0 .IP \(bu 2 Enable Redis 3.2.0 on Travis; .IP \(bu 2 Add spell checking when building docs (see \fI\%#132\fP); .IP \(bu 2 Documentation updated; .UNINDENT .SS 0.2.7 (2016\-05\-27) .INDENT 0.0 .IP \(bu 2 \fBcreate_pool()\fP minsize default value changed to 1; .IP \(bu 2 Fixed cancellation of wait_closed (see \fI\%#118\fP); .IP \(bu 2 Fixed \fBtime()\fP convertion to float (see \fI\%#126\fP); .IP \(bu 2 Fixed \fBhmset()\fP method to return bool instead of \fBb\(aqOK\(aq\fP (see \fI\%#126\fP); .IP \(bu 2 Fixed multi/exec + watch issue (changed watch variable was causing \fBtr.execute()\fP to fail) (see \fI\%#121\fP); .IP \(bu 2 Replace \fBasyncio.Future\fP uses with utility method (get ready to Python 3.5.2 \fBloop.create_future()\fP); .IP \(bu 2 Tests switched from unittest to pytest (see \fI\%#126\fP); .IP \(bu 2 Documentation updates; .UNINDENT .SS 0.2.6 (2016\-03\-30) .INDENT 0.0 .IP \(bu 2 Fixed Multi/Exec transactions cancellation issue (see \fI\%#110\fP and \fI\%#114\fP); .IP \(bu 2 Fixed Pub/Sub subscribe concurrency issue (see \fI\%#113\fP and \fI\%#115\fP); .IP \(bu 2 Add SSL/TLS support (see \fI\%#116\fP); .IP \(bu 2 \fBaioredis.ConnectionClosedError\fP raised in \fBexecute_pubsub\fP as well (see \fI\%#108\fP); .IP \(bu 2 \fBRedis.slaveof()\fP method signature changed: now to disable replication one should call \fBredis.slaveof(None)\fP instead of \fBredis.slaveof()\fP; .IP \(bu 2 More tests added; .UNINDENT .SS 0.2.5 (2016\-03\-02) .INDENT 0.0 .IP \(bu 2 Close all Pub/Sub channels on connection close (see \fI\%#88\fP); .IP \(bu 2 Add \fBiter()\fP method to \fBaioredis.Channel\fP allowing to use it with \fBasync for\fP (see \fI\%#89\fP); .IP \(bu 2 Inline code samples in docs made runnable and downloadable (see \fI\%#92\fP); .IP \(bu 2 Python 3.5 examples converted to use \fBasync\fP/\fBawait\fP syntax (see \fI\%#93\fP); .IP \(bu 2 Fix Multi/Exec to honor encoding parameter (see \fI\%#94\fP and \fI\%#97\fP); .IP 
\(bu 2 Add debug message in \fBcreate_connection\fP (see \fI\%#90\fP); .IP \(bu 2 Replace \fBasyncio.async\fP calls with wrapper that respects asyncio version (see \fI\%#101\fP); .IP \(bu 2 Use NODELAY option for TCP sockets (see \fI\%#105\fP); .IP \(bu 2 New \fBaioredis.ConnectionClosedError\fP exception added. Raised if connection to Redis server is lost (see \fI\%#108\fP and \fI\%#109\fP); .IP \(bu 2 Fix RedisPool to close and drop connection in subscribe mode on release; .IP \(bu 2 Fix \fBaioredis.util.decode\fP to recursively decode list responses; .IP \(bu 2 More examples added and docs updated; .IP \(bu 2 Add google groups link to README; .IP \(bu 2 Bump year in LICENSE and docs; .UNINDENT .SS 0.2.4 (2015\-10\-13) .INDENT 0.0 .IP \(bu 2 Python 3.5 \fBasync\fP support: .INDENT 2.0 .IP \(bu 2 New scan commands API (\fBiscan\fP, \fBizscan\fP, \fBihscan\fP); .IP \(bu 2 Pool made awaitable (allowing \fBwith await pool: ...\fP and \fBasync with pool.get() as conn:\fP constructs); .UNINDENT .IP \(bu 2 Fixed dropping closed connections from free pool (see \fI\%#83\fP); .IP \(bu 2 Docs updated; .UNINDENT .SS 0.2.3 (2015\-08\-14) .INDENT 0.0 .IP \(bu 2 Redis cluster support work in progress; .IP \(bu 2 Fixed pool issue causing pool growth over max size & \fBacquire\fP call hangs (see \fI\%#71\fP); .IP \(bu 2 \fBinfo\fP server command result parsing implemented; .IP \(bu 2 Fixed behavior of util functions (see \fI\%#70\fP); .IP \(bu 2 \fBhstrlen\fP command added; .IP \(bu 2 Few fixes in examples; .IP \(bu 2 Few fixes in documentation; .UNINDENT .SS 0.2.2 (2015\-07\-07) .INDENT 0.0 .IP \(bu 2 Decoding data with \fBencoding\fP parameter now takes into account list (array) replies (see \fI\%#68\fP); .IP \(bu 2 \fBencoding\fP parameter added to following commands: .INDENT 2.0 .IP \(bu 2 generic commands: keys, randomkey; .IP \(bu 2 hash commands: hgetall, hkeys, hmget, hvals; .IP \(bu 2 list commands: blpop, brpop, brpoplpush, lindex, lpop, lrange, rpop, rpoplpush; .IP 
\(bu 2 set commands: smembers, spop, srandmember; .IP \(bu 2 string commands: getrange, getset, mget; .UNINDENT .IP \(bu 2 Backward incompatibility: .sp \fBltrim\fP command now returns bool value instead of ‘OK’; .IP \(bu 2 Tests updated; .UNINDENT .SS 0.2.1 (2015\-07\-06) .INDENT 0.0 .IP \(bu 2 Logging added (aioredis.log module); .IP \(bu 2 Fixed issue with \fBwait_message\fP in pub/sub (see \fI\%#66\fP); .UNINDENT .SS 0.2.0 (2015\-06\-04) .INDENT 0.0 .IP \(bu 2 Pub/Sub support added; .IP \(bu 2 Fix in \fBzrevrangebyscore\fP command (see \fI\%#62\fP); .IP \(bu 2 Fixes/tests/docs; .UNINDENT .SS 0.1.5 (2014\-12\-09) .INDENT 0.0 .IP \(bu 2 AutoConnector added; .IP \(bu 2 wait_closed method added for clean connections shutdown; .IP \(bu 2 \fBzscore\fP command fixed; .IP \(bu 2 Test fixes; .UNINDENT .SS 0.1.4 (2014\-09\-22) .INDENT 0.0 .IP \(bu 2 Dropped following Redis methods – \fBRedis.multi()\fP, \fBRedis.exec()\fP, \fBRedis.discard()\fP; .IP \(bu 2 \fBRedis.multi_exec\fP hack’ish property removed; .IP \(bu 2 \fBRedis.multi_exec()\fP method added; .IP \(bu 2 High\-level commands implemented: .INDENT 2.0 .IP \(bu 2 generic commands (tests); .IP \(bu 2 transactions commands (api stabilization). 
.UNINDENT .IP \(bu 2 Backward incompatibilities: .INDENT 2.0 .IP \(bu 2 Following sorted set commands’ API changed: .sp \fBzcount\fP, \fBzrangebyscore\fP, \fBzremrangebyscore\fP, \fBzrevrangebyscore\fP; .IP \(bu 2 set string command’ API changed; .UNINDENT .UNINDENT .SS 0.1.3 (2014\-08\-08) .INDENT 0.0 .IP \(bu 2 RedisConnection.execute refactored to support commands pipelining (see \fI\%#33\fP); .IP \(bu 2 Several fixes; .IP \(bu 2 WIP on transactions and commands interface; .IP \(bu 2 High\-level commands implemented and tested: .INDENT 2.0 .IP \(bu 2 hash commands; .IP \(bu 2 hyperloglog commands; .IP \(bu 2 set commands; .IP \(bu 2 scripting commands; .IP \(bu 2 string commands; .IP \(bu 2 list commands; .UNINDENT .UNINDENT .SS 0.1.2 (2014\-07\-31) .INDENT 0.0 .IP \(bu 2 \fBcreate_connection\fP, \fBcreate_pool\fP, \fBcreate_redis\fP functions updated: db and password arguments made keyword\-only (see \fI\%#26\fP); .IP \(bu 2 Fixed transaction handling (see \fI\%#32\fP); .IP \(bu 2 Response decoding (see \fI\%#16\fP); .UNINDENT .SS 0.1.1 (2014\-07\-07) .INDENT 0.0 .IP \(bu 2 Transactions support (in connection, high\-level commands have some issues); .IP \(bu 2 Docs & tests updated. .UNINDENT .SS 0.1.0 (2014\-06\-24) .INDENT 0.0 .IP \(bu 2 Initial release; .IP \(bu 2 RedisConnection implemented; .IP \(bu 2 RedisPool implemented; .IP \(bu 2 Docs for RedisConnection & RedisPool; .IP \(bu 2 WIP on high\-level API. .UNINDENT .SH GLOSSARY .INDENT 0.0 .TP .B asyncio Reference implementation of \fI\%PEP 3156\fP .sp See \fI\%https://pypi.python.org/pypi/asyncio\fP .TP .B error replies Redis server replies that start with \- (minus) char. Usually starts with \fB\-ERR\fP\&. .TP .B hiredis Python extension that wraps protocol parsing code in \fI\%hiredis\fP\&. .sp See \fI\%https://pypi.python.org/pypi/hiredis\fP .TP .B pytest A mature full\-featured Python testing tool. 
See \fI\%http://pytest.org/latest/\fP .TP .B uvloop Is an ultra fast implementation of asyncio event loop on top of libuv. See \fI\%https://github.com/MagicStack/uvloop\fP .UNINDENT .INDENT 0.0 .IP \(bu 2 genindex .IP \(bu 2 modindex .IP \(bu 2 search .UNINDENT .SH AUTHOR Alexey Popravka .SH COPYRIGHT 2014-2017, Alexey Popravka .\" Generated by docutils manpage writer. . aioredis-1.0.0/docs/examples.rst0000644000175000017500000000237513203624357017423 0ustar alexeyalexey00000000000000Examples of aioredis usage ========================== Below is a list of examples from `aioredis/examples `_ (see for more). Every example is a correct python program that can be executed. .. _aioredis-examples-simple: Low-level connection usage example ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :download:`get source code<../examples/connection.py>` .. literalinclude:: ../examples/connection.py Connections pool example ~~~~~~~~~~~~~~~~~~~~~~~~ :download:`get source code<../examples/pool.py>` .. literalinclude:: ../examples/pool.py Commands example ~~~~~~~~~~~~~~~~ :download:`get source code<../examples/commands.py>` .. literalinclude:: ../examples/commands.py Transaction example ~~~~~~~~~~~~~~~~~~~ :download:`get source code<../examples/transaction.py>` .. literalinclude:: ../examples/transaction.py Pub/Sub example ~~~~~~~~~~~~~~~ :download:`get source code<../examples/pubsub.py>` .. literalinclude:: ../examples/pubsub.py Scan command example ~~~~~~~~~~~~~~~~~~~~ :download:`get source code<../examples/scan.py>` .. literalinclude:: ../examples/scan.py Sentinel client ~~~~~~~~~~~~~~~ :download:`get source code<../examples/sentinel.py>` .. literalinclude:: ../examples/sentinel.py aioredis-1.0.0/docs/migration.rst0000644000175000017500000003110413203624357017566 0ustar alexeyalexey00000000000000Migrating from v0.3 to v1.0 =========================== .. 
contents:: API changes and backward incompatible changes: :local: ---- aioredis.create_pool -------------------- :func:`~aioredis.create_pool` now returns :class:`~aioredis.ConnectionsPool` instead of ``RedisPool``. This means that pool now operates with :class:`~aioredis.RedisConnection` objects and not :class:`~aioredis.Redis`. +--------+--------------------------------------------------------------------+ | | .. code-block:: python3 | | v0.3 | :emphasize-lines: 5 | | | | | | pool = await aioredis.create_pool(('localhost', 6379)) | | | | | | with await pool as redis: | | | # calling methods of Redis class | | | await redis.lpush('list-key', 'item1', 'item2') | | | | +--------+--------------------------------------------------------------------+ | | .. code-block:: python3 | | v1.0 | :emphasize-lines: 5 | | | | | | pool = await aioredis.create_pool(('localhost', 6379)) | | | | | | with await pool as conn: | | | # calling conn.lpush will raise AttributeError exception | | | await conn.execute('lpush', 'list-key', 'item1', 'item2') | | | | +--------+--------------------------------------------------------------------+ aioredis.create_reconnecting_redis ---------------------------------- :func:`~aioredis.create_reconnecting_redis` has been dropped. :func:`~aioredis.create_redis_pool` can be used instead of former function. +--------+--------------------------------------------------------------------+ | | .. code-block:: python3 | | v0.3 | :emphasize-lines: 1 | | | | | | redis = await aioredis.create_reconnecting_redis( | | | ('localhost', 6379)) | | | | | | await redis.lpush('list-key', 'item1', 'item2') | | | | +--------+--------------------------------------------------------------------+ | | .. 
code-block:: python3 | | v1.0 | :emphasize-lines: 1 | | | | | | redis = await aioredis.create_redis_pool( | | | ('localhost', 6379)) | | | | | | await redis.lpush('list-key', 'item1', 'item2') | | | | +--------+--------------------------------------------------------------------+ ``create_redis_pool`` returns :class:`~aioredis.Redis` initialized with ``ConnectionsPool`` which is responsible for reconnecting to server. Also ``create_reconnecting_redis`` was patching the ``RedisConnection`` and breaking ``closed`` property (it was always ``True``). aioredis.Redis -------------- :class:`~aioredis.Redis` class now operates with objects implementing :class:`aioredis.abc.AbcConnection` interface. :class:`~aioredis.RedisConnection` and :class:`~aioredis.ConnectionsPool` are both implementing ``AbcConnection`` so it is become possible to use same API when working with either single connection or connections pool. +--------+--------------------------------------------------------------------+ | | .. code-block:: python3 | | v0.3 | :emphasize-lines: 5 | | | | | | redis = await aioredis.create_redis(('localhost', 6379)) | | | await redis.lpush('list-key', 'item1', 'item2') | | | | | | pool = await aioredis.create_pool(('localhost', 6379)) | | | redis = await pool.acquire() # get Redis object | | | await redis.lpush('list-key', 'item1', 'item2') | | | | +--------+--------------------------------------------------------------------+ | | .. 
code-block:: python3 | | v1.0 | :emphasize-lines: 2,5 | | | | | | redis = await aioredis.create_redis(('localhost', 6379)) | | | await redis.lpush('list-key', 'item1', 'item2') | | | | | | redis = await aioredis.create_redis_pool(('localhost', 6379)) | | | await redis.lpush('list-key', 'item1', 'item2') | | | | +--------+--------------------------------------------------------------------+ Blocking operations and connection sharing ------------------------------------------ Current implementation of ``ConnectionsPool`` by default **execute every command on random connection**. The *Pros* of this is that it allowed implementing ``AbcConnection`` interface and hide pool inside ``Redis`` class, and also keep pipelining feature (like RedisConnection.execute). The *Cons* of this is that **different tasks may use same connection and block it** with some long-running command. We can call it **Shared Mode** --- commands are sent to random connections in pool without need to lock [connection]: .. code-block:: python3 redis = await aioredis.create_redis_pool( ('localhost', 6379), minsize=1, maxsize=1) async def task(): # Shared mode await redis.set('key', 'val') asyncio.ensure_future(task()) asyncio.ensure_future(task()) # Both tasks will send commands through same connection # without acquiring (locking) it first. Blocking operations (like ``blpop``, ``brpop`` or long-running LUA scripts) in **shared mode** mode will block connection and thus may lead to whole program malfunction. This *blocking* issue can be easily solved by using exclusive connection for such operations: .. code-block:: python3 :emphasize-lines: 8 redis = await aioredis.create_redis_pool( ('localhost', 6379), minsize=1, maxsize=1) async def task(): # Exclusive mode with await redis as r: await r.set('key', 'val') asyncio.ensure_future(task()) asyncio.ensure_future(task()) # Both tasks will first acquire connection. 
We can call this **Exclusive Mode** --- context manager is used to acquire (lock) exclusive connection from pool and send all commands through it. .. note:: This technique is similar to v0.3 pool usage: .. code-block:: python3 # in aioredis v0.3 pool = await aioredis.create_pool(('localhost', 6379)) with await pool as redis: # Redis is bound to exclusive connection redis.set('key', 'val') Sorted set commands return values --------------------------------- Sorted set commands (like ``zrange``, ``zrevrange`` and others) that accept ``withscores`` argument now **return list of tuples** instead of plain list. +--------+--------------------------------------------------------------------+ | | .. code-block:: python3 | | v0.3 | :emphasize-lines: 4,7-8 | | | | | | redis = await aioredis.create_redis(('localhost', 6379)) | | | await redis.zadd('zset-key', 1, 'one', 2, 'two') | | | res = await redis.zrange('zset-key', withscores=True) | | | assert res == [b'one', 1, b'two', 2] | | | | | | # not the easiest way to make a dict | | | it = iter(res) | | | assert dict(zip(it, it)) == {b'one': 1, b'two': 2} | | | | +--------+--------------------------------------------------------------------+ | | .. code-block:: python3 | | v1.0 | :emphasize-lines: 4,7 | | | | | | redis = await aioredis.create_redis(('localhost', 6379)) | | | await redis.zadd('zset-key', 1, 'one', 2, 'two') | | | res = await redis.zrange('zset-key', withscores=True) | | | assert res == [(b'one', 1), (b'two', 2)] | | | | | | # now it's easier to make a dict of it | | | assert dict(res) == {b'one': 1, b'two': 2} | | | | +--------+--------------------------------------------------------------------+ Hash ``hscan`` command now returns list of tuples ------------------------------------------------- ``hscan`` updated to return a list of tuples instead of plain mixed key/value list. +--------+--------------------------------------------------------------------+ | | .. 
code-block:: python3 | | v0.3 | :emphasize-lines: 4,7-8 | | | | | | redis = await aioredis.create_redis(('localhost', 6379)) | | | await redis.hmset('hash', 'one', 1, 'two', 2) | | | cur, data = await redis.hscan('hash') | | | assert data == [b'one', b'1', b'two', b'2'] | | | | | | # not the easiest way to make a dict | | | it = iter(data) | | | assert dict(zip(it, it)) == {b'one': b'1', b'two': b'2'} | | | | +--------+--------------------------------------------------------------------+ | | .. code-block:: python3 | | v1.0 | :emphasize-lines: 4,7 | | | | | | redis = await aioredis.create_redis(('localhost', 6379)) | | | await redis.hmset('hash', 'one', 1, 'two', 2) | | | cur, data = await redis.hscan('hash') | | | assert data == [(b'one', b'1'), (b'two', b'2')] | | | | | | # now it's easier to make a dict of it | | | assert dict(data) == {b'one': b'1', b'two': b'2'} | | | | +--------+--------------------------------------------------------------------+ aioredis-1.0.0/docs/mpsc.rst0000644000175000017500000000100013203624357016527 0ustar alexeyalexey00000000000000.. module:: aioredis.pubsub :mod:`aioredis.pubsub` --- Pub/Sub Tools Reference ================================================== Module provides a Pub/Sub listener interface implementing multi-producers, single-consumer queue pattern. .. autoclass:: Receiver :members: To do: few words regarding exclusive channel usage. .. autoclass:: _Sender Bases: :class:`aioredis.abc.AbcChannel` **Not to be used directly**, returned by :meth:`Receiver.channel` or :meth:`Receiver.pattern()` calls. aioredis-1.0.0/docs/abc.rst0000664000175000017500000000065713036463350016333 0ustar alexeyalexey00000000000000.. module:: aioredis.abc :mod:`aioredis.abc` --- Interfaces Reference ============================================ This module defines several abstract classes that must be used when implementing custom connection managers or other features. .. autoclass:: AbcConnection :show-inheritance: :members: .. 
autoclass:: AbcPool :show-inheritance: :members: .. autoclass:: AbcChannel :show-inheritance: :members: aioredis-1.0.0/docs/mixins.rst0000644000175000017500000001171013203624357017105 0ustar alexeyalexey00000000000000.. _aioredis-commands: :class:`aioredis.Redis` --- Commands Mixins Reference ===================================================== .. module:: aioredis.commands This section contains reference for mixins implementing Redis commands. Descriptions are taken from ``docstrings`` so may not contain proper markup. .. autoclass:: aioredis.Redis :members: :param pool_or_conn: Can be either :class:`~aioredis.RedisConnection` or :class:`~aioredis.ConnectionsPool`. :type pool_or_conn: :class:`~aioredis.abc.AbcConnection` Generic commands ---------------- .. autoclass:: GenericCommandsMixin :members: Geo commands ------------ .. versionadded:: v0.3.0 .. autoclass:: GeoCommandsMixin :members: Geo commands result wrappers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. class:: GeoPoint(longitude, latitude) Bases: :class:`tuple` Named tuple representing result returned by ``GEOPOS`` and ``GEORADIUS`` commands. :param float longitude: longitude value. :param float latitude: latitude value. .. class:: GeoMember(member, dist, hash, coord) Bases: :class:`tuple` Named tuple representing result returned by ``GEORADIUS`` and ``GEORADIUSBYMEMBER`` commands. :param member: Value of geo sorted set item; :type member: str or bytes :param dist: Distance in units passed to call. :class:`None` if ``with_dist`` was not set in :meth:`~GeoCommandsMixin.georadius` call. :type dist: None or float :param hash: Geo-hash represented as number. :class:`None` if ``with_hash`` was not in :meth:`~GeoCommandsMixin.georadius` call. :type hash: None or int :param coord: Coordinate of geospatial index member. :class:`None` if ``with_coord`` was not set in :meth:`~GeoCommandsMixin.georadius` call. :type coord: None or GeoPoint Strings commands ---------------- .. 
autoclass:: StringCommandsMixin :members: Hash commands ------------- .. autoclass:: HashCommandsMixin :members: List commands ------------- .. autoclass:: ListCommandsMixin :members: Set commands ------------ .. autoclass:: SetCommandsMixin :members: Sorted Set commands ------------------- .. autoclass:: SortedSetCommandsMixin :members: Server commands --------------- .. autoclass:: ServerCommandsMixin :members: HyperLogLog commands -------------------- .. autoclass:: HyperLogLogCommandsMixin :members: Transaction commands -------------------- .. autoclass:: TransactionsCommandsMixin :members: .. class:: Pipeline(connection, commands_factory=lambda conn: conn, \*,\ loop=None) Commands pipeline. Buffers commands for execution in bulk. This class implements `__getattr__` method allowing to call methods on instance created with ``commands_factory``. :param connection: Redis connection :type connection: aioredis.RedisConnection :param callable commands_factory: Commands factory to get methods from. :param loop: An optional *event loop* instance (uses :func:`asyncio.get_event_loop` if not specified). :type loop: :ref:`EventLoop` .. comethod:: execute(\*, return_exceptions=False) Executes all buffered commands and returns result. Any exception that is raised by any command is caught and raised later when processing results. If ``return_exceptions`` is set to ``True`` then all collected errors are returned in resulting list otherwise single :exc:`aioredis.PipelineError` exception is raised (containing all collected errors). :param bool return_exceptions: Raise or return exceptions. :raise aioredis.PipelineError: Raised when any command caused error. .. class:: MultiExec(connection, commands_factory=lambda conn: conn, \*,\ loop=None) Bases: :class:`~Pipeline`. Multi/Exec pipeline wrapper. See :class:`~Pipeline` for parameters description. .. comethod:: execute(\*, return_exceptions=False) Executes all buffered commands and returns result. 
see :meth:`Pipeline.execute` for details. :param bool return_exceptions: Raise or return exceptions. :raise aioredis.MultiExecError: Raised instead of :exc:`aioredis.PipelineError` :raise aioredis.WatchVariableError: If watched variable is changed Scripting commands ------------------ .. autoclass:: ScriptingCommandsMixin :members: Server commands --------------- .. autoclass:: ServerCommandsMixin :members: Pub/Sub commands ---------------- Also see :ref:`aioredis.Channel`. .. autoclass:: PubSubCommandsMixin :members: Cluster commands ---------------- .. warning:: Current release (|release|) of the library **does not support** `Redis Cluster`_ in a full manner. It provides only several API methods which may be changed in future. .. _Redis Cluster: http://redis.io/topics/cluster-tutorial .. :: .. autoclass:: ClusterCommandsMixin :members: aioredis-1.0.0/docs/sentinel.rst0000644000175000017500000001564413203624357017431 0ustar alexeyalexey00000000000000.. highlight:: python3 .. module:: aioredis.sentinel :mod:`aioredis.sentinel` --- Sentinel Client Reference ====================================================== This section contains reference for Redis Sentinel client. Sample usage: .. code:: python import aioredis sentinel = await aioredis.create_sentinel( [('sentinel.host1', 26379), ('sentinel.host2', 26379)]) redis = sentinel.master_for('mymaster') assert await redis.set('key', 'value') assert await redis.get('key', encoding='utf-8') == 'value' # redis client will reconnect/reconfigure automatically # by sentinel client instance ``RedisSentinel`` ----------------- .. corofunction:: create_sentinel(sentinels, \*, db=None, password=None,\ encoding=None, minsize=1, maxsize=10,\ ssl=None, parser=None,\ loop=None) Creates Redis Sentinel client. :param sentinels: A list of Sentinel node addresses. :type sentinels: list[tuple] :param int db: Redis database index to select for every master/slave connections. 
:param password: Password to use if Redis server instance requires authorization. :type password: str or bytes or None :param encoding: Codec to use for response decoding. :type encoding: str or None :param int minsize: Minimum number of connections (to master or slave) to initialize and keep in pool. Default is 1. :param int maxsize: Maximum number of connections (to master or slave) that can be created in pool. Default is 10. :param ssl: SSL context that is passed through to :func:`asyncio.BaseEventLoop.create_connection`. :type ssl: :class:`ssl.SSLContext` or True or None :param parser: Protocol parser class. Can be used to set custom protocol reader; expected same interface as :class:`hiredis.Reader`. :type parser: callable or None :param loop: An optional *event loop* instance (uses :func:`asyncio.get_event_loop` if not specified). :type loop: :ref:`EventLoop` :rtype: RedisSentinel .. class:: RedisSentinel Redis Sentinel client. The class provides interface to Redis Sentinel commands as well as few methods to acquire managed Redis clients, see below. .. attribute:: closed ``True`` if client is closed. .. method:: master_for(name) Get :class:`~.Redis` client to named master. The client is instantiated with special connections pool which is controlled by :class:`SentinelPool`. **This method is not a coroutine.** :param str name: Service name. :rtype: aioredis.Redis .. method:: slave_for(name) Get :class:`~.Redis` client to named slave. The client is instantiated with special connections pool which is controlled by :class:`SentinelPool`. **This method is not a coroutine.** :param str name: Service name. :rtype: aioredis.Redis .. method:: execute(command, \*args, \**kwargs) Execute Sentinel command. Every command is prefixed with ``SENTINEL`` automatically. :rtype: asyncio.Future .. comethod:: ping() Send PING to Sentinel instance. Currently the ping command will be sent to first sentinel in pool, this may change in future. .. 
method:: master(name) Returns a dictionary containing the specified master's state. Please refer to Redis documentation for more info on returned data. :rtype: asyncio.Future .. method:: master_address(name) Returns a ``(host, port)`` pair for the given service name. :rtype: asyncio.Future .. method:: masters() Returns a list of dictionaries containing all masters' states. :rtype: asyncio.Future .. method:: slaves(name) Returns a list of slaves for the given service name. :rtype: asyncio.Future .. method:: sentinels(name) Returns a list of Sentinels for the given service name. :rtype: asyncio.Future .. method:: monitor(name, ip, port, quorum) Add a new master to be monitored by this Sentinel. :param str name: Service name. :param str ip: New node's IP address. :param int port: Node's TCP port. :param int quorum: Sentinel quorum. .. method:: remove(name) Remove a master from Sentinel's monitoring. :param str name: Service name .. method:: set(name, option, value) Set Sentinel monitoring parameter for a given master. Please refer to Redis documentation for more info on options. :param str name: Master's name. :param str option: Monitoring option name. :param str value: Monitoring option value. .. method:: failover(name) Force a failover of a named master. :param str name: Master's name. .. method:: check_quorum(name) Check if the current Sentinel configuration is able to reach the quorum needed to failover a master, and the majority needed to authorize the failover. :param str name: Master's name. .. method:: close() Close all opened connections. .. comethod:: wait_closed() Wait until all connections are closed. ``SentinelPool`` ---------------- .. warning:: This API has not yet stabilized and may change in future releases. .. cofunction:: create_sentinel_pool(sentinels, \*, db=None, password=None,\ encoding=None, minsize=1, maxsize=10,\ ssl=None, parser=None, loop=None) Creates Sentinel connections pool. .. class:: SentinelPool Sentinel connections pool. 
This pool manages both sentinel connections and Redis master/slave connections. .. attribute:: closed ``True`` if pool and all connections are closed. .. method:: master_for(name) Returns a managed connections pool for requested service name. :param str name: Service name. :rtype: ``ManagedPool`` .. method:: slave_for(name) Returns a managed connections pool for requested service name. :param str name: Service name. :rtype: ``ManagedPool`` .. method:: execute(command, \*args, \**kwargs) Execute Sentinel command. .. comethod:: discover(timeout=0.2) Discover Sentinels and all monitored services within given timeout. This will reset internal state of this pool. .. comethod:: discover_master(service, timeout) Perform named master discovery. :param str service: Service name. :param float timeout: Operation timeout :rtype: aioredis.RedisConnection .. comethod:: discover_slave(service, timeout) Perform slave discovery. :param str service: Service name. :param float timeout: Operation timeout :rtype: aioredis.RedisConnection .. method:: close() Close all controlled connections (both to sentinel and redis). .. comethod:: wait_closed() Wait until pool gets closed. aioredis-1.0.0/docs/glossary.rst0000664000175000017500000000126713012410141017427 0ustar alexeyalexey00000000000000.. _glossary: Glossary ======== .. glossary:: :sorted: asyncio Reference implementation of :pep:`3156` See https://pypi.python.org/pypi/asyncio hiredis Python extension that wraps protocol parsing code in `hiredis`_. See https://pypi.python.org/pypi/hiredis error replies Redis server replies that start with - (minus) char. Usually starts with ``-ERR``. pytest A mature full-featured Python testing tool. See http://pytest.org/latest/ uvloop Is an ultra fast implementation of asyncio event loop on top of libuv. See https://github.com/MagicStack/uvloop .. 
_hiredis: https://github.com/redis/hiredis aioredis-1.0.0/docs/devel.rst0000664000175000017500000001152613012410141016662 0ustar alexeyalexey00000000000000.. highlight:: bash .. _github: https://github.com/aio-libs/aioredis Contributing ============ To start contributing you must read all the following. First you must fork/clone repo from `github`_:: $ git clone git@github.com:aio-libs/aioredis.git Next, you should install all python dependencies, it is as easy as running single command:: $ make devel this command will install: * ``sphinx`` for building documentation; * ``pytest`` for running tests; * ``flake8`` for code linting; * and few other packages. Code style ---------- Code **must** be pep8 compliant. You can check it with following command:: $ make flake Running tests ------------- You can run tests in any of the following ways:: # will run tests in a verbose mode $ make test # or $ py.test # will run tests with coverage report $ make cov # or $ py.test --cov SSL tests ~~~~~~~~~ Running SSL tests requires following additional programs to be installed: * ``openssl`` -- to generate test key and certificate; * ``socat`` -- to make SSL proxy; To install these on Ubuntu and generate test key & certificate run:: $ sudo apt-get install socat openssl $ make certificate Different Redis server versions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To run tests against different redises use ``--redis-server`` command line option:: $ py.test --redis-server=/path/to/custom/redis-server UVLoop ~~~~~~ To run tests with :term:`uvloop`:: $ pip install uvloop $ py.test --uvloop .. note:: Until Python 3.5.2 EventLoop has no ``create_future`` method so aioredis won't benefit from uvloop's futures. Writing tests ------------- :mod:`aioredis` uses :term:`pytest` tool. Tests are located under ``/tests`` directory. 
Pure Python 3.5 tests (ie the ones using ``async``/``await`` syntax) must be prefixed with ``py35_``, for instance see:: tests/py35_generic_commands_tests.py tests/py35_pool_test.py Fixtures ~~~~~~~~ There is a number of fixtures that can be used to write tests: .. attribute:: loop Current event loop used for test. This is a function-scope fixture. Using this fixture will always create new event loop and set global one to None. .. code-block:: python def test_with_loop(loop): @asyncio.coroutine def do_something(): pass loop.run_until_complete(do_something()) .. function:: unused_port() Finds and returns free TCP port. .. code-block:: python def test_bind(unused_port): port = unused_port() assert 1024 < port <= 65535 .. cofunction:: create_connection(\*args, \**kw) Wrapper around :func:`aioredis.create_connection`. Only difference is that it registers connection to be closed after test case, so you should not be worried about unclosed connections. .. cofunction:: create_redis(\*args, \**kw) Wrapper around :func:`aioredis.create_redis`. .. cofunction:: create_pool(\*args, \**kw) Wrapper around :func:`aioredis.create_pool`. .. attribute:: redis Redis client instance. .. attribute:: pool RedisPool instance. .. attribute:: server Redis server instance info. Namedtuple with following properties: name server instance name. port Bind port. unixsocket Bind unixsocket path. version Redis server version tuple. .. attribute:: serverB Second predefined Redis server instance info. .. function:: start_server(name) Start Redis server instance. Redis instances are cached by name. :return: server info tuple, see :attr:`server`. :rtype: tuple .. function:: ssl_proxy(unsecure_port) Start SSL proxy. :param int unsecure_port: Redis server instance port :return: secure_port and ssl_context pair :rtype: tuple Helpers ~~~~~~~ :mod:`aioredis` also updates :term:`pytest`'s namespace with several helpers. .. 
function:: pytest.redis_version(\*version, reason) Marks test with minimum redis version to run. Example: .. code-block:: python @pytest.redis_version(3, 2, 0, reason="HSTRLEN new in redis 3.2.0") def test_hstrlen(redis): pass .. function:: pytest.logs(logger, level=None) Adopted version of :meth:`unittest.TestCase.assertLogs`, see it for details. Example: .. code-block:: python def test_logs(create_connection, server): with pytest.logs('aioredis', 'DEBUG') as cm: conn = yield from create_connection(server.tcp_address) assert cm.output[0].startswith( 'DEBUG:aioredis:Creating tcp connection') .. function:: pytest.assert_almost_equal(first, second, places=None, \ msg=None, delta=None) Adopted version of :meth:`unittest.TestCase.assertAlmostEqual`. .. function:: pytest.raises_regex(exc_type, message) Adopted version of :meth:`unittest.TestCase.assertRaisesRegex`. aioredis-1.0.0/docs/index.rst0000644000175000017500000000446713203624357016710 0ustar alexeyalexey00000000000000.. aioredis documentation master file, created by sphinx-quickstart on Thu Jun 12 22:57:11 2014. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. aioredis ======== asyncio (:pep:`3156`) Redis client library. The library is intended to provide simple and clear interface to Redis based on :term:`asyncio`. Features -------- ================================ ============================== :term:`hiredis` parser Yes Pure-python parser Yes Low-level & High-level APIs Yes Connections Pool Yes Pipelining support Yes Pub/Sub support Yes Sentinel support Yes [1]_ Redis Cluster support WIP Trollius (python 2.7) No Tested CPython versions `3.5, 3.6 `_ [2]_ Tested PyPy3 versions `5.9.0 `_ Tested for Redis server `2.6, 2.8, 3.0, 3.2, 4.0 `_ Support for dev Redis server through low-level API ================================ ============================== .. [1] Sentinel support is available in master branch. 
This feature is not yet stable and may have some issues. .. [2] For Python 3.3, 3.4 support use aioredis v0.3. Installation ------------ The easiest way to install aioredis is by using the package on PyPi:: pip install aioredis Requirements ------------ - Python 3.5.3+ - :term:`hiredis` Benchmarks ---------- Benchmarks can be found here: https://github.com/popravich/python-redis-benchmark Contribute ---------- - Issue Tracker: https://github.com/aio-libs/aioredis/issues - Source Code: https://github.com/aio-libs/aioredis - Contributor's guide: :doc:`devel` Feel free to file an issue or make pull request if you find any bugs or have some suggestions for library improvement. License ------- The aioredis is offered under `MIT license`_. ---- Contents ======== .. toctree:: :maxdepth: 3 start migration api_reference mixins abc mpsc sentinel examples devel releases glossary Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` .. _MIT license: https://github.com/aio-libs/aioredis/blob/master/LICENSE .. _travis: https://travis-ci.org/aio-libs/aioredis aioredis-1.0.0/docs/api_reference.rst0000644000175000017500000006064013203624357020373 0ustar alexeyalexey00000000000000:mod:`aioredis` --- API Reference ================================= .. highlight:: python3 .. module:: aioredis .. _aioredis-connection: Connection ---------- Redis Connection is the core function of the library. Connection instances can be used as is or through :ref:`pool` or :ref:`high-level API`. Connection usage is as simple as: .. 
code:: python import asyncio import aioredis async def connect_uri(): conn = await aioredis.create_connection( 'redis://localhost/0') val = await conn.execute('GET', 'my-key') async def connect_tcp(): conn = await aioredis.create_connection( ('localhost', 6379)) val = await conn.execute('GET', 'my-key') async def connect_unixsocket(): conn = await aioredis.create_connection( '/path/to/redis/socket') # or uri 'unix:///path/to/redis/socket?db=1' val = await conn.execute('GET', 'my-key') asyncio.get_event_loop().run_until_complete(connect_tcp()) asyncio.get_event_loop().run_until_complete(connect_unixsocket()) .. cofunction:: create_connection(address, \*, db=0, password=None, ssl=None,\ encoding=None, parser=None, loop=None,\ timeout=None) Creates Redis connection. .. versionchanged:: v0.3.1 ``timeout`` argument added. .. versionchanged:: v1.0 ``parser`` argument added. :param address: An address where to connect. Can be one of the following: * a Redis URI --- ``"redis://host:6379/0?encoding=utf-8"``; * a (host, port) tuple --- ``('localhost', 6379)``; * or a unix domain socket path string --- ``"/path/to/redis.sock"``. :type address: tuple or str :param int db: Redis database index to switch to when connected. :param password: Password to use if redis server instance requires authorization. :type password: str or None :param ssl: SSL context that is passed through to :func:`asyncio.BaseEventLoop.create_connection`. :type ssl: :class:`ssl.SSLContext` or True or None :param encoding: Codec to use for response decoding. :type encoding: str or None :param parser: Protocol parser class. Can be used to set custom protocol reader; expected same interface as :class:`hiredis.Reader`. :type parser: callable or None :param loop: An optional *event loop* instance (uses :func:`asyncio.get_event_loop` if not specified). :type loop: :ref:`EventLoop` :param timeout: Max time to open a connection, otherwise raise :exc:`asyncio.TimeoutError` exception. 
``None`` by default :type timeout: float greater than 0 or None :return: :class:`RedisConnection` instance. .. class:: RedisConnection Bases: :class:`abc.AbcConnection` Redis connection interface. .. attribute:: address Redis server address; either IP-port tuple or unix socket str (*read-only*). IP is either IPv4 or IPv6 depending on resolved host part in initial address. .. versionadded:: v0.2.8 .. attribute:: db Current database index (*read-only*). .. attribute:: encoding Current codec for response decoding (*read-only*). .. attribute:: closed Set to ``True`` if connection is closed (*read-only*). .. attribute:: in_transaction Set to ``True`` when MULTI command was issued (*read-only*). .. attribute:: pubsub_channels *Read-only* dict with subscribed channels. Keys are bytes, values are :class:`~aioredis.Channel` instances. .. attribute:: pubsub_patterns *Read-only* dict with subscribed patterns. Keys are bytes, values are :class:`~aioredis.Channel` instances. .. attribute:: in_pubsub Indicates that connection is in PUB/SUB mode. Provides the number of subscribed channels. *Read-only*. .. method:: execute(command, \*args, encoding=_NOTSET) Execute Redis command. The method is **not a coroutine** itself but instead it writes to underlying transport and returns a :class:`asyncio.Future` waiting for result. :param command: Command to execute :type command: str, bytes, bytearray :param encoding: Keyword-only argument for overriding response decoding. By default will use connection-wide encoding. May be set to None to skip response decoding. :type encoding: str or None :raise TypeError: When any of arguments is None or can not be encoded as bytes. :raise aioredis.ReplyError: For redis error replies. :raise aioredis.ProtocolError: When response can not be decoded and/or connection is broken. :return: Returns bytes or int reply (or str if encoding was set) .. method:: execute_pubsub(command, \*channels_or_patterns) Method to execute Pub/Sub commands. 
The method is not a coroutine itself but returns a :func:`asyncio.gather()` coroutine. Method also accept :class:`aioredis.Channel` instances as command arguments:: >>> ch1 = Channel('A', is_pattern=False, loop=loop) >>> await conn.execute_pubsub('subscribe', ch1) [[b'subscribe', b'A', 1]] .. versionchanged:: v0.3 The method accept :class:`~aioredis.Channel` instances. :param command: One of the following Pub/Sub commands: ``subscribe``, ``unsubscribe``, ``psubscribe``, ``punsubscribe``. :type command: str, bytes, bytearray :param \*channels_or_patterns: Channels or patterns to subscribe connection to or unsubscribe from. At least one channel/pattern is required. :return: Returns a list of subscribe/unsubscribe messages, ex:: >>> await conn.execute_pubsub('subscribe', 'A', 'B') [[b'subscribe', b'A', 1], [b'subscribe', b'B', 2]] .. method:: close() Closes connection. Mark connection as closed and schedule cleanup procedure. All pending commands will be canceled with :exc:`ConnectionForcedCloseError`. .. method:: wait_closed() Coroutine waiting for connection to get closed. .. method:: select(db) Changes current db index to new one. :param int db: New redis database index. :raise TypeError: When ``db`` parameter is not int. :raise ValueError: When ``db`` parameter is less then 0. :return True: Always returns True or raises exception. .. method:: auth(password) Send AUTH command. :param str password: Plain-text password :return bool: True if redis replied with 'OK'. ---- .. _aioredis-pool: Connections Pool ---------------- The library provides connections pool. The basic usage is as follows: .. code:: python import aioredis async def sample_pool(): pool = await aioredis.create_pool('redis://localhost') val = await pool.execute('get', 'my-key') .. _aioredis-create_pool: .. 
function:: create_pool(address, \*, db=0, password=None, ssl=None, \ encoding=None, minsize=1, maxsize=10, \ parser=None, loop=None, \ create_connection_timeout=None, \ pool_cls=None, connection_cls=None) A :ref:`coroutine` that instantiates a pool of :class:`~.RedisConnection`. .. versionchanged:: v0.2.7 ``minsize`` default value changed from 10 to 1. .. versionchanged:: v0.2.8 Disallow arbitrary ConnectionsPool maxsize. .. deprecated:: v0.2.9 *commands_factory* argument is deprecated and will be removed in *v1.0*. .. versionchanged:: v0.3.2 ``create_connection_timeout`` argument added. .. versionchanged: v1.0 ``commands_factory`` argument has been dropped. .. versionadded:: v1.0 ``parser``, ``pool_cls`` and ``connection_cls`` arguments added. :param address: An address where to connect. Can be one of the following: * a Redis URI --- ``"redis://host:6379/0?encoding=utf-8"``; * a (host, port) tuple --- ``('localhost', 6379)``; * or a unix domain socket path string --- ``"/path/to/redis.sock"``. :type address: tuple or str :param int db: Redis database index to switch to when connected. :param password: Password to use if redis server instance requires authorization. :type password: str or None :param ssl: SSL context that is passed through to :func:`asyncio.BaseEventLoop.create_connection`. :type ssl: :class:`ssl.SSLContext` or True or None :param encoding: Codec to use for response decoding. :type encoding: str or None :param int minsize: Minimum number of free connection to create in pool. ``1`` by default. :param int maxsize: Maximum number of connection to keep in pool. ``10`` by default. Must be greater then ``0``. ``None`` is disallowed. :param parser: Protocol parser class. Can be used to set custom protocol reader; expected same interface as :class:`hiredis.Reader`. :type parser: callable or None :param loop: An optional *event loop* instance (uses :func:`asyncio.get_event_loop` if not specified). 
:type loop: :ref:`EventLoop` :param create_connection_timeout: Max time to open a connection, otherwise raise an :exc:`asyncio.TimeoutError`. ``None`` by default. :type create_connection_timeout: float greater than 0 or None :param pool_cls: Can be used to instantiate custom pool class. This argument **must be** a subclass of :class:`~aioredis.abc.AbcPool`. :type pool_cls: aioredis.abc.AbcPool :param connection_cls: Can be used to make pool instantiate custom connection classes. This argument **must be** a subclass of :class:`~aioredis.abc.AbcConnection`. :type connection_cls: aioredis.abc.AbcConnection :return: :class:`ConnectionsPool` instance. .. class:: ConnectionsPool Bases: :class:`abc.AbcPool` Redis connections pool. .. attribute:: minsize A minimum size of the pool (*read-only*). .. attribute:: maxsize A maximum size of the pool (*read-only*). .. attribute:: size Current pool size --- number of free and used connections (*read-only*). .. attribute:: freesize Current number of free connections (*read-only*). .. attribute:: db Currently selected db index (*read-only*). .. attribute:: encoding Current codec for response decoding (*read-only*). .. attribute:: closed ``True`` if pool is closed. .. versionadded:: v0.2.8 .. method:: execute(command, \*args, \**kwargs) Execute Redis command in a free connection and return :class:`asyncio.Future` waiting for result. This method tries to pick a free connection from pool and send command through it at once (keeping pipelining feature provided by :meth:`aioredis.RedisConnection.execute`). If no connection is found --- returns coroutine waiting for free connection to execute command. .. versionadded:: v1.0 .. method:: execute_pubsub(command, \*channels) Execute Redis (p)subscribe/(p)unsubscribe command. ``ConnectionsPool`` picks separate free connection for pub/sub and uses it until pool is closed or connection is disconnected (unsubscribing from all channels/pattern will leave connection locked for pub/sub use). 
There is no auto-reconnect for Pub/Sub connection as this will hide from user messages loss. Has similar to :meth:`execute` behavior, ie: tries to pick free connection from pool and switch it to pub/sub mode; or fallback to coroutine waiting for free connection and repeating operation. .. versionadded:: v1.0 .. method:: get_connection(command, args=()) Gets free connection from pool returning tuple of (connection, address). If no free connection is found -- None is returned in place of connection. :rtype: tuple(:class:`RedisConnection` or None, str) .. versionadded:: v1.0 .. comethod:: clear() Closes and removes all free connections in the pool. .. comethod:: select(db) Changes db index for all free connections in the pool. :param int db: New database index. .. comethod:: acquire(command=None, args=()) Acquires a connection from *free pool*. Creates new connection if needed. :param command: reserved for future. :param args: reserved for future. :raises aioredis.PoolClosedError: if pool is already closed .. method:: release(conn) Returns used connection back into pool. When returned connection has db index that differs from one in pool the connection will be dropped. When queue of free connections is full the connection will be dropped. .. note:: This method is **not a coroutine**. :param aioredis.RedisConnection conn: A RedisConnection instance. .. method:: close() Close all free and in-progress connections and mark pool as closed. .. versionadded:: v0.2.8 .. comethod:: wait_closed() Wait until pool gets closed (when all connections are closed). .. versionadded:: v0.2.8 ---- .. _aioredis-channel: Pub/Sub Channel object ---------------------- `Channel` object is a wrapper around queue for storing received pub/sub messages. .. class:: Channel(name, is_pattern, loop=None) Bases: :class:`abc.AbcChannel` Object representing Pub/Sub messages queue. It's basically a wrapper around :class:`asyncio.Queue`. .. attribute:: name Holds encoded channel/pattern name. .. 
attribute:: is_pattern Set to True for pattern channels. .. attribute:: is_active Set to True if there are messages in queue and connection is still subscribed to this channel. .. comethod:: get(\*, encoding=None, decoder=None) Coroutine that waits for and returns a message. Return value is message received or None signifying that channel has been unsubscribed and no more messages will be received. :param str encoding: If not None used to decode resulting bytes message. :param callable decoder: If specified used to decode message, ex. :func:`json.loads()` :raise aioredis.ChannelClosedError: If channel is unsubscribed and has no more messages. .. method:: get_json(\*, encoding="utf-8") Shortcut to ``get(encoding="utf-8", decoder=json.loads)`` .. comethod:: wait_message() Waits for message to become available in channel. Main idea is to use it in loops: >>> ch = redis.channels['channel:1'] >>> while await ch.wait_message(): ... msg = await ch.get() .. comethod:: iter() :async-for: :coroutine: Same as :meth:`~.get` method but it is a native coroutine. Usage example:: >>> async for msg in ch.iter(): ... print(msg) .. versionadded:: 0.2.5 Available for Python 3.5 only ---- .. _aioredis-exceptions: Exceptions ---------- .. exception:: RedisError :Bases: :exc:`Exception` Base exception class for aioredis exceptions. .. exception:: ProtocolError :Bases: :exc:`RedisError` Raised when protocol error occurs. When this type of exception is raised connection must be considered broken and must be closed. .. exception:: ReplyError :Bases: :exc:`RedisError` Raised for Redis :term:`error replies`. .. exception:: MaxClientsError :Bases: :exc:`ReplyError` Raised when maximum number of clients has been reached (Redis server configured value). .. exception:: AuthError :Bases: :exc:`ReplyError` Raised when authentication errors occur. .. exception:: ConnectionClosedError :Bases: :exc:`RedisError` Raised if connection to server was lost/closed. .. 
exception:: ConnectionForcedCloseError :Bases: :exc:`ConnectionClosedError` Raised if connection was closed with :func:`RedisConnection.close` method. .. exception:: PipelineError :Bases: :exc:`RedisError` Raised from :meth:`~.commands.TransactionsCommandsMixin.pipeline` if any pipelined command raised error. .. exception:: MultiExecError :Bases: :exc:`PipelineError` Same as :exc:`~.PipelineError` but raised when executing multi_exec block. .. exception:: WatchVariableError :Bases: :exc:`MultiExecError` Raised if watched variable changed (EXEC returns None). Subclass of :exc:`~.MultiExecError`. .. exception:: ChannelClosedError :Bases: :exc:`RedisError` Raised from :meth:`aioredis.Channel.get` when Pub/Sub channel is unsubscribed and messages queue is empty. .. exception:: PoolClosedError :Bases: :exc:`RedisError` Raised from :meth:`aioredis.ConnectionsPool.acquire` when pool is already closed. .. exception:: ReadOnlyError :Bases: :exc:`RedisError` Raised from slave when read-only mode is enabled. .. exception:: MasterNotFoundError :Bases: :exc:`RedisError` Raised by Sentinel client if it can not find requested master. .. exception:: SlaveNotFoundError :Bases: :exc:`RedisError` Raised by Sentinel client if it can not find requested slave. .. exception:: MasterReplyError :Bases: :exc:`RedisError` Raised if establishing connection to master failed with ``RedisError``, for instance because of required or wrong authentication. .. exception:: SlaveReplyError :Bases: :exc:`RedisError` Raised if establishing connection to slave failed with ``RedisError``, for instance because of required or wrong authentication. Exceptions Hierarchy ~~~~~~~~~~~~~~~~~~~~ .. code-block:: guess Exception RedisError ProtocolError ReplyError MaxClientsError AuthError PipelineError MultiExecError WatchVariableError ChannelClosedError ConnectionClosedError ConnectionForcedCloseError PoolClosedError ReadOnlyError MasterNotFoundError SlaveNotFoundError MasterReplyError SlaveReplyError ---- .. 
_aioredis-redis: Commands Interface ------------------ The library provides high-level API implementing simple interface to Redis commands. The usage is as simple as: .. code:: python import aioredis # Create Redis client bound to single non-reconnecting connection. async def single_connection(): redis = await aioredis.create_redis( 'redis://localhost') val = await redis.get('my-key') # Create Redis client bound to connections pool. async def pool_of_connections(): redis = await aioredis.create_redis_pool( 'redis://localhost') val = await redis.get('my-key') # we can also use pub/sub as underlying pool # has several free connections: ch1, ch2 = await redis.subscribe('chan:1', 'chan:2') # publish using free connection await redis.publish('chan:1', 'Hello') await ch1.get() For commands reference --- see :ref:`commands mixins reference `. .. cofunction:: create_redis(address, \*, db=0, password=None, ssl=None,\ encoding=None, commands_factory=Redis,\ parser=None, timeout=None,\ connection_cls=None, loop=None) This :ref:`coroutine` creates high-level Redis interface instance bound to single Redis connection (without auto-reconnect). .. versionadded:: v1.0 ``parser``, ``timeout`` and ``connection_cls`` arguments added. See also :class:`~aioredis.RedisConnection` for parameters description. :param address: An address where to connect. Can be a (host, port) tuple, unix domain socket path string or a Redis URI string. :type address: tuple or str :param int db: Redis database index to switch to when connected. :param password: Password to use if Redis server instance requires authorization. :type password: str or bytes or None :param ssl: SSL context that is passed through to :func:`asyncio.BaseEventLoop.create_connection`. :type ssl: :class:`ssl.SSLContext` or True or None :param encoding: Codec to use for response decoding. 
:type encoding: str or None :param commands_factory: A factory accepting single parameter -- object implementing :class:`~abc.AbcConnection` and returning an instance providing high-level interface to Redis. :class:`Redis` by default. :type commands_factory: callable :param parser: Protocol parser class. Can be used to set custom protocol reader; expected same interface as :class:`hiredis.Reader`. :type parser: callable or None :param timeout: Max time to open a connection, otherwise raise :exc:`asyncio.TimeoutError` exception. ``None`` by default :type timeout: float greater than 0 or None :param connection_cls: Can be used to instantiate custom connection class. This argument **must be** a subclass of :class:`~aioredis.abc.AbcConnection`. :type connection_cls: aioredis.abc.AbcConnection :param loop: An optional *event loop* instance (uses :func:`asyncio.get_event_loop` if not specified). :type loop: :ref:`EventLoop` :returns: Redis client (result of ``commands_factory`` call), :class:`Redis` by default. .. cofunction:: create_redis_pool(address, \*, db=0, password=None, ssl=None,\ encoding=None, commands_factory=Redis,\ minsize=1, maxsize=10,\ parser=None, timeout=None,\ pool_cls=None, connection_cls=None,\ loop=None) This :ref:`coroutine` create high-level Redis client instance bound to connections pool (this allows auto-reconnect and simple pub/sub use). See also :class:`~aioredis.ConnectionsPool` for parameters description. .. versionchanged:: v1.0 ``parser``, ``timeout``, ``pool_cls`` and ``connection_cls`` arguments added. :param address: An address where to connect. Can be a (host, port) tuple, unix domain socket path string or a Redis URI string. :type address: tuple or str :param int db: Redis database index to switch to when connected. :param password: Password to use if Redis server instance requires authorization. :type password: str or bytes or None :param ssl: SSL context that is passed through to :func:`asyncio.BaseEventLoop.create_connection`. 
:type ssl: :class:`ssl.SSLContext` or True or None :param encoding: Codec to use for response decoding. :type encoding: str or None :param commands_factory: A factory accepting single parameter -- object implementing :class:`~abc.AbcConnection` interface and returning an instance providing high-level interface to Redis. :class:`Redis` by default. :type commands_factory: callable :param int minsize: Minimum number of connections to initialize and keep in pool. Default is 1. :param int maxsize: Maximum number of connections that can be created in pool. Default is 10. :param parser: Protocol parser class. Can be used to set custom protocol reader; expected same interface as :class:`hiredis.Reader`. :type parser: callable or None :param timeout: Max time to open a connection, otherwise raise :exc:`asyncio.TimeoutError` exception. ``None`` by default :type timeout: float greater than 0 or None :param pool_cls: Can be used to instantiate custom pool class. This argument **must be** a subclass of :class:`~aioredis.abc.AbcPool`. :type pool_cls: aioredis.abc.AbcPool :param connection_cls: Can be used to make pool instantiate custom connection classes. This argument **must be** a subclass of :class:`~aioredis.abc.AbcConnection`. :type connection_cls: aioredis.abc.AbcConnection :param loop: An optional *event loop* instance (uses :func:`asyncio.get_event_loop` if not specified). :type loop: :ref:`EventLoop` :returns: Redis client (result of ``commands_factory`` call), :class:`Redis` by default. aioredis-1.0.0/docs/start.rst0000644000175000017500000001077413203624357016744 0ustar alexeyalexey00000000000000.. highlight:: python3 .. module:: aioredis.commands Getting started =============== Commands Pipelining ------------------- Commands pipelining is built-in. Every command is sent to transport at-once (ofcourse if no ``TypeError``/``ValueError`` was raised) When you making a call with ``await`` / ``yield from`` you will be waiting result, and then gather results. 
Simple example show both cases (:download:`get source code<../examples/pipeline.py>`): .. literalinclude:: ../examples/pipeline.py :language: python3 :lines: 9-21 :dedent: 4 .. note:: For convenience :mod:`aioredis` provides :meth:`~TransactionsCommandsMixin.pipeline` method allowing to execute bulk of commands as one (:download:`get source code<../examples/pipeline.py>`): .. literalinclude:: ../examples/pipeline.py :language: python3 :lines: 23-31 :dedent: 4 Multi/Exec transactions ----------------------- :mod:`aioredis` provides several ways for executing transactions: * when using raw connection you can issue ``Multi``/``Exec`` commands manually; * when using :class:`aioredis.Redis` instance you can use :meth:`~TransactionsCommandsMixin.multi_exec` transaction pipeline. :meth:`~TransactionsCommandsMixin.multi_exec` method creates and returns new :class:`~aioredis.commands.MultiExec` object which is used for buffering commands and then executing them inside MULTI/EXEC block. Here is a simple example (:download:`get source code<../examples/transaction2.py>`): .. literalinclude:: ../examples/transaction2.py :language: python3 :lines: 9-15 :linenos: :emphasize-lines: 5 :dedent: 4 As you can notice ``await`` is **only** used at line 5 with ``tr.execute`` and **not with** ``tr.set(...)`` calls. .. warning:: It is very important not to ``await`` buffered command (ie ``tr.set('foo', '123')``) as it will block forever. The following code will block forever:: tr = redis.multi_exec() await tr.incr('foo') # that's all. we've stuck! Pub/Sub mode ------------ :mod:`aioredis` provides support for Redis Publish/Subscribe messaging. To switch connection to subscribe mode you must execute ``subscribe`` command by yield'ing from :meth:`~PubSubCommandsMixin.subscribe` it returns a list of :class:`~aioredis.Channel` objects representing subscribed channels. 
As soon as connection is switched to subscribed mode the channel will receive and store messages (the ``Channel`` object is basically a wrapper around :class:`asyncio.Queue`). To read messages from channel you need to use :meth:`~aioredis.Channel.get` or :meth:`~aioredis.Channel.get_json` coroutines. .. note:: In Pub/Sub mode redis connection can only receive messages or issue (P)SUBSCRIBE / (P)UNSUBSCRIBE commands. Pub/Sub example (:download:`get source code<../examples/pubsub2.py>`): .. literalinclude:: ../examples/pubsub2.py :language: python3 :lines: 6-31 :dedent: 4 .. .. warning:: Using Pub/Sub mode with :class:`~aioredis.Pool` is possible but only within ``with`` block or by explicitly ``acquiring/releasing`` connection. See example below. Pub/Sub example (:download:`get source code<../examples/pool_pubsub.py>`): .. literalinclude:: ../examples/pool_pubsub.py :language: python3 :lines: 13-36 :dedent: 4 Python 3.5 ``async with`` / ``async for`` support ------------------------------------------------- :mod:`aioredis` is compatible with :pep:`492`. :class:`~aioredis.Pool` can be used with :ref:`async with` (:download:`get source code<../examples/pool2.py>`): .. literalinclude:: ../examples/pool2.py :language: python3 :lines: 7-8,20-22 :dedent: 4 It also can be used with ``await``: .. literalinclude:: ../examples/pool2.py :language: python3 :lines: 7-8,26-30 :dedent: 4 New ``scan``-family commands added with support of :ref:`async for` (:download:`get source code<../examples/iscan.py>`): .. literalinclude:: ../examples/iscan.py :language: python3 :lines: 7-9,29-31,34-36,39-41,44-45 :dedent: 4 SSL/TLS support --------------- Though Redis server `does not support data encryption `_ it is still possible to setup Redis server behind SSL proxy. For such cases :mod:`aioredis` library support secure connections through :mod:`asyncio` SSL support. See `BaseEventLoop.create_connection`_ for details. .. 
_data_encryption: http://redis.io/topics/security#data-encryption-support .. _BaseEventLoop.create_connection: https://docs.python.org/3/library/asyncio-eventloop.html#creating-connections aioredis-1.0.0/CHANGES.txt0000644000175000017500000001417413203624462015731 0ustar alexeyalexey00000000000000Changes ------- 1.0.0 (2017-11-17) ^^^^^^^^^^^^^^^^^^ **NEW**: * **Important!** Drop Python 3.3, 3.4 support; (see `#321 `_, `#323 `_ and `#326 `_); * **Important!** Connections pool has been refactored; now ``create_redis`` function will yield ``Redis`` instance instead of ``RedisPool`` (see `#129 `_); * **Important!** Change sorted set commands reply format: return list of tuples instead of plain list for commands accepting ``withscores`` argument (see `#334 `_); * **Important!** Change ``hscan`` command reply format: return list of tuples instead of mixed key-value list (see `#335 `_); * Implement Redis URI support as supported ``address`` argument value (see `#322 `_); * Dropped ``create_reconnecting_redis``, ``create_redis_pool`` should be used instead; * Implement custom ``StreamReader`` (see `#273 `_); * Implement Sentinel support (see `#181 `_); * Implement pure-python parser (see `#212 `_); * Add ``migrate_keys`` command (see `#187 `_); * Add ``zrevrangebylex`` command (see `#201 `_); * Add ``command``, ``command_count``, ``command_getkeys`` and ``command_info`` commands (see `#229 `_); * Add ``ping`` support in pubsub connection (see `#264 `_); * Add ``exist`` parameter to ``zadd`` command (see `#288 `_); * Add ``MaxClientsError`` and implement ``ReplyError`` specialization (see `#325 `_); * Add ``encoding`` parameter to sorted set commands (see `#289 `_); **FIX**: * Fix ``CancelledError`` in ``conn._reader_task`` (see `#301 `_); * Fix pending commands cancellation with ``CancelledError``, use explicit exception instead of calling ``cancel()`` method (see `#316 `_); * Correct error message on Sentinel discovery of master/slave with password (see `#327 `_); * Fix 
``bytearray`` support as command argument (see `#329 `_); * Fix critical bug in patched asyncio.Lock (see `#256 `_); * Fix Multi/Exec transaction canceled error (see `#225 `_); * Add missing arguments to ``create_redis`` and ``create_redis_pool``; * Fix deprecation warning (see `#191 `_); * Make correct ``__aiter__()`` (see `#192 `_); * Backward compatibility fix for ``with (yield from pool) as conn:`` (see `#205 `_); * Fixed pubsub receiver stop() (see `#211 `_); **MISC**: * Multiple test fixes; * Add PyPy3 to build matrix; * Update dependencies versions; * Add missing Python 3.6 classifier; 0.3.5 (2017-11-08) ^^^^^^^^^^^^^^^^^^ **FIX**: * Fix for indistinguishable futures cancellation with ``asyncio.CancelledError`` (see `#316 `_), cherry-picked from master; 0.3.4 (2017-10-25) ^^^^^^^^^^^^^^^^^^ **FIX**: * Fix time command result decoding when using connection-wide encoding setting (see `#266 `_); 0.3.3 (2017-06-30) ^^^^^^^^^^^^^^^^^^ **FIX**: * Critical bug fixed in patched asyncio.Lock (see `#256 `_); 0.3.2 (2017-06-21) ^^^^^^^^^^^^^^^^^^ **NEW**: * Added ``zrevrangebylex`` command (see `#201 `_), cherry-picked from master; * Add connection timeout (see `#221 `_), cherry-picked from master; **FIX**: * Fixed pool close warning (see `#239 `_ and `#236 `_), cherry-picked from master; * Fixed asyncio Lock deadlock issue (see `#231 `_ and `#241 `_); 0.3.1 (2017-05-09) ^^^^^^^^^^^^^^^^^^ **FIX**: * Fix pubsub Receiver missing iter() method (see `#203 `_); 0.3.0 (2017-01-11) ^^^^^^^^^^^^^^^^^^ **NEW**: * Pub/Sub connection commands accept ``Channel`` instances (see `#168 `_); * Implement new Pub/Sub MPSC (multi-producers, single-consumer) Queue -- ``aioredis.pubsub.Receiver`` (see `#176 `_); * Add ``aioredis.abc`` module providing abstract base classes defining interface for basic lib components; (see `#176 `_); * Implement Geo commands support (see `#177 `_ and `#179 `_); **FIX**: * Minor tests fixes; **MISC**: * Update examples and docs to use ``async``/``await`` 
syntax also keeping ``yield from`` examples for history (see `#173 `_); * Reflow Travis CI configuration; add Python 3.6 section (see `#170 `_); * Add AppVeyor integration to run tests on Windows (see `#180 `_); * Update multiple development requirements; aioredis-1.0.0/CONTRIBUTORS.txt0000644000175000017500000000071613203624357016616 0ustar alexeyalexey00000000000000Contributors ------------ Adam Rothman Aider Ibragimov Alexander Shorin Aliaksei Urbanski Andrew Svetlov Anton Verinov Dima Kruk Hugo Ihor Gorobets Ihor Liubymov James Hilliard Jan Špaček Jeff Moser SeungHyun Hwang Leonid Shvechikov Manuel Miranda Marijn Giesen Martin Nickolai Novik Pau Freixes Paul Colomiets Samuel Colvin Samuel Dion-Girardeau Taku Fukada Taras Voinarovskyi Thanos Lefteris Thomas Steinacher Volodymyr Hotsyk aioredis-1.0.0/setup.cfg0000644000175000017500000000054513203634127015735 0ustar alexeyalexey00000000000000[tool:pytest] minversion = 2.9.1 addopts = --cov-report=term --cov-report=html restpaths = tests markers = run_loop: Mark coroutine to be run with asyncio loop. 
redis_version(*version, reason): Mark test expecting minimum Redis version skip(reason): Skip test [coverage:run] branch = true source = aioredis,tests [egg_info] tag_build = tag_date = 0 aioredis-1.0.0/aioredis.egg-info/0000755000175000017500000000000013203634127017401 5ustar alexeyalexey00000000000000aioredis-1.0.0/aioredis.egg-info/SOURCES.txt0000644000175000017500000000435313203634127021272 0ustar alexeyalexey00000000000000CHANGES.txt CONTRIBUTORS.txt LICENSE MANIFEST.in README.rst setup.cfg setup.py aioredis/__init__.py aioredis/abc.py aioredis/connection.py aioredis/errors.py aioredis/locks.py aioredis/log.py aioredis/parser.py aioredis/pool.py aioredis/pubsub.py aioredis/stream.py aioredis/util.py aioredis.egg-info/PKG-INFO aioredis.egg-info/SOURCES.txt aioredis.egg-info/dependency_links.txt aioredis.egg-info/requires.txt aioredis.egg-info/top_level.txt aioredis/commands/__init__.py aioredis/commands/cluster.py aioredis/commands/generic.py aioredis/commands/geo.py aioredis/commands/hash.py aioredis/commands/hyperloglog.py aioredis/commands/list.py aioredis/commands/pubsub.py aioredis/commands/scripting.py aioredis/commands/server.py aioredis/commands/set.py aioredis/commands/sorted_set.py aioredis/commands/string.py aioredis/commands/transaction.py aioredis/sentinel/__init__.py aioredis/sentinel/commands.py aioredis/sentinel/pool.py docs/abc.rst docs/api_reference.rst docs/devel.rst docs/examples.rst docs/glossary.rst docs/index.rst docs/migration.rst docs/mixins.rst docs/mpsc.rst docs/releases.rst docs/sentinel.rst docs/start.rst docs/_build/man/aioredis.1 examples/commands.py examples/connection.py examples/iscan.py examples/pipeline.py examples/pool.py examples/pool2.py examples/pool_pubsub.py examples/pubsub.py examples/pubsub2.py examples/scan.py examples/sentinel.py examples/transaction.py examples/transaction2.py tests/coerced_keys_dict_test.py tests/conftest.py tests/connection_commands_test.py tests/connection_test.py tests/encode_command_test.py 
tests/errors_test.py tests/generic_commands_test.py tests/geo_commands_test.py tests/hash_commands_test.py tests/hyperloglog_commands_test.py tests/integration_test.py tests/list_commands_test.py tests/locks_test.py tests/multi_exec_test.py tests/parse_url_test.py tests/pool_test.py tests/pubsub_commands_test.py tests/pubsub_receiver_test.py tests/pyreader_test.py tests/scripting_commands_test.py tests/sentinel_commands_test.py tests/sentinel_failover_test.py tests/server_commands_test.py tests/set_commands_test.py tests/sorted_set_commands_test.py tests/ssl_test.py tests/stream_test.py tests/string_commands_test.py tests/task_cancellation.py tests/task_cancellation_test.py tests/transaction_commands_test.pyaioredis-1.0.0/aioredis.egg-info/requires.txt0000644000175000017500000000002613203634127021777 0ustar alexeyalexey00000000000000async-timeout hiredis aioredis-1.0.0/aioredis.egg-info/dependency_links.txt0000644000175000017500000000000113203634127023447 0ustar alexeyalexey00000000000000 aioredis-1.0.0/aioredis.egg-info/top_level.txt0000644000175000017500000000001113203634127022123 0ustar alexeyalexey00000000000000aioredis aioredis-1.0.0/aioredis.egg-info/PKG-INFO0000644000175000017500000003245213203634127020504 0ustar alexeyalexey00000000000000Metadata-Version: 1.1 Name: aioredis Version: 1.0.0 Summary: asyncio (PEP 3156) Redis support Home-page: https://github.com/aio-libs/aioredis Author: Alexey Popravka Author-email: alexey.popravka@horsedevel.com License: MIT Description-Content-Type: UNKNOWN Description: aioredis ======== asyncio (PEP 3156) Redis client library. .. image:: https://travis-ci.org/aio-libs/aioredis.svg?branch=master :target: https://travis-ci.org/aio-libs/aioredis .. image:: https://codecov.io/gh/aio-libs/aioredis/branch/master/graph/badge.svg :target: https://codecov.io/gh/aio-libs/aioredis .. 
image:: https://ci.appveyor.com/api/projects/status/wngyx6s98o6hsxmt/branch/master?svg=true :target: https://ci.appveyor.com/project/popravich/aioredis Features -------- ================================ ============================== hiredis_ parser Yes Pure-python parser Yes Low-level & High-level APIs Yes Connections Pool Yes Pipelining support Yes Pub/Sub support Yes SSL/TLS support Yes Sentinel support Yes [1]_ Redis Cluster support WIP Trollius (python 2.7) No Tested CPython versions `3.5, 3.6 `_ [2]_ Tested PyPy3 versions `5.9.0 `_ Tested for Redis server `2.6, 2.8, 3.0, 3.2, 4.0 `_ Support for dev Redis server through low-level API ================================ ============================== .. [1] Sentinel support is available in master branch. This feature is not yet stable and may have some issues. .. [2] For Python 3.3, 3.4 support use aioredis v0.3. Documentation ------------- http://aioredis.readthedocs.io/ Usage examples -------------- Simple low-level interface: .. code:: python import asyncio import aioredis loop = asyncio.get_event_loop() async def go(): conn = await aioredis.create_connection( 'redis://localhost', loop=loop) await conn.execute('set', 'my-key', 'value') val = await conn.execute('get', 'my-key') print(val) conn.close() await conn.wait_closed() loop.run_until_complete(go()) # will print 'value' Simple high-level interface: .. code:: python import asyncio import aioredis loop = asyncio.get_event_loop() async def go(): redis = await aioredis.create_redis( 'redis://localhost', loop=loop) await redis.set('my-key', 'value') val = await redis.get('my-key') print(val) redis.close() await redis.wait_closed() loop.run_until_complete(go()) # will print 'value' Connections pool: .. 
code:: python import asyncio import aioredis loop = asyncio.get_event_loop() async def go(): pool = await aioredis.create_pool( 'redis://localhost', minsize=5, maxsize=10, loop=loop) await pool.execute('set', 'my-key', 'value') print(await pool.execute('get', 'my-key')) # graceful shutdown pool.close() await pool.wait_closed() loop.run_until_complete(go()) Requirements ------------ * Python_ 3.5.3+ * hiredis_ .. note:: hiredis is preferred requirement. Pure-python protocol parser is implemented as well and can be used through ``parser`` parameter. Benchmarks ---------- Benchmarks can be found here: https://github.com/popravich/python-redis-benchmark Discussion list --------------- *aio-libs* google group: https://groups.google.com/forum/#!forum/aio-libs Or gitter room: https://gitter.im/aio-libs/Lobby License ------- The aioredis is offered under MIT license. .. _Python: https://www.python.org .. _hiredis: https://pypi.python.org/pypi/hiredis .. _travis: https://travis-ci.org/aio-libs/aioredis Changes ------- 1.0.0 (2017-11-17) ^^^^^^^^^^^^^^^^^^ **NEW**: * **Important!** Drop Python 3.3, 3.4 support; (see `#321 `_, `#323 `_ and `#326 `_); * **Important!** Connections pool has been refactored; now ``create_redis`` function will yield ``Redis`` instance instead of ``RedisPool`` (see `#129 `_); * **Important!** Change sorted set commands reply format: return list of tuples instead of plain list for commands accepting ``withscores`` argument (see `#334 `_); * **Important!** Change ``hscan`` command reply format: return list of tuples instead of mixed key-value list (see `#335 `_); * Implement Redis URI support as supported ``address`` argument value (see `#322 `_); * Dropped ``create_reconnecting_redis``, ``create_redis_pool`` should be used instead; * Implement custom ``StreamReader`` (see `#273 `_); * Implement Sentinel support (see `#181 `_); * Implement pure-python parser (see `#212 `_); * Add ``migrate_keys`` command (see `#187 `_); * Add ``zrevrangebylex`` 
command (see `#201 `_); * Add ``command``, ``command_count``, ``command_getkeys`` and ``command_info`` commands (see `#229 `_); * Add ``ping`` support in pubsub connection (see `#264 `_); * Add ``exist`` parameter to ``zadd`` command (see `#288 `_); * Add ``MaxClientsError`` and implement ``ReplyError`` specialization (see `#325 `_); * Add ``encoding`` parameter to sorted set commands (see `#289 `_); **FIX**: * Fix ``CancelledError`` in ``conn._reader_task`` (see `#301 `_); * Fix pending commands cancellation with ``CancelledError``, use explicit exception instead of calling ``cancel()`` method (see `#316 `_); * Correct error message on Sentinel discovery of master/slave with password (see `#327 `_); * Fix ``bytearray`` support as command argument (see `#329 `_); * Fix critical bug in patched asyncio.Lock (see `#256 `_); * Fix Multi/Exec transaction canceled error (see `#225 `_); * Add missing arguments to ``create_redis`` and ``create_redis_pool``; * Fix deprecation warning (see `#191 `_); * Make correct ``__aiter__()`` (see `#192 `_); * Backward compatibility fix for ``with (yield from pool) as conn:`` (see `#205 `_); * Fixed pubsub receiver stop() (see `#211 `_); **MISC**: * Multiple test fixes; * Add PyPy3 to build matrix; * Update dependencies versions; * Add missing Python 3.6 classifier; 0.3.5 (2017-11-08) ^^^^^^^^^^^^^^^^^^ **FIX**: * Fix for indistinguishable futures cancellation with ``asyncio.CancelledError`` (see `#316 `_), cherry-picked from master; 0.3.4 (2017-10-25) ^^^^^^^^^^^^^^^^^^ **FIX**: * Fix time command result decoding when using connection-wide encoding setting (see `#266 `_); 0.3.3 (2017-06-30) ^^^^^^^^^^^^^^^^^^ **FIX**: * Critical bug fixed in patched asyncio.Lock (see `#256 `_); 0.3.2 (2017-06-21) ^^^^^^^^^^^^^^^^^^ **NEW**: * Added ``zrevrangebylex`` command (see `#201 `_), cherry-picked from master; * Add connection timeout (see `#221 `_), cherry-picked from master; **FIX**: * Fixed pool close warning (see `#239 `_ and `#236 `_), 
cherry-picked from master; * Fixed asyncio Lock deadlock issue (see `#231 `_ and `#241 `_); 0.3.1 (2017-05-09) ^^^^^^^^^^^^^^^^^^ **FIX**: * Fix pubsub Receiver missing iter() method (see `#203 `_); 0.3.0 (2017-01-11) ^^^^^^^^^^^^^^^^^^ **NEW**: * Pub/Sub connection commands accept ``Channel`` instances (see `#168 `_); * Implement new Pub/Sub MPSC (multi-producers, single-consumer) Queue -- ``aioredis.pubsub.Receiver`` (see `#176 `_); * Add ``aioredis.abc`` module providing abstract base classes defining interface for basic lib components; (see `#176 `_); * Implement Geo commands support (see `#177 `_ and `#179 `_); **FIX**: * Minor tests fixes; **MISC**: * Update examples and docs to use ``async``/``await`` syntax also keeping ``yield from`` examples for history (see `#173 `_); * Reflow Travis CI configuration; add Python 3.6 section (see `#170 `_); * Add AppVeyor integration to run tests on Windows (see `#180 `_); * Update multiple development requirements; Platform: POSIX Classifier: License :: OSI Approved :: MIT License Classifier: Development Status :: 4 - Beta Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3 :: Only Classifier: Operating System :: POSIX Classifier: Environment :: Web Environment Classifier: Intended Audience :: Developers Classifier: Topic :: Software Development Classifier: Topic :: Software Development :: Libraries Classifier: Framework :: AsyncIO aioredis-1.0.0/tests/0000755000175000017500000000000013203634127015252 5ustar alexeyalexey00000000000000aioredis-1.0.0/tests/coerced_keys_dict_test.py0000664000175000017500000000226113012410141022312 0ustar alexeyalexey00000000000000import pytest from aioredis.util import coerced_keys_dict def test_simple(): d = coerced_keys_dict() assert d == {} d = coerced_keys_dict({b'a': 'b', b'c': 'd'}) assert 'a' in d 
assert b'a' in d assert 'c' in d assert b'c' in d assert d == {b'a': 'b', b'c': 'd'} def test_invalid_init(): d = coerced_keys_dict({'foo': 'bar'}) assert d == {'foo': 'bar'} assert 'foo' not in d assert b'foo' not in d with pytest.raises(KeyError): d['foo'] with pytest.raises(KeyError): d[b'foo'] d = coerced_keys_dict() d.update({'foo': 'bar'}) assert d == {'foo': 'bar'} assert 'foo' not in d assert b'foo' not in d with pytest.raises(KeyError): d['foo'] with pytest.raises(KeyError): d[b'foo'] def test_valid_init(): d = coerced_keys_dict({b'foo': 'bar'}) assert d == {b'foo': 'bar'} assert 'foo' in d assert b'foo' in d assert d['foo'] == 'bar' assert d[b'foo'] == 'bar' d = coerced_keys_dict() d.update({b'foo': 'bar'}) assert d == {b'foo': 'bar'} assert 'foo' in d assert b'foo' in d assert d['foo'] == 'bar' assert d[b'foo'] == 'bar' aioredis-1.0.0/tests/multi_exec_test.py0000644000175000017500000000205213203624357021024 0ustar alexeyalexey00000000000000import asyncio from unittest import mock from aioredis.commands import MultiExec from aioredis.commands import Redis def test_global_loop(): conn = mock.Mock(spec=( 'execute closed _transaction_error' .split())) try: old_loop = asyncio.get_event_loop() except (AssertionError, RuntimeError): old_loop = None loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) tr = MultiExec(conn, commands_factory=Redis) assert tr._loop is loop def make_fut(cmd, *args, **kw): fut = asyncio.get_event_loop().create_future() if cmd == 'PING': fut.set_result(b'QUEUED') elif cmd == 'EXEC': fut.set_result([b'PONG']) else: fut.set_result(b'OK') return fut conn.execute.side_effect = make_fut conn.closed = False conn._transaction_error = None async def go(): tr.ping() res = await tr.execute() assert res == [b'PONG'] loop.run_until_complete(go()) asyncio.set_event_loop(old_loop) aioredis-1.0.0/tests/conftest.py0000644000175000017500000005373413203624357017471 0ustar alexeyalexey00000000000000import asyncio import pytest import socket 
import subprocess import sys import contextlib import os import ssl import time import logging import tempfile import atexit from collections import namedtuple from urllib.parse import urlencode, urlunparse from async_timeout import timeout as async_timeout import aioredis import aioredis.sentinel TCPAddress = namedtuple('TCPAddress', 'host port') RedisServer = namedtuple('RedisServer', 'name tcp_address unixsocket version password') SentinelServer = namedtuple('SentinelServer', 'name tcp_address unixsocket version masters') # Public fixtures @pytest.yield_fixture def loop(): """Creates new event loop.""" loop = asyncio.new_event_loop() asyncio.set_event_loop(None) try: yield loop finally: if hasattr(loop, 'is_closed'): closed = loop.is_closed() else: closed = loop._closed # XXX if not closed: loop.call_soon(loop.stop) loop.run_forever() loop.close() @pytest.fixture(scope='session') def unused_port(): """Gets random free port.""" def fun(): with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind(('127.0.0.1', 0)) return s.getsockname()[1] return fun @pytest.fixture def create_connection(_closable, loop): """Wrapper around aioredis.create_connection.""" async def f(*args, **kw): kw.setdefault('loop', loop) conn = await aioredis.create_connection(*args, **kw) _closable(conn) return conn return f @pytest.fixture(params=[ aioredis.create_redis, aioredis.create_redis_pool], ids=['single', 'pool']) def create_redis(_closable, loop, request): """Wrapper around aioredis.create_redis.""" factory = request.param async def f(*args, **kw): kw.setdefault('loop', loop) redis = await factory(*args, **kw) _closable(redis) return redis return f @pytest.fixture def create_pool(_closable, loop): """Wrapper around aioredis.create_pool.""" async def f(*args, **kw): kw.setdefault('loop', loop) redis = await aioredis.create_pool(*args, **kw) _closable(redis) return redis return f @pytest.fixture def create_sentinel(_closable, loop): """Helper instantiating RedisSentinel 
client.""" async def f(*args, **kw): kw.setdefault('loop', loop) client = await aioredis.sentinel.create_sentinel(*args, **kw) _closable(client) return client return f @pytest.fixture def pool(create_pool, server, loop): """Returns RedisPool instance.""" pool = loop.run_until_complete( create_pool(server.tcp_address, loop=loop)) return pool @pytest.fixture def redis(create_redis, server, loop): """Returns Redis client instance.""" redis = loop.run_until_complete( create_redis(server.tcp_address, loop=loop)) loop.run_until_complete(redis.flushall()) return redis @pytest.fixture def redis_sentinel(create_sentinel, sentinel, loop): """Returns Redis Sentinel client instance.""" redis_sentinel = loop.run_until_complete( create_sentinel([sentinel.tcp_address], loop=loop)) assert loop.run_until_complete(redis_sentinel.ping()) == b'PONG' return redis_sentinel @pytest.yield_fixture def _closable(loop): conns = [] try: yield conns.append finally: waiters = [] while conns: conn = conns.pop(0) conn.close() waiters.append(conn.wait_closed()) if waiters: loop.run_until_complete(asyncio.gather(*waiters, loop=loop)) @pytest.fixture(scope='session') def server(start_server): """Starts redis-server instance.""" return start_server('A') @pytest.fixture(scope='session') def serverB(start_server): """Starts redis-server instance.""" return start_server('B') @pytest.fixture(scope='session') def sentinel(start_sentinel, request, start_server): """Starts redis-sentinel instance with one master -- masterA.""" # Adding master+slave for normal (no failover) tests: master_no_fail = start_server('master-no-fail') start_server('slave-no-fail', slaveof=master_no_fail) # Adding master+slave for failover test; masterA = start_server('masterA') start_server('slaveA', slaveof=masterA) return start_sentinel('main', masterA, master_no_fail) @pytest.fixture(params=['path', 'query']) def server_tcp_url(server, request): def make(**kwargs): netloc = '{0.host}:{0.port}'.format(server.tcp_address) path = 
'' if request.param == 'path': if 'password' in kwargs: netloc = ':{0}@{1.host}:{1.port}'.format( kwargs.pop('password'), server.tcp_address) if 'db' in kwargs: path = '/{}'.format(kwargs.pop('db')) query = urlencode(kwargs) return urlunparse(('redis', netloc, path, '', query, '')) return make @pytest.fixture def server_unix_url(server): def make(**kwargs): query = urlencode(kwargs) return urlunparse(('unix', '', server.unixsocket, '', query, '')) return make # Internal stuff # def pytest_addoption(parser): parser.addoption('--redis-server', default=[], action="append", help="Path to redis-server executable," " defaults to `%(default)s`") parser.addoption('--ssl-cafile', default='tests/ssl/cafile.crt', help="Path to testing SSL CA file") parser.addoption('--ssl-dhparam', default='tests/ssl/dhparam.pem', help="Path to testing SSL DH params file") parser.addoption('--ssl-cert', default='tests/ssl/cert.pem', help="Path to testing SSL CERT file") parser.addoption('--uvloop', default=False, action='store_true', help="Run tests with uvloop") def _read_server_version(redis_bin): args = [redis_bin, '--version'] with subprocess.Popen(args, stdout=subprocess.PIPE) as proc: version = proc.stdout.readline().decode('utf-8') for part in version.split(): if part.startswith('v='): break else: raise RuntimeError( "No version info can be found in {}".format(version)) return tuple(map(int, part[2:].split('.'))) @contextlib.contextmanager def config_writer(path): with open(path, 'wt') as f: def write(*args): print(*args, file=f) yield write REDIS_SERVERS = [] VERSIONS = {} def format_version(srv): return 'redis_v{}'.format('.'.join(map(str, VERSIONS[srv]))) @pytest.fixture(scope='session', params=REDIS_SERVERS, ids=format_version) def server_bin(request): """Common for start_server and start_sentinel server bin path parameter. """ return request.param @pytest.fixture(scope='session') def start_server(_proc, request, unused_port, server_bin): """Starts Redis server instance. 
Caches instances by name. ``name`` param -- instance alias ``config_lines`` -- optional list of config directives to put in config (if no config_lines passed -- no config will be generated, for backward compatibility). """ version = _read_server_version(server_bin) verbose = request.config.getoption('-v') > 3 servers = {} def timeout(t): end = time.time() + t while time.time() <= end: yield True raise RuntimeError("Redis startup timeout expired") def maker(name, config_lines=None, *, slaveof=None, password=None): assert slaveof is None or isinstance(slaveof, RedisServer), slaveof if name in servers: return servers[name] port = unused_port() tcp_address = TCPAddress('localhost', port) if sys.platform == 'win32': unixsocket = None else: unixsocket = '/tmp/aioredis.{}.sock'.format(port) dumpfile = 'dump-{}.rdb'.format(port) data_dir = tempfile.gettempdir() dumpfile_path = os.path.join(data_dir, dumpfile) stdout_file = os.path.join(data_dir, 'aioredis.{}.stdout'.format(port)) tmp_files = [dumpfile_path, stdout_file] if config_lines: config = os.path.join(data_dir, 'aioredis.{}.conf'.format(port)) with config_writer(config) as write: write('daemonize no') write('save ""') write('dir ', data_dir) write('dbfilename', dumpfile) write('port', port) if unixsocket: write('unixsocket', unixsocket) tmp_files.append(unixsocket) if password: write('requirepass "{}"'.format(password)) write('# extra config') for line in config_lines: write(line) if slaveof is not None: write("slaveof {0.tcp_address.host} {0.tcp_address.port}" .format(slaveof)) if password: write('masterauth "{}"'.format(password)) args = [config] tmp_files.append(config) else: args = ['--daemonize', 'no', '--save', '""', '--dir', data_dir, '--dbfilename', dumpfile, '--port', str(port), ] if unixsocket: args += [ '--unixsocket', unixsocket, ] if password: args += [ '--requirepass "{}"'.format(password) ] if slaveof is not None: args += [ '--slaveof', str(slaveof.tcp_address.host), str(slaveof.tcp_address.port), ] 
if password: args += [ '--masterauth "{}"'.format(password) ] f = open(stdout_file, 'w') atexit.register(f.close) proc = _proc(server_bin, *args, stdout=f, stderr=subprocess.STDOUT, _clear_tmp_files=tmp_files) with open(stdout_file, 'rt') as f: for _ in timeout(10): assert proc.poll() is None, ( "Process terminated", proc.returncode) log = f.readline() if log and verbose: print(name, ":", log, end='') if 'The server is now ready to accept connections ' in log: break if slaveof is not None: for _ in timeout(10): log = f.readline() if log and verbose: print(name, ":", log, end='') if 'sync: Finished with success' in log: break info = RedisServer(name, tcp_address, unixsocket, version, password) servers.setdefault(name, info) return info return maker @pytest.fixture(scope='session') def start_sentinel(_proc, request, unused_port, server_bin): """Starts Redis Sentinel instances.""" version = _read_server_version(server_bin) verbose = request.config.getoption('-v') > 3 sentinels = {} def timeout(t): end = time.time() + t while time.time() <= end: yield True raise RuntimeError("Redis startup timeout expired") def maker(name, *masters, quorum=1, noslaves=False): key = (name,) + masters if key in sentinels: return sentinels[key] port = unused_port() tcp_address = TCPAddress('localhost', port) data_dir = tempfile.gettempdir() config = os.path.join( data_dir, 'aioredis-sentinel.{}.conf'.format(port)) stdout_file = os.path.join( data_dir, 'aioredis-sentinel.{}.stdout'.format(port)) tmp_files = [config, stdout_file] if sys.platform == 'win32': unixsocket = None else: unixsocket = os.path.join( data_dir, 'aioredis-sentinel.{}.sock'.format(port)) tmp_files.append(unixsocket) with config_writer(config) as write: write('daemonize no') write('save ""') write('port', port) if unixsocket: write('unixsocket', unixsocket) write('loglevel debug') for master in masters: write('sentinel monitor', master.name, '127.0.0.1', master.tcp_address.port, quorum) write('sentinel 
down-after-milliseconds', master.name, '3000') write('sentinel failover-timeout', master.name, '3000') write('sentinel auth-pass', master.name, master.password) f = open(stdout_file, 'w') atexit.register(f.close) proc = _proc(server_bin, config, '--sentinel', stdout=f, stderr=subprocess.STDOUT, _clear_tmp_files=tmp_files) # XXX: wait sentinel see all masters and slaves; all_masters = {m.name for m in masters} if noslaves: all_slaves = {} else: all_slaves = {m.name for m in masters} with open(stdout_file, 'rt') as f: for _ in timeout(30): assert proc.poll() is None, ( "Process terminated", proc.returncode) log = f.readline() if log and verbose: print(name, ":", log, end='') for m in masters: if '# +monitor master {}'.format(m.name) in log: all_masters.discard(m.name) if '* +slave slave' in log and \ '@ {}'.format(m.name) in log: all_slaves.discard(m.name) if not all_masters and not all_slaves: break else: raise RuntimeError("Could not start Sentinel") masters = {m.name: m for m in masters} info = SentinelServer(name, tcp_address, unixsocket, version, masters) sentinels.setdefault(key, info) return info return maker @pytest.fixture(scope='session') def ssl_proxy(_proc, request, unused_port): by_port = {} cafile = os.path.abspath(request.config.getoption('--ssl-cafile')) certfile = os.path.abspath(request.config.getoption('--ssl-cert')) dhfile = os.path.abspath(request.config.getoption('--ssl-dhparam')) assert os.path.exists(cafile), \ "Missing SSL CA file, run `make certificate` to generate new one" assert os.path.exists(certfile), \ "Missing SSL CERT file, run `make certificate` to generate new one" assert os.path.exists(dhfile), \ "Missing SSL DH params, run `make certificate` to generate new one" ssl_ctx = ssl.create_default_context(cafile=cafile) ssl_ctx.check_hostname = False ssl_ctx.verify_mode = ssl.CERT_NONE ssl_ctx.load_dh_params(dhfile) def sockat(unsecure_port): if unsecure_port in by_port: return by_port[unsecure_port] secure_port = unused_port() 
_proc('/usr/bin/socat', 'openssl-listen:{port},' 'dhparam={param},' 'cert={cert},verify=0,fork' .format(port=secure_port, param=dhfile, cert=certfile), 'tcp-connect:localhost:{}' .format(unsecure_port) ) time.sleep(1) # XXX by_port[unsecure_port] = secure_port, ssl_ctx return secure_port, ssl_ctx return sockat @pytest.yield_fixture(scope='session') def _proc(): processes = [] tmp_files = set() def run(*commandline, _clear_tmp_files=(), **kwargs): proc = subprocess.Popen(commandline, **kwargs) processes.append(proc) tmp_files.update(_clear_tmp_files) return proc try: yield run finally: while processes: proc = processes.pop(0) proc.terminate() proc.wait() for path in tmp_files: try: os.remove(path) except OSError: pass @pytest.mark.tryfirst def pytest_pycollect_makeitem(collector, name, obj): if collector.funcnamefilter(name): if not callable(obj): return item = pytest.Function(name, parent=collector) if 'run_loop' in item.keywords: # TODO: re-wrap with asyncio.coroutine if not native coroutine return list(collector._genfunctions(name, obj)) @pytest.mark.tryfirst def pytest_pyfunc_call(pyfuncitem): """ Run asyncio marked test functions in an event loop instead of a normal function call. 
""" if 'run_loop' in pyfuncitem.keywords: marker = pyfuncitem.keywords['run_loop'] funcargs = pyfuncitem.funcargs loop = funcargs['loop'] testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames} loop.run_until_complete( _wait_coro(pyfuncitem.obj, testargs, timeout=marker.kwargs.get('timeout', 15), loop=loop)) return True async def _wait_coro(corofunc, kwargs, timeout, loop): with async_timeout(timeout, loop=loop): return (await corofunc(**kwargs)) def pytest_runtest_setup(item): if 'run_loop' in item.keywords and 'loop' not in item.fixturenames: # inject an event loop fixture for all async tests item.fixturenames.append('loop') def pytest_ignore_collect(path, config): if 'py35' in str(path): if sys.version_info < (3, 5, 0): return True def pytest_collection_modifyitems(session, config, items): for item in items: if 'redis_version' in item.keywords: marker = item.keywords['redis_version'] try: version = VERSIONS[item.callspec.getparam('server_bin')] except (KeyError, ValueError, AttributeError): # TODO: throw noisy warning continue if version < marker.kwargs['version']: item.add_marker(pytest.mark.skip( reason=marker.kwargs['reason'])) if 'ssl_proxy' in item.fixturenames: item.add_marker(pytest.mark.skipif( "not os.path.exists('/usr/bin/socat')", reason="socat package required (apt-get install socat)")) def pytest_configure(config): bins = config.getoption('--redis-server')[:] REDIS_SERVERS[:] = bins or ['/usr/bin/redis-server'] VERSIONS.update({srv: _read_server_version(srv) for srv in REDIS_SERVERS}) assert VERSIONS, ("Expected to detect redis versions", REDIS_SERVERS) if config.getoption('--uvloop'): try: import uvloop except ImportError: raise RuntimeError( "Can not import uvloop, make sure it is installed") asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) def logs(logger, level=None): """Catches logs for given logger and level. See unittest.TestCase.assertLogs for details. 
""" return _AssertLogsContext(logger, level) _LoggingWatcher = namedtuple("_LoggingWatcher", ["records", "output"]) class _CapturingHandler(logging.Handler): """ A logging handler capturing all (raw and formatted) logging output. """ def __init__(self): logging.Handler.__init__(self) self.watcher = _LoggingWatcher([], []) def flush(self): pass def emit(self, record): self.watcher.records.append(record) msg = self.format(record) self.watcher.output.append(msg) class _AssertLogsContext: """Standard unittest's _AssertLogsContext context manager adopted to raise pytest failure. """ LOGGING_FORMAT = "%(levelname)s:%(name)s:%(message)s" def __init__(self, logger_name, level): self.logger_name = logger_name if level: self.level = level else: self.level = logging.INFO self.msg = None def __enter__(self): if isinstance(self.logger_name, logging.Logger): logger = self.logger = self.logger_name else: logger = self.logger = logging.getLogger(self.logger_name) formatter = logging.Formatter(self.LOGGING_FORMAT) handler = _CapturingHandler() handler.setFormatter(formatter) self.watcher = handler.watcher self.old_handlers = logger.handlers[:] self.old_level = logger.level self.old_propagate = logger.propagate logger.handlers = [handler] logger.setLevel(self.level) logger.propagate = False return handler.watcher def __exit__(self, exc_type, exc_value, tb): self.logger.handlers = self.old_handlers self.logger.propagate = self.old_propagate self.logger.setLevel(self.old_level) if exc_type is not None: # let unexpected exceptions pass through return False if len(self.watcher.records) == 0: pytest.fail( "no logs of level {} or higher triggered on {}" .format(logging.getLevelName(self.level), self.logger.name)) def redis_version(*version, reason): assert 1 < len(version) <= 3, version assert all(isinstance(v, int) for v in version), version return pytest.mark.redis_version(version=version, reason=reason) def assert_almost_equal(first, second, places=None, msg=None, delta=None): assert 
not (places is None and delta is None), \ "Both places and delta are not set, please set one" if delta is not None: assert abs(first - second) <= delta else: assert round(abs(first - second), places) == 0 def pytest_namespace(): return { 'assert_almost_equal': assert_almost_equal, 'redis_version': redis_version, 'logs': logs, } aioredis-1.0.0/tests/scripting_commands_test.py0000644000175000017500000000702413203624357022555 0ustar alexeyalexey00000000000000import pytest import asyncio from aioredis import ReplyError @pytest.mark.run_loop async def test_eval(redis): await redis.delete('key:eval', 'value:eval') script = "return 42" res = await redis.eval(script) assert res == 42 key, value = b'key:eval', b'value:eval' script = """ if redis.call('setnx', KEYS[1], ARGV[1]) == 1 then return 'foo' else return 'bar' end """ res = await redis.eval(script, keys=[key], args=[value]) assert res == b'foo' res = await redis.eval(script, keys=[key], args=[value]) assert res == b'bar' script = "return 42" with pytest.raises(TypeError): await redis.eval(script, keys='not:list') with pytest.raises(TypeError): await redis.eval(script, keys=['valid', None]) with pytest.raises(TypeError): await redis.eval(script, args=['valid', None]) with pytest.raises(TypeError): await redis.eval(None) @pytest.mark.run_loop async def test_evalsha(redis): script = b"return 42" sha_hash = await redis.script_load(script) assert len(sha_hash) == 40 res = await redis.evalsha(sha_hash) assert res == 42 key, arg1, arg2 = b'key:evalsha', b'1', b'2' script = "return {KEYS[1], ARGV[1], ARGV[2]}" sha_hash = await redis.script_load(script) res = await redis.evalsha(sha_hash, [key], [arg1, arg2]) assert res == [key, arg1, arg2] with pytest.raises(ReplyError): await redis.evalsha(b'wrong sha hash') with pytest.raises(TypeError): await redis.evalsha(sha_hash, keys=['valid', None]) with pytest.raises(TypeError): await redis.evalsha(sha_hash, args=['valid', None]) with pytest.raises(TypeError): await 
redis.evalsha(None) @pytest.mark.run_loop async def test_script_exists(redis): sha_hash1 = await redis.script_load(b'return 1') sha_hash2 = await redis.script_load(b'return 2') assert len(sha_hash1) == 40 assert len(sha_hash2) == 40 res = await redis.script_exists(sha_hash1, sha_hash1) assert res == [1, 1] no_sha = b'ffffffffffffffffffffffffffffffffffffffff' res = await redis.script_exists(no_sha) assert res == [0] with pytest.raises(TypeError): await redis.script_exists(None) with pytest.raises(TypeError): await redis.script_exists('123', None) @pytest.mark.run_loop async def test_script_flush(redis): sha_hash1 = await redis.script_load(b'return 1') assert len(sha_hash1) == 40 res = await redis.script_exists(sha_hash1) assert res == [1] res = await redis.script_flush() assert res is True res = await redis.script_exists(sha_hash1) assert res == [0] @pytest.mark.run_loop async def test_script_load(redis): sha_hash1 = await redis.script_load(b'return 1') sha_hash2 = await redis.script_load(b'return 2') assert len(sha_hash1) == 40 assert len(sha_hash2) == 40 res = await redis.script_exists(sha_hash1, sha_hash1) assert res == [1, 1] @pytest.mark.run_loop async def test_script_kill(create_redis, loop, server, redis): script = "while (1) do redis.call('TIME') end" other_redis = await create_redis( server.tcp_address, loop=loop) ok = await redis.set('key1', 'value') assert ok is True fut = other_redis.eval(script, keys=['non-existent-key'], args=[10]) await asyncio.sleep(0.1, loop=loop) resp = await redis.script_kill() assert resp is True with pytest.raises(ReplyError): await fut with pytest.raises(ReplyError): await redis.script_kill() aioredis-1.0.0/tests/sentinel_commands_test.py0000644000175000017500000002133413203624357022374 0ustar alexeyalexey00000000000000import asyncio import pytest import sys from aioredis import RedisError, ReplyError, PoolClosedError from aioredis.errors import MasterReplyError from aioredis.sentinel.commands import RedisSentinel pytestmark = 
pytest.redis_version(2, 8, 12, reason="Sentinel v2 required") if sys.platform == 'win32': pytestmark = pytest.mark.skip(reason="unstable on windows") @pytest.mark.run_loop async def test_client_close(redis_sentinel): assert isinstance(redis_sentinel, RedisSentinel) assert not redis_sentinel.closed redis_sentinel.close() assert redis_sentinel.closed with pytest.raises(PoolClosedError): assert (await redis_sentinel.ping()) != b'PONG' await redis_sentinel.wait_closed() @pytest.mark.run_loop async def test_global_loop(sentinel, create_sentinel, loop): asyncio.set_event_loop(loop) # force global loop client = await create_sentinel([sentinel.tcp_address], loop=None) assert client._pool._loop is loop asyncio.set_event_loop(None) @pytest.mark.run_loop async def test_ping(redis_sentinel): assert b'PONG' == (await redis_sentinel.ping()) @pytest.mark.run_loop async def test_master_info(redis_sentinel, sentinel): info = await redis_sentinel.master('master-no-fail') assert isinstance(info, dict) assert info['name'] == 'master-no-fail' assert 'slave' not in info['flags'] assert 's_down' not in info['flags'] assert 'o_down' not in info['flags'] assert 'sentinel' not in info['flags'] assert 'disconnected' not in info['flags'] assert 'master' in info['flags'] for key in ['num-other-sentinels', 'flags', 'quorum', 'ip', 'failover-timeout', 'runid', 'info-refresh', 'config-epoch', 'parallel-syncs', 'role-reported-time', 'last-ok-ping-reply', 'last-ping-reply', 'last-ping-sent', 'name', 'down-after-milliseconds', 'num-slaves', 'port', 'role-reported']: assert key in info if sentinel.version < (3, 2, 0): assert 'pending-commands' in info else: assert 'link-pending-commands' in info assert 'link-refcount' in info @pytest.mark.run_loop async def test_master__auth(create_sentinel, start_sentinel, start_server, loop): master = start_server('master_1', password='123') start_server('slave_1', slaveof=master, password='123') sentinel = start_sentinel('auth_sentinel_1', master) client1 = await 
create_sentinel( [sentinel.tcp_address], password='123', loop=loop) client2 = await create_sentinel( [sentinel.tcp_address], password='111', loop=loop) client3 = await create_sentinel( [sentinel.tcp_address], loop=loop) m1 = client1.master_for(master.name) await m1.set('mykey', 'myval') with pytest.raises(MasterReplyError) as exc_info: m2 = client2.master_for(master.name) await m2.set('mykey', 'myval') assert str(exc_info.value) == ( "('Service master_1 error', AuthError('ERR invalid password',))") with pytest.raises(MasterReplyError): m3 = client3.master_for(master.name) await m3.set('mykey', 'myval') @pytest.mark.run_loop async def test_master__no_auth(create_sentinel, sentinel, loop): client = await create_sentinel( [sentinel.tcp_address], password='123', loop=loop) master = client.master_for('masterA') with pytest.raises(MasterReplyError): await master.set('mykey', 'myval') @pytest.mark.run_loop async def test_master__unknown(redis_sentinel): with pytest.raises(ReplyError): await redis_sentinel.master('unknown-master') @pytest.mark.run_loop async def test_master_address(redis_sentinel, sentinel): _, port = await redis_sentinel.master_address('master-no-fail') assert port == sentinel.masters['master-no-fail'].tcp_address.port @pytest.mark.run_loop async def test_master_address__unknown(redis_sentinel): res = await redis_sentinel.master_address('unknown-master') assert res is None @pytest.mark.run_loop async def test_masters(redis_sentinel): masters = await redis_sentinel.masters() assert isinstance(masters, dict) assert len(masters) >= 1, "At least on masters expected" assert 'master-no-fail' in masters assert isinstance(masters['master-no-fail'], dict) @pytest.mark.run_loop async def test_slave_info(sentinel, redis_sentinel): info = await redis_sentinel.slaves('master-no-fail') assert len(info) == 1 info = info[0] assert isinstance(info, dict) assert 'master' not in info['flags'] assert 's_down' not in info['flags'] assert 'o_down' not in info['flags'] assert 
'sentinel' not in info['flags'] # assert 'disconnected' not in info['flags'] assert 'slave' in info['flags'] keys_set = { 'flags', 'master-host', 'master-link-down-time', 'master-link-status', 'master-port', 'name', 'slave-priority', 'ip', 'runid', 'info-refresh', 'role-reported-time', 'last-ok-ping-reply', 'last-ping-reply', 'last-ping-sent', 'down-after-milliseconds', 'port', 'role-reported', } if sentinel.version < (3, 2, 0): keys_set.add('pending-commands') else: keys_set.add('link-pending-commands') keys_set.add('link-refcount') missing = keys_set - set(info) assert not missing @pytest.mark.run_loop async def test_slave__unknown(redis_sentinel): with pytest.raises(ReplyError): await redis_sentinel.slaves('unknown-master') @pytest.mark.run_loop async def test_sentinels_empty(redis_sentinel): res = await redis_sentinel.sentinels('master-no-fail') assert res == [] with pytest.raises(ReplyError): await redis_sentinel.sentinels('unknown-master') @pytest.mark.run_loop(timeout=30) async def test_sentinels__exist(create_sentinel, start_sentinel, start_server, loop): m1 = start_server('master-two-sentinels') s1 = start_sentinel('peer-sentinel-1', m1, quorum=2, noslaves=True) s2 = start_sentinel('peer-sentinel-2', m1, quorum=2, noslaves=True) redis_sentinel = await create_sentinel( [s1.tcp_address, s2.tcp_address]) while True: info = await redis_sentinel.master('master-two-sentinels') if info['num-other-sentinels'] > 0: break await asyncio.sleep(.2, loop=loop) info = await redis_sentinel.sentinels('master-two-sentinels') assert len(info) == 1 assert 'sentinel' in info[0]['flags'] assert info[0]['port'] in (s1.tcp_address.port, s2.tcp_address.port) @pytest.mark.run_loop async def test_ckquorum(redis_sentinel): assert (await redis_sentinel.check_quorum('master-no-fail')) # change quorum assert (await redis_sentinel.set('master-no-fail', 'quorum', 2)) with pytest.raises(RedisError): await redis_sentinel.check_quorum('master-no-fail') assert (await 
redis_sentinel.set('master-no-fail', 'quorum', 1)) assert (await redis_sentinel.check_quorum('master-no-fail')) @pytest.mark.run_loop async def test_set_option(redis_sentinel): assert (await redis_sentinel.set('master-no-fail', 'quorum', 10)) master = await redis_sentinel.master('master-no-fail') assert master['quorum'] == 10 assert (await redis_sentinel.set('master-no-fail', 'quorum', 1)) master = await redis_sentinel.master('master-no-fail') assert master['quorum'] == 1 with pytest.raises(ReplyError): await redis_sentinel.set('masterA', 'foo', 'bar') @pytest.mark.run_loop async def test_sentinel_role(sentinel, create_redis, loop): redis = await create_redis(sentinel.tcp_address, loop=loop) info = await redis.role() assert info.role == 'sentinel' assert isinstance(info.masters, list) assert 'master-no-fail' in info.masters @pytest.mark.run_loop(timeout=30) async def test_remove(redis_sentinel, start_server, loop): m1 = start_server('master-to-remove') ok = await redis_sentinel.monitor( m1.name, '127.0.0.1', m1.tcp_address.port, 1) assert ok ok = await redis_sentinel.remove(m1.name) assert ok with pytest.raises(ReplyError): await redis_sentinel.remove('unknown-master') @pytest.mark.run_loop(timeout=30) async def test_monitor(redis_sentinel, start_server, loop, unused_port): m1 = start_server('master-to-monitor') ok = await redis_sentinel.monitor( m1.name, '127.0.0.1', m1.tcp_address.port, 1) assert ok _, port = await redis_sentinel.master_address('master-to-monitor') assert port == m1.tcp_address.port aioredis-1.0.0/tests/stream_test.py0000644000175000017500000000231713203624357020165 0ustar alexeyalexey00000000000000import pytest from aioredis.stream import StreamReader from aioredis.parser import PyReader from aioredis.errors import ( ProtocolError, ReplyError ) @pytest.fixture def reader(loop): reader = StreamReader(loop=loop) reader.set_parser( PyReader(protocolError=ProtocolError, replyError=ReplyError) ) return reader @pytest.mark.run_loop async def 
test_feed_and_parse(reader): reader.feed_data(b'+PONG\r\n') assert (await reader.readobj()) == b'PONG' @pytest.mark.run_loop async def test_buffer_available_after_RST(reader): reader.feed_data(b'+PONG\r\n') reader.set_exception(Exception()) assert (await reader.readobj()) == b'PONG' with pytest.raises(Exception): await reader.readobj() def test_feed_with_eof(reader): reader.feed_eof() with pytest.raises(AssertionError): reader.feed_data(b'+PONG\r\n') def test_feed_no_data(reader): assert not reader.feed_data(None) @pytest.mark.parametrize( 'read_method', ['read', 'readline', 'readuntil', 'readexactly'] ) @pytest.mark.run_loop async def test_read_flavors_not_supported(reader, read_method): with pytest.raises(RuntimeError): await getattr(reader, read_method)() aioredis-1.0.0/tests/generic_commands_test.py0000644000175000017500000005332013203624357022167 0ustar alexeyalexey00000000000000import asyncio import time import math import pytest import sys from unittest import mock from aioredis import ReplyError async def add(redis, key, value): ok = await redis.connection.execute('set', key, value) assert ok == b'OK' @pytest.mark.run_loop async def test_delete(redis): await add(redis, 'my-key', 123) await add(redis, 'other-key', 123) res = await redis.delete('my-key', 'non-existent-key') assert res == 1 res = await redis.delete('other-key', 'other-key') assert res == 1 with pytest.raises(TypeError): await redis.delete(None) with pytest.raises(TypeError): await redis.delete('my-key', 'my-key', None) @pytest.mark.run_loop async def test_dump(redis): await add(redis, 'my-key', 123) data = await redis.dump('my-key') assert data == mock.ANY assert isinstance(data, (bytes, bytearray)) assert len(data) > 0 data = await redis.dump('non-existent-key') assert data is None with pytest.raises(TypeError): await redis.dump(None) @pytest.mark.run_loop async def test_exists(redis, server): await add(redis, 'my-key', 123) res = await redis.exists('my-key') assert isinstance(res, int) 
assert res == 1 res = await redis.exists('non-existent-key') assert isinstance(res, int) assert res == 0 with pytest.raises(TypeError): await redis.exists(None) if server.version < (3, 0, 3): with pytest.raises(ReplyError): await redis.exists('key-1', 'key-2') @pytest.redis_version( 3, 0, 3, reason='Multi-key EXISTS available since redis>=2.8.0') @pytest.mark.run_loop async def test_exists_multiple(redis): await add(redis, 'my-key', 123) res = await redis.exists('my-key', 'other-key') assert isinstance(res, int) assert res == 1 res = await redis.exists('my-key', 'my-key') assert isinstance(res, int) assert res == 2 res = await redis.exists('foo', 'bar') assert isinstance(res, int) assert res == 0 @pytest.mark.run_loop async def test_expire(redis): await add(redis, 'my-key', 132) res = await redis.expire('my-key', 10) assert res is True res = await redis.connection.execute('TTL', 'my-key') assert res >= 10 await redis.expire('my-key', -1) res = await redis.exists('my-key') assert not res res = await redis.expire('other-key', 1000) assert res is False await add(redis, 'my-key', 1) res = await redis.expire('my-key', 10.0) assert res is True res = await redis.connection.execute('TTL', 'my-key') assert res >= 10 with pytest.raises(TypeError): await redis.expire(None, 123) with pytest.raises(TypeError): await redis.expire('my-key', 'timeout') @pytest.mark.run_loop async def test_expireat(redis): await add(redis, 'my-key', 123) now = math.ceil(time.time()) fut1 = redis.expireat('my-key', now + 10) fut2 = redis.connection.execute('TTL', 'my-key') assert (await fut1) is True assert (await fut2) >= 10 now = time.time() fut1 = redis.expireat('my-key', now + 10) fut2 = redis.connection.execute('TTL', 'my-key') assert (await fut1) is True assert (await fut2) >= 10 res = await redis.expireat('my-key', -1) assert res is True res = await redis.exists('my-key') assert not res await add(redis, 'my-key', 123) res = await redis.expireat('my-key', 0) assert res is True res = await 
redis.exists('my-key') assert not res await add(redis, 'my-key', 123) with pytest.raises(TypeError): await redis.expireat(None, 123) with pytest.raises(TypeError): await redis.expireat('my-key', 'timestamp') @pytest.mark.run_loop async def test_keys(redis): res = await redis.keys('*pattern*') assert res == [] await redis.connection.execute('FLUSHDB') res = await redis.keys('*') assert res == [] await add(redis, 'my-key-1', 1) await add(redis, 'my-key-ab', 1) res = await redis.keys('my-key-?') assert res == [b'my-key-1'] res = await redis.keys('my-key-*') assert sorted(res) == [b'my-key-1', b'my-key-ab'] # test with encoding param res = await redis.keys('my-key-*', encoding='utf-8') assert sorted(res) == ['my-key-1', 'my-key-ab'] with pytest.raises(TypeError): await redis.keys(None) @pytest.mark.run_loop async def test_migrate(create_redis, loop, server, serverB): redisA = await create_redis(server.tcp_address) redisB = await create_redis(serverB.tcp_address, db=2) await add(redisA, 'my-key', 123) await redisB.delete('my-key') assert (await redisA.exists('my-key')) assert not (await redisB.exists('my-key')) ok = await redisA.migrate( 'localhost', serverB.tcp_address.port, 'my-key', 2, 1000) assert ok is True assert not (await redisA.exists('my-key')) assert (await redisB.exists('my-key')) with pytest.raises(TypeError, match="host .* str"): await redisA.migrate(None, 1234, 'key', 1, 23) with pytest.raises(TypeError, match="args .* None"): await redisA.migrate('host', '1234', None, 1, 123) with pytest.raises(TypeError, match="dest_db .* int"): await redisA.migrate('host', 123, 'key', 1.0, 123) with pytest.raises(TypeError, match="timeout .* int"): await redisA.migrate('host', '1234', 'key', 2, None) with pytest.raises(ValueError, match="Got empty host"): await redisA.migrate('', '123', 'key', 1, 123) with pytest.raises(ValueError, match="dest_db .* greater equal 0"): await redisA.migrate('host', 6379, 'key', -1, 1000) with pytest.raises(ValueError, match="timeout .* 
greater equal 0"): await redisA.migrate('host', 6379, 'key', 1, -1000) @pytest.redis_version( 3, 0, 0, reason="Copy/Replace flags available since Redis 3.0") @pytest.mark.run_loop async def test_migrate_copy_replace(create_redis, loop, server, serverB): redisA = await create_redis(server.tcp_address) redisB = await create_redis(serverB.tcp_address, db=0) await add(redisA, 'my-key', 123) await redisB.delete('my-key') ok = await redisA.migrate( 'localhost', serverB.tcp_address.port, 'my-key', 0, 1000, copy=True) assert ok is True assert (await redisA.get('my-key')) == b'123' assert (await redisB.get('my-key')) == b'123' assert (await redisA.set('my-key', 'val')) ok = await redisA.migrate( 'localhost', serverB.tcp_address.port, 'my-key', 2, 1000, replace=True) assert (await redisA.get('my-key')) is None assert (await redisB.get('my-key')) @pytest.redis_version( 3, 0, 6, reason="MIGRATE…KEYS available since Redis 3.0.6") @pytest.mark.skipif( sys.platform == 'win32', reason="Seems to be unavailable in win32 build") @pytest.mark.run_loop async def test_migrate_keys(create_redis, loop, server, serverB): redisA = await create_redis(server.tcp_address) redisB = await create_redis(serverB.tcp_address, db=0) await add(redisA, 'key1', 123) await add(redisA, 'key2', 123) await add(redisA, 'key3', 123) await redisB.delete('key1', 'key2', 'key3') ok = await redisA.migrate_keys( 'localhost', serverB.tcp_address.port, ('key1', 'key2', 'key3', 'non-existing-key'), dest_db=0, timeout=1000) assert ok is True assert (await redisB.get('key1')) == b'123' assert (await redisB.get('key2')) == b'123' assert (await redisB.get('key3')) == b'123' assert (await redisA.get('key1')) is None assert (await redisA.get('key2')) is None assert (await redisA.get('key3')) is None ok = await redisA.migrate_keys( 'localhost', serverB.tcp_address.port, ('key1', 'key2', 'key3'), dest_db=0, timeout=1000) assert not ok ok = await redisB.migrate_keys( 'localhost', server.tcp_address.port, ('key1', 'key2', 
'key3'), dest_db=0, timeout=1000, copy=True) assert ok assert (await redisB.get('key1')) == b'123' assert (await redisB.get('key2')) == b'123' assert (await redisB.get('key3')) == b'123' assert (await redisA.get('key1')) == b'123' assert (await redisA.get('key2')) == b'123' assert (await redisA.get('key3')) == b'123' assert (await redisA.set('key1', 'val')) assert (await redisA.set('key2', 'val')) assert (await redisA.set('key3', 'val')) ok = await redisA.migrate_keys( 'localhost', serverB.tcp_address.port, ('key1', 'key2', 'key3', 'non-existing-key'), dest_db=0, timeout=1000, replace=True) assert ok is True assert (await redisB.get('key1')) == b'val' assert (await redisB.get('key2')) == b'val' assert (await redisB.get('key3')) == b'val' assert (await redisA.get('key1')) is None assert (await redisA.get('key2')) is None assert (await redisA.get('key3')) is None @pytest.mark.run_loop async def test_migrate__exceptions(redis, loop, server, unused_port): await add(redis, 'my-key', 123) assert (await redis.exists('my-key')) with pytest.raises(ReplyError, match="IOERR .* timeout .*"): assert not (await redis.migrate( 'localhost', unused_port(), 'my-key', dest_db=30, timeout=10)) @pytest.redis_version( 3, 0, 6, reason="MIGRATE…KEYS available since Redis 3.0.6") @pytest.mark.skipif( sys.platform == 'win32', reason="Seems to be unavailable in win32 build") @pytest.mark.run_loop async def test_migrate_keys__errors(redis): with pytest.raises(TypeError, match="host .* str"): await redis.migrate_keys(None, 1234, 'key', 1, 23) with pytest.raises(TypeError, match="keys .* list or tuple"): await redis.migrate_keys('host', '1234', None, 1, 123) with pytest.raises(TypeError, match="dest_db .* int"): await redis.migrate_keys('host', 123, ('key',), 1.0, 123) with pytest.raises(TypeError, match="timeout .* int"): await redis.migrate_keys('host', '1234', ('key',), 2, None) with pytest.raises(ValueError, match="Got empty host"): await redis.migrate_keys('', '123', ('key',), 1, 123) with 
pytest.raises(ValueError, match="dest_db .* greater equal 0"): await redis.migrate_keys('host', 6379, ('key',), -1, 1000) with pytest.raises(ValueError, match="timeout .* greater equal 0"): await redis.migrate_keys('host', 6379, ('key',), 1, -1000) with pytest.raises(ValueError, match="keys .* empty"): await redis.migrate_keys('host', '1234', (), 2, 123) @pytest.mark.run_loop async def test_move(redis): await add(redis, 'my-key', 123) assert redis.db == 0 res = await redis.move('my-key', 1) assert res is True with pytest.raises(TypeError): await redis.move(None, 1) with pytest.raises(TypeError): await redis.move('my-key', None) with pytest.raises(ValueError): await redis.move('my-key', -1) with pytest.raises(TypeError): await redis.move('my-key', 'not db') @pytest.mark.run_loop async def test_object_refcount(redis): await add(redis, 'foo', 'bar') res = await redis.object_refcount('foo') assert res == 1 res = await redis.object_refcount('non-existent-key') assert res is None with pytest.raises(TypeError): await redis.object_refcount(None) @pytest.mark.run_loop async def test_object_encoding(redis, server): await add(redis, 'foo', 'bar') res = await redis.object_encoding('foo') if server.version < (3, 0, 0): assert res == b'raw' else: assert res == b'embstr' res = await redis.incr('key') assert res == 1 res = await redis.object_encoding('key') assert res == b'int' res = await redis.object_encoding('non-existent-key') assert res is None with pytest.raises(TypeError): await redis.object_encoding(None) @pytest.mark.run_loop(timeout=20) async def test_object_idletime(redis, loop, server): await add(redis, 'foo', 'bar') res = await redis.object_idletime('foo') # NOTE: sometimes travis-ci is too slow assert res >= 0 res = 0 while not res: res = await redis.object_idletime('foo') await asyncio.sleep(.5, loop=loop) assert res >= 1 res = await redis.object_idletime('non-existent-key') assert res is None with pytest.raises(TypeError): await redis.object_idletime(None) 
@pytest.mark.run_loop async def test_persist(redis): await add(redis, 'my-key', 123) res = await redis.expire('my-key', 10) assert res is True res = await redis.persist('my-key') assert res is True res = await redis.connection.execute('TTL', 'my-key') assert res == -1 with pytest.raises(TypeError): await redis.persist(None) @pytest.mark.run_loop async def test_pexpire(redis, loop): await add(redis, 'my-key', 123) res = await redis.pexpire('my-key', 100) assert res is True res = await redis.connection.execute('TTL', 'my-key') assert res == 0 res = await redis.connection.execute('PTTL', 'my-key') assert res > 0 await add(redis, 'my-key', 123) res = await redis.pexpire('my-key', 1) assert res is True # XXX: tests now looks strange to me. await asyncio.sleep(.2, loop=loop) res = await redis.exists('my-key') assert not res with pytest.raises(TypeError): await redis.pexpire(None, 0) with pytest.raises(TypeError): await redis.pexpire('my-key', 1.0) @pytest.mark.run_loop async def test_pexpireat(redis): await add(redis, 'my-key', 123) now = math.ceil((await redis.time()) * 1000) fut1 = redis.pexpireat('my-key', now + 2000) fut2 = redis.ttl('my-key') fut3 = redis.pttl('my-key') assert (await fut1) is True assert (await fut2) == 2 pytest.assert_almost_equal((await fut3), 2000, -3) with pytest.raises(TypeError): await redis.pexpireat(None, 1234) with pytest.raises(TypeError): await redis.pexpireat('key', 'timestamp') with pytest.raises(TypeError): await redis.pexpireat('key', 1000.0) @pytest.mark.run_loop async def test_pttl(redis, server): await add(redis, 'key', 'val') res = await redis.pttl('key') assert res == -1 res = await redis.pttl('non-existent-key') if server.version < (2, 8, 0): assert res == -1 else: assert res == -2 await redis.pexpire('key', 500) res = await redis.pttl('key') pytest.assert_almost_equal(res, 500, -2) with pytest.raises(TypeError): await redis.pttl(None) @pytest.mark.run_loop async def test_randomkey(redis): await add(redis, 'key:1', 123) await 
add(redis, 'key:2', 123) await add(redis, 'key:3', 123) res = await redis.randomkey() assert res in [b'key:1', b'key:2', b'key:3'] # test with encoding param res = await redis.randomkey(encoding='utf-8') assert res in ['key:1', 'key:2', 'key:3'] await redis.connection.execute('flushdb') res = await redis.randomkey() assert res is None @pytest.mark.run_loop async def test_rename(redis, server): await add(redis, 'foo', 'bar') await redis.delete('bar') res = await redis.rename('foo', 'bar') assert res is True with pytest.raises(ReplyError, match='ERR no such key'): await redis.rename('foo', 'bar') with pytest.raises(TypeError): await redis.rename(None, 'bar') with pytest.raises(TypeError): await redis.rename('foo', None) with pytest.raises(ValueError): await redis.rename('foo', 'foo') if server.version < (3, 2): with pytest.raises(ReplyError, match='.* objects are the same'): await redis.rename('bar', b'bar') @pytest.mark.run_loop async def test_renamenx(redis, server): await redis.delete('foo', 'bar') await add(redis, 'foo', 123) res = await redis.renamenx('foo', 'bar') assert res is True await add(redis, 'foo', 123) res = await redis.renamenx('foo', 'bar') assert res is False with pytest.raises(ReplyError, match='ERR no such key'): await redis.renamenx('baz', 'foo') with pytest.raises(TypeError): await redis.renamenx(None, 'foo') with pytest.raises(TypeError): await redis.renamenx('foo', None) with pytest.raises(ValueError): await redis.renamenx('foo', 'foo') if server.version < (3, 2): with pytest.raises(ReplyError, match='.* objects are the same'): await redis.renamenx('foo', b'foo') @pytest.mark.run_loop async def test_restore(redis): ok = await redis.set('key', 'value') assert ok dump = await redis.dump('key') assert dump is not None ok = await redis.delete('key') assert ok assert b'OK' == (await redis.restore('key', 0, dump)) assert (await redis.get('key')) == b'value' @pytest.redis_version(2, 8, 0, reason='SCAN is available since redis>=2.8.0') 
@pytest.mark.run_loop async def test_scan(redis): for i in range(1, 11): foo_or_bar = 'bar' if i % 3 else 'foo' key = 'key:scan:{}:{}'.format(foo_or_bar, i).encode('utf-8') await add(redis, key, i) cursor, values = await redis.scan() # values should be *>=* just in case some other tests left # test keys assert len(values) >= 10 cursor, test_values = b'0', [] while cursor: cursor, values = await redis.scan( cursor=cursor, match=b'key:scan:foo*') test_values.extend(values) assert len(test_values) == 3 cursor, test_values = b'0', [] while cursor: cursor, values = await redis.scan( cursor=cursor, match=b'key:scan:bar:*') test_values.extend(values) assert len(test_values) == 7 # SCAN family functions do not guarantee that the number of # elements returned per call are in a given range. So here # just dummy test, that *count* argument does not break something cursor = b'0' test_values = [] while cursor: cursor, values = await redis.scan(cursor=cursor, match=b'key:scan:*', count=2) test_values.extend(values) assert len(test_values) == 10 @pytest.mark.run_loop async def test_sort(redis): async def _make_list(key, items): await redis.delete(key) for i in items: await redis.rpush(key, i) await _make_list('a', '4231') res = await redis.sort('a') assert res == [b'1', b'2', b'3', b'4'] res = await redis.sort('a', offset=2, count=2) assert res == [b'3', b'4'] res = await redis.sort('a', asc=b'DESC') assert res == [b'4', b'3', b'2', b'1'] await _make_list('a', 'dbca') res = await redis.sort( 'a', asc=b'DESC', alpha=True, offset=2, count=2 ) assert res == [b'b', b'a'] await redis.set('key:1', 10) await redis.set('key:2', 4) await redis.set('key:3', 7) await _make_list('a', '321') res = await redis.sort('a', by='key:*') assert res == [b'2', b'3', b'1'] res = await redis.sort('a', by='nosort') assert res == [b'3', b'2', b'1'] res = await redis.sort('a', by='key:*', store='sorted_a') assert res == 3 res = await redis.lrange('sorted_a', 0, -1) assert res == [b'2', b'3', b'1'] await 
redis.set('value:1', 20) await redis.set('value:2', 30) await redis.set('value:3', 40) res = await redis.sort('a', 'value:*', by='key:*') assert res == [b'30', b'40', b'20'] await redis.hset('data_1', 'weight', 30) await redis.hset('data_2', 'weight', 20) await redis.hset('data_3', 'weight', 10) await redis.hset('hash_1', 'field', 20) await redis.hset('hash_2', 'field', 30) await redis.hset('hash_3', 'field', 10) res = await redis.sort( 'a', 'hash_*->field', by='data_*->weight' ) assert res == [b'10', b'30', b'20'] @pytest.mark.run_loop async def test_ttl(redis, server): await add(redis, 'key', 'val') res = await redis.ttl('key') assert res == -1 res = await redis.ttl('non-existent-key') if server.version < (2, 8, 0): assert res == -1 else: assert res == -2 await redis.expire('key', 10) res = await redis.ttl('key') assert res >= 9 with pytest.raises(TypeError): await redis.ttl(None) @pytest.mark.run_loop async def test_type(redis): await add(redis, 'key', 'val') res = await redis.type('key') assert res == b'string' await redis.delete('key') await redis.incr('key') res = await redis.type('key') assert res == b'string' await redis.delete('key') await redis.sadd('key', 'val') res = await redis.type('key') assert res == b'set' res = await redis.type('non-existent-key') assert res == b'none' with pytest.raises(TypeError): await redis.type(None) @pytest.redis_version(2, 8, 0, reason='SCAN is available since redis>=2.8.0') @pytest.mark.run_loop async def test_iscan(redis): full = set() foo = set() bar = set() for i in range(1, 11): is_bar = i % 3 foo_or_bar = 'bar' if is_bar else 'foo' key = 'key:scan:{}:{}'.format(foo_or_bar, i).encode('utf-8') full.add(key) if is_bar: bar.add(key) else: foo.add(key) assert await redis.set(key, i) is True async def coro(cmd): lst = [] async for i in cmd: lst.append(i) return lst ret = await coro(redis.iscan()) assert len(ret) >= 10 ret = await coro(redis.iscan(match='key:scan:*')) assert 10 == len(ret) assert set(ret) == full ret = await 
coro(redis.iscan(match='key:scan:foo*')) assert set(ret) == foo ret = await coro(redis.iscan(match='key:scan:bar*')) assert set(ret) == bar # SCAN family functions do not guarantee that the number of # elements returned per call are in a given range. So here # just dummy test, that *count* argument does not break something ret = await coro(redis.iscan(match='key:scan:*', count=2)) assert 10 == len(ret) assert set(ret) == full aioredis-1.0.0/tests/pubsub_commands_test.py0000644000175000017500000001743313203624357022060 0ustar alexeyalexey00000000000000import asyncio import pytest async def _reader(channel, output, waiter, conn): await conn.execute('subscribe', channel) ch = conn.pubsub_channels[channel] waiter.set_result(conn) while await ch.wait_message(): msg = await ch.get() await output.put(msg) @pytest.mark.run_loop async def test_publish(create_connection, redis, server, loop): out = asyncio.Queue(loop=loop) fut = loop.create_future() conn = await create_connection( server.tcp_address, loop=loop) sub = asyncio.ensure_future(_reader('chan:1', out, fut, conn), loop=loop) await fut await redis.publish('chan:1', 'Hello') msg = await out.get() assert msg == b'Hello' sub.cancel() @pytest.mark.run_loop async def test_publish_json(create_connection, redis, server, loop): out = asyncio.Queue(loop=loop) fut = loop.create_future() conn = await create_connection( server.tcp_address, loop=loop) sub = asyncio.ensure_future(_reader('chan:1', out, fut, conn), loop=loop) await fut res = await redis.publish_json('chan:1', {"Hello": "world"}) assert res == 1 # recievers msg = await out.get() assert msg == b'{"Hello": "world"}' sub.cancel() @pytest.mark.run_loop async def test_subscribe(redis): res = await redis.subscribe('chan:1', 'chan:2') assert redis.in_pubsub == 2 ch1 = redis.channels['chan:1'] ch2 = redis.channels['chan:2'] assert res == [ch1, ch2] assert ch1.is_pattern is False assert ch2.is_pattern is False res = await redis.unsubscribe('chan:1', 'chan:2') assert res == 
[[b'unsubscribe', b'chan:1', 1], [b'unsubscribe', b'chan:2', 0]] @pytest.mark.run_loop async def test_psubscribe(redis, create_redis, server, loop): sub = redis res = await sub.psubscribe('patt:*', 'chan:*') assert sub.in_pubsub == 2 pat1 = sub.patterns['patt:*'] pat2 = sub.patterns['chan:*'] assert res == [pat1, pat2] pub = await create_redis( server.tcp_address, loop=loop) await pub.publish_json('chan:123', {"Hello": "World"}) res = await pat2.get_json() assert res == (b'chan:123', {"Hello": "World"}) res = await sub.punsubscribe('patt:*', 'patt:*', 'chan:*') assert res == [[b'punsubscribe', b'patt:*', 1], [b'punsubscribe', b'patt:*', 1], [b'punsubscribe', b'chan:*', 0], ] @pytest.redis_version( 2, 8, 0, reason='PUBSUB CHANNELS is available since redis>=2.8.0') @pytest.mark.run_loop async def test_pubsub_channels(create_redis, server, loop): redis = await create_redis( server.tcp_address, loop=loop) res = await redis.pubsub_channels() assert res == [] res = await redis.pubsub_channels('chan:*') assert res == [] sub = await create_redis( server.tcp_address, loop=loop) await sub.subscribe('chan:1') res = await redis.pubsub_channels() assert res == [b'chan:1'] res = await redis.pubsub_channels('ch*') assert res == [b'chan:1'] await sub.unsubscribe('chan:1') await sub.psubscribe('chan:*') res = await redis.pubsub_channels() assert res == [] @pytest.redis_version( 2, 8, 0, reason='PUBSUB NUMSUB is available since redis>=2.8.0') @pytest.mark.run_loop async def test_pubsub_numsub(create_redis, server, loop): redis = await create_redis( server.tcp_address, loop=loop) res = await redis.pubsub_numsub() assert res == {} res = await redis.pubsub_numsub('chan:1') assert res == {b'chan:1': 0} sub = await create_redis( server.tcp_address, loop=loop) await sub.subscribe('chan:1') res = await redis.pubsub_numsub() assert res == {} res = await redis.pubsub_numsub('chan:1') assert res == {b'chan:1': 1} res = await redis.pubsub_numsub('chan:2') assert res == {b'chan:2': 0} res = 
await redis.pubsub_numsub('chan:1', 'chan:2') assert res == {b'chan:1': 1, b'chan:2': 0} await sub.unsubscribe('chan:1') await sub.psubscribe('chan:*') res = await redis.pubsub_numsub() assert res == {} @pytest.redis_version( 2, 8, 0, reason='PUBSUB NUMPAT is available since redis>=2.8.0') @pytest.mark.run_loop async def test_pubsub_numpat(create_redis, server, loop, redis): sub = await create_redis( server.tcp_address, loop=loop) res = await redis.pubsub_numpat() assert res == 0 await sub.subscribe('chan:1') res = await redis.pubsub_numpat() assert res == 0 await sub.psubscribe('chan:*') res = await redis.pubsub_numpat() assert res == 1 @pytest.mark.run_loop async def test_close_pubsub_channels(redis, loop): ch, = await redis.subscribe('chan:1') async def waiter(ch): assert not await ch.wait_message() tsk = asyncio.ensure_future(waiter(ch), loop=loop) redis.close() await redis.wait_closed() await tsk @pytest.mark.run_loop async def test_close_pubsub_patterns(redis, loop): ch, = await redis.psubscribe('chan:*') async def waiter(ch): assert not await ch.wait_message() tsk = asyncio.ensure_future(waiter(ch), loop=loop) redis.close() await redis.wait_closed() await tsk @pytest.mark.run_loop async def test_close_cancelled_pubsub_channel(redis, loop): ch, = await redis.subscribe('chan:1') async def waiter(ch): with pytest.raises(asyncio.CancelledError): await ch.wait_message() tsk = asyncio.ensure_future(waiter(ch), loop=loop) await asyncio.sleep(0, loop=loop) tsk.cancel() @pytest.mark.run_loop async def test_channel_get_after_close(create_redis, loop, server): sub = await create_redis( server.tcp_address, loop=loop) pub = await create_redis( server.tcp_address, loop=loop) ch, = await sub.subscribe('chan:1') async def waiter(): while True: msg = await ch.get() if msg is None: break assert msg == b'message' tsk = asyncio.ensure_future(waiter(), loop=loop) await pub.publish('chan:1', 'message') sub.close() await tsk @pytest.mark.run_loop async def 
test_subscribe_concurrency(create_redis, server, loop): sub = await create_redis( server.tcp_address, loop=loop) pub = await create_redis( server.tcp_address, loop=loop) async def subscribe(*args): return await sub.subscribe(*args) async def publish(*args): await asyncio.sleep(0, loop=loop) return await pub.publish(*args) res = await asyncio.gather( subscribe('channel:0'), publish('channel:0', 'Hello'), subscribe('channel:1'), loop=loop) (ch1,), subs, (ch2,) = res assert ch1.name == b'channel:0' assert subs == 1 assert ch2.name == b'channel:1' @pytest.redis_version( 3, 2, 0, reason='PUBSUB PING is available since redis>=3.2.0') @pytest.mark.run_loop async def test_pubsub_ping(redis): await redis.subscribe('chan:1', 'chan:2') res = await redis.ping() assert res == b'PONG' res = await redis.ping('Hello') assert res == b'Hello' res = await redis.ping('Hello', encoding='utf-8') assert res == 'Hello' await redis.unsubscribe('chan:1', 'chan:2') @pytest.mark.run_loop async def test_pubsub_channel_iter(create_redis, server, loop): sub = await create_redis(server.tcp_address, loop=loop) pub = await create_redis(server.tcp_address, loop=loop) ch, = await sub.subscribe('chan:1') async def coro(ch): lst = [] async for msg in ch.iter(): lst.append(msg) return lst tsk = asyncio.ensure_future(coro(ch), loop=loop) await pub.publish_json('chan:1', {'Hello': 'World'}) await pub.publish_json('chan:1', ['message']) await asyncio.sleep(0, loop=loop) ch.close() assert await tsk == [b'{"Hello": "World"}', b'["message"]'] aioredis-1.0.0/tests/errors_test.py0000644000175000017500000000071313203624357020204 0ustar alexeyalexey00000000000000from aioredis.errors import ReplyError from aioredis.errors import MaxClientsError def test_return_default_class(): assert isinstance(ReplyError(None), ReplyError) def test_return_adhoc_class(): class MyError(ReplyError): MATCH_REPLY = "my error" assert isinstance(ReplyError("my error"), MyError) def test_return_max_clients_error(): assert isinstance( 
ReplyError("ERR max number of clients reached"), MaxClientsError) aioredis-1.0.0/tests/ssl_test.py0000644000175000017500000000167613203624357017502 0ustar alexeyalexey00000000000000import pytest @pytest.mark.run_loop async def test_ssl_connection(create_connection, loop, server, ssl_proxy): ssl_port, ssl_ctx = ssl_proxy(server.tcp_address.port) conn = await create_connection( ('localhost', ssl_port), ssl=ssl_ctx, loop=loop) res = await conn.execute('ping') assert res == b'PONG' @pytest.mark.run_loop async def test_ssl_redis(create_redis, loop, server, ssl_proxy): ssl_port, ssl_ctx = ssl_proxy(server.tcp_address.port) redis = await create_redis( ('localhost', ssl_port), ssl=ssl_ctx, loop=loop) res = await redis.ping() assert res == b'PONG' @pytest.mark.run_loop async def test_ssl_pool(create_pool, server, loop, ssl_proxy): ssl_port, ssl_ctx = ssl_proxy(server.tcp_address.port) pool = await create_pool( ('localhost', ssl_port), ssl=ssl_ctx, loop=loop) with (await pool) as conn: res = await conn.execute('PING') assert res == b'PONG' aioredis-1.0.0/tests/hyperloglog_commands_test.py0000644000175000017500000000602713203624357023110 0ustar alexeyalexey00000000000000import pytest pytestmark = pytest.redis_version( 2, 8, 9, reason='HyperLogLog works only with redis>=2.8.9') @pytest.mark.run_loop async def test_pfcount(redis): key = 'hll_pfcount' other_key = 'some-other-hll' # add initial data, cardinality changed so command returns 1 is_changed = await redis.pfadd(key, 'foo', 'bar', 'zap') assert is_changed == 1 # add more data, cardinality not changed so command returns 0 is_changed = await redis.pfadd(key, 'zap', 'zap', 'zap') assert is_changed == 0 # add event more data, cardinality not changed so command returns 0 is_changed = await redis.pfadd(key, 'foo', 'bar') assert is_changed == 0 # check cardinality of one key cardinality = await redis.pfcount(key) assert cardinality == 3 # create new key (variable) for cardinality estimation is_changed = await 
redis.pfadd(other_key, 1, 2, 3) assert is_changed == 1 # check cardinality of multiple keys cardinality = await redis.pfcount(key, other_key) assert cardinality == 6 with pytest.raises(TypeError): await redis.pfcount(None) with pytest.raises(TypeError): await redis.pfcount(key, None) with pytest.raises(TypeError): await redis.pfcount(key, key, None) @pytest.mark.run_loop async def test_pfadd(redis): key = 'hll_pfadd' values = ['a', 's', 'y', 'n', 'c', 'i', 'o'] # add initial data, cardinality changed so command returns 1 is_changed = await redis.pfadd(key, *values) assert is_changed == 1 # add event more data, cardinality not changed so command returns 0 is_changed = await redis.pfadd(key, 'i', 'o') assert is_changed == 0 @pytest.mark.run_loop async def test_pfadd_wrong_input(redis): with pytest.raises(TypeError): await redis.pfadd(None, 'value') @pytest.mark.run_loop async def test_pfmerge(redis): key = 'hll_asyncio' key_other = 'hll_aioredis' key_dest = 'hll_aio' values = ['a', 's', 'y', 'n', 'c', 'i', 'o'] values_other = ['a', 'i', 'o', 'r', 'e', 'd', 'i', 's'] data_set = set(values + values_other) cardinality_merged = len(data_set) # add initial data, cardinality changed so command returns 1 await redis.pfadd(key, *values) await redis.pfadd(key_other, *values_other) # check cardinality of one key cardinality = await redis.pfcount(key) assert cardinality == len(set(values_other)) cardinality_other = await redis.pfcount(key_other) assert cardinality_other == len(set(values_other)) await redis.pfmerge(key_dest, key, key_other) cardinality_dest = await redis.pfcount(key_dest) assert cardinality_dest == cardinality_merged with pytest.raises(TypeError): await redis.pfmerge(None, key) with pytest.raises(TypeError): await redis.pfmerge(key_dest, None) with pytest.raises(TypeError): await redis.pfmerge(key_dest, key, None) @pytest.mark.run_loop async def test_pfmerge_wrong_input(redis): with pytest.raises(TypeError): await redis.pfmerge(None, 'value') 
aioredis-1.0.0/tests/hash_commands_test.py0000644000175000017500000003660113203624357021501 0ustar alexeyalexey00000000000000import pytest from aioredis import ReplyError async def add(redis, key, field, value): ok = await redis.connection.execute( b'hset', key, field, value) assert ok == 1 @pytest.mark.run_loop async def test_hdel(redis): key, field, value = b'key:hdel', b'bar', b'zap' await add(redis, key, field, value) # delete value that exists, expected 1 result = await redis.hdel(key, field) assert result == 1 # delete value that does not exists, expected 0 result = await redis.hdel(key, field) assert result == 0 with pytest.raises(TypeError): await redis.hdel(None, field) @pytest.mark.run_loop async def test_hexists(redis): key, field, value = b'key:hexists', b'bar', b'zap' await add(redis, key, field, value) # check value that exists, expected 1 result = await redis.hexists(key, field) assert result == 1 # check value when, key exists and field does not, expected 0 result = await redis.hexists(key, b'not:' + field) assert result == 0 # check value when, key not exists, expected 0 result = await redis.hexists(b'not:' + key, field) assert result == 0 with pytest.raises(TypeError): await redis.hexists(None, field) @pytest.mark.run_loop async def test_hget(redis): key, field, value = b'key:hget', b'bar', b'zap' await add(redis, key, field, value) # basic test, fetch value and check in to reference test_value = await redis.hget(key, field) assert test_value == value # fetch value, when field does not exists test_value = await redis.hget(key, b'not' + field) assert test_value is None # fetch value when key does not exists test_value = await redis.hget(b'not:' + key, b'baz') assert test_value is None # check encoding test_value = await redis.hget(key, field, encoding='utf-8') assert test_value == 'zap' with pytest.raises(TypeError): await redis.hget(None, field) @pytest.mark.run_loop async def test_hgetall(redis): await add(redis, 'key:hgetall', 'foo', 'baz') 
await add(redis, 'key:hgetall', 'bar', 'zap') test_value = await redis.hgetall('key:hgetall') assert isinstance(test_value, dict) assert {b'foo': b'baz', b'bar': b'zap'} == test_value # try to get all values from key that does not exits test_value = await redis.hgetall(b'not:key:hgetall') assert test_value == {} # check encoding param test_value = await redis.hgetall( 'key:hgetall', encoding='utf-8') assert {'foo': 'baz', 'bar': 'zap'} == test_value with pytest.raises(TypeError): await redis.hgetall(None) @pytest.mark.run_loop async def test_hincrby(redis): key, field, value = b'key:hincrby', b'bar', 1 await add(redis, key, field, value) # increment initial value by 2 result = await redis.hincrby(key, field, 2) assert result == 3 result = await redis.hincrby(key, field, -1) assert result == 2 result = await redis.hincrby(key, field, -100) assert result == -98 result = await redis.hincrby(key, field, -2) assert result == -100 # increment value in case of key or field that does not exists result = await redis.hincrby(b'not:' + key, field, 2) assert result == 2 result = await redis.hincrby(key, b'not:' + field, 2) assert result == 2 with pytest.raises(ReplyError): await redis.hincrby(key, b'not:' + field, 3.14) with pytest.raises(ReplyError): # initial value is float, try to increment 1 await add(redis, b'other:' + key, field, 3.14) await redis.hincrby(b'other:' + key, field, 1) with pytest.raises(TypeError): await redis.hincrby(None, field, 2) @pytest.mark.run_loop async def test_hincrbyfloat(redis): key, field, value = b'key:hincrbyfloat', b'bar', 2.71 await add(redis, key, field, value) result = await redis.hincrbyfloat(key, field, 3.14) assert result == 5.85 result = await redis.hincrbyfloat(key, field, -2.71) assert result == 3.14 result = await redis.hincrbyfloat(key, field, -100.1) assert result == -96.96 # increment value in case of key or field that does not exists result = await redis.hincrbyfloat(b'not:' + key, field, 3.14) assert result == 3.14 result = 
await redis.hincrbyfloat(key, b'not:' + field, 3.14) assert result == 3.14 with pytest.raises(TypeError): await redis.hincrbyfloat(None, field, 2) @pytest.mark.run_loop async def test_hkeys(redis): key = b'key:hkeys' field1, field2 = b'foo', b'bar' value1, value2 = b'baz', b'zap' await add(redis, key, field1, value1) await add(redis, key, field2, value2) test_value = await redis.hkeys(key) assert set(test_value) == {field1, field2} test_value = await redis.hkeys(b'not:' + key) assert test_value == [] test_value = await redis.hkeys(key, encoding='utf-8') assert set(test_value) == {'foo', 'bar'} with pytest.raises(TypeError): await redis.hkeys(None) @pytest.mark.run_loop async def test_hlen(redis): key = b'key:hlen' field1, field2 = b'foo', b'bar' value1, value2 = b'baz', b'zap' await add(redis, key, field1, value1) await add(redis, key, field2, value2) test_value = await redis.hlen(key) assert test_value == 2 test_value = await redis.hlen(b'not:' + key) assert test_value == 0 with pytest.raises(TypeError): await redis.hlen(None) @pytest.mark.run_loop async def test_hmget(redis): key = b'key:hmget' field1, field2 = b'foo', b'bar' value1, value2 = b'baz', b'zap' await add(redis, key, field1, value1) await add(redis, key, field2, value2) test_value = await redis.hmget(key, field1, field2) assert set(test_value) == {value1, value2} test_value = await redis.hmget( key, b'not:' + field1, b'not:' + field2) assert [None, None] == test_value val = await redis.hincrby(key, 'numeric') assert val == 1 test_value = await redis.hmget( key, field1, field2, 'numeric', encoding='utf-8') assert ['baz', 'zap', '1'] == test_value with pytest.raises(TypeError): await redis.hmget(None, field1, field2) @pytest.mark.run_loop async def test_hmset(redis): key, field, value = b'key:hmset', b'bar', b'zap' await add(redis, key, field, value) # key and field exists test_value = await redis.hmset(key, field, b'baz') assert test_value is True result = await redis.hexists(key, field) assert result 
== 1 # key and field does not exists test_value = await redis.hmset(b'not:' + key, field, value) assert test_value is True result = await redis.hexists(b'not:' + key, field) assert result == 1 # set multiple pairs = [b'foo', b'baz', b'bar', b'paz'] test_value = await redis.hmset(key, *pairs) assert test_value is True test_value = await redis.hmget(key, b'foo', b'bar') assert set(test_value) == {b'baz', b'paz'} with pytest.raises(TypeError): await redis.hmset(key, b'foo', b'bar', b'baz') with pytest.raises(TypeError): await redis.hmset(None, *pairs) with pytest.raises(TypeError): await redis.hmset(key, {'foo': 'bar'}, {'baz': 'bad'}) with pytest.raises(TypeError): await redis.hmset(key) @pytest.mark.run_loop async def test_hmset_dict(redis): key = 'key:hmset' # dict d1 = {b'foo': b'one dict'} test_value = await redis.hmset_dict(key, d1) assert test_value is True test_value = await redis.hget(key, b'foo') assert test_value == b'one dict' # kwdict test_value = await redis.hmset_dict(key, foo=b'kw1', bar=b'kw2') assert test_value is True test_value = await redis.hmget(key, b'foo', b'bar') assert set(test_value) == {b'kw1', b'kw2'} # dict & kwdict d1 = {b'foo': b'dict'} test_value = await redis.hmset_dict(key, d1, foo=b'kw') assert test_value is True test_value = await redis.hget(key, b'foo') assert test_value == b'kw' # allow empty dict with kwargs test_value = await redis.hmset_dict(key, {}, foo='kw') assert test_value is True test_value = await redis.hget(key, 'foo') assert test_value == b'kw' with pytest.raises(TypeError): await redis.hmset_dict(key) with pytest.raises(ValueError): await redis.hmset_dict(key, {}) with pytest.raises(TypeError): await redis.hmset_dict(key, ('foo', 'pairs')) with pytest.raises(TypeError): await redis.hmset_dict(key, b'foo', 'pairs') with pytest.raises(TypeError): await redis.hmset_dict(key, b'foo', 'pairs', foo=b'kw1') with pytest.raises(TypeError): await redis.hmset_dict(key, {'a': 1}, {'b': 2}) with pytest.raises(TypeError): await 
redis.hmset_dict(key, {'a': 1}, {'b': 2}, 'c', 3, d=4) @pytest.mark.run_loop async def test_hset(redis): key, field, value = b'key:hset', b'bar', b'zap' test_value = await redis.hset(key, field, value) assert test_value == 1 test_value = await redis.hset(key, field, value) assert test_value == 0 test_value = await redis.hset(b'other:' + key, field, value) assert test_value == 1 result = await redis.hexists(b'other:' + key, field) assert result == 1 with pytest.raises(TypeError): await redis.hset(None, field, value) @pytest.mark.run_loop async def test_hsetnx(redis): key, field, value = b'key:hsetnx', b'bar', b'zap' # field does not exists, operation should be successful test_value = await redis.hsetnx(key, field, value) assert test_value == 1 # make sure that value was stored result = await redis.hget(key, field) assert result == value # field exists, operation should not change any value test_value = await redis.hsetnx(key, field, b'baz') assert test_value == 0 # make sure value was not changed result = await redis.hget(key, field) assert result == value with pytest.raises(TypeError): await redis.hsetnx(None, field, value) @pytest.mark.run_loop async def test_hvals(redis): key = b'key:hvals' field1, field2 = b'foo', b'bar' value1, value2 = b'baz', b'zap' await add(redis, key, field1, value1) await add(redis, key, field2, value2) test_value = await redis.hvals(key) assert set(test_value) == {value1, value2} test_value = await redis.hvals(b'not:' + key) assert test_value == [] test_value = await redis.hvals(key, encoding='utf-8') assert set(test_value) == {'baz', 'zap'} with pytest.raises(TypeError): await redis.hvals(None) @pytest.redis_version(2, 8, 0, reason='HSCAN is available since redis>=2.8.0') @pytest.mark.run_loop async def test_hscan(redis): key = b'key:hscan' # setup initial values 3 "field:foo:*" items and 7 "field:bar:*" items for i in range(1, 11): foo_or_bar = 'bar' if i % 3 else 'foo' f = 'field:{}:{}'.format(foo_or_bar, i).encode('utf-8') v = 
'value:{}'.format(i).encode('utf-8') await add(redis, key, f, v) # fetch 'field:foo:*' items expected tuple with 3 fields and 3 values cursor, values = await redis.hscan(key, match=b'field:foo:*') assert len(values) == 3 assert sorted(values) == [ (b'field:foo:3', b'value:3'), (b'field:foo:6', b'value:6'), (b'field:foo:9', b'value:9'), ] # fetch 'field:bar:*' items expected tuple with 7 fields and 7 values cursor, values = await redis.hscan(key, match=b'field:bar:*') assert len(values) == 7 assert sorted(values) == [ (b'field:bar:1', b'value:1'), (b'field:bar:10', b'value:10'), (b'field:bar:2', b'value:2'), (b'field:bar:4', b'value:4'), (b'field:bar:5', b'value:5'), (b'field:bar:7', b'value:7'), (b'field:bar:8', b'value:8'), ] # SCAN family functions do not guarantee that the number of # elements returned per call are in a given range. So here # just dummy test, that *count* argument does not break something cursor = b'0' test_values = [] while cursor: cursor, values = await redis.hscan(key, cursor, count=1) test_values.extend(values) assert len(test_values) == 10 with pytest.raises(TypeError): await redis.hscan(None) @pytest.mark.run_loop async def test_hgetall_enc(create_redis, loop, server): redis = await create_redis( server.tcp_address, loop=loop, encoding='utf-8') TEST_KEY = 'my-key-nx' await redis.hmset(TEST_KEY, 'foo', 'bar', 'baz', 'bad') tr = redis.multi_exec() tr.hgetall(TEST_KEY) res = await tr.execute() assert res == [{'foo': 'bar', 'baz': 'bad'}] @pytest.mark.run_loop @pytest.redis_version(3, 2, 0, reason="HSTRLEN new in redis 3.2.0") async def test_hstrlen(redis): ok = await redis.hset('myhash', 'str_field', 'some value') assert ok == 1 ok = await redis.hincrby('myhash', 'uint_field', 1) assert ok == 1 ok = await redis.hincrby('myhash', 'int_field', -1) assert ok == -1 l = await redis.hstrlen('myhash', 'str_field') assert l == 10 l = await redis.hstrlen('myhash', 'uint_field') assert l == 1 l = await redis.hstrlen('myhash', 'int_field') assert l == 2 
l = await redis.hstrlen('myhash', 'none_field') assert l == 0 l = await redis.hstrlen('none_key', 'none_field') assert l == 0 @pytest.redis_version(2, 8, 0, reason='HSCAN is available since redis>=2.8.0') @pytest.mark.run_loop async def test_ihscan(redis): key = b'key:hscan' # setup initial values 3 "field:foo:*" items and 7 "field:bar:*" items for i in range(1, 11): foo_or_bar = 'bar' if i % 3 else 'foo' f = 'field:{}:{}'.format(foo_or_bar, i).encode('utf-8') v = 'value:{}'.format(i).encode('utf-8') assert await redis.hset(key, f, v) == 1 async def coro(cmd): lst = [] async for i in cmd: lst.append(i) return lst # fetch 'field:foo:*' items expected tuple with 3 fields and 3 values ret = await coro(redis.ihscan(key, match=b'field:foo:*')) assert set(ret) == {(b'field:foo:3', b'value:3'), (b'field:foo:6', b'value:6'), (b'field:foo:9', b'value:9')} # fetch 'field:bar:*' items expected tuple with 7 fields and 7 values ret = await coro(redis.ihscan(key, match=b'field:bar:*')) assert set(ret) == {(b'field:bar:1', b'value:1'), (b'field:bar:2', b'value:2'), (b'field:bar:4', b'value:4'), (b'field:bar:5', b'value:5'), (b'field:bar:7', b'value:7'), (b'field:bar:8', b'value:8'), (b'field:bar:10', b'value:10')} # SCAN family functions do not guarantee that the number of # elements returned per call are in a given range. 
So here # just dummy test, that *count* argument does not break something ret = await coro(redis.ihscan(key, count=1)) assert set(ret) == {(b'field:foo:3', b'value:3'), (b'field:foo:6', b'value:6'), (b'field:foo:9', b'value:9'), (b'field:bar:1', b'value:1'), (b'field:bar:2', b'value:2'), (b'field:bar:4', b'value:4'), (b'field:bar:5', b'value:5'), (b'field:bar:7', b'value:7'), (b'field:bar:8', b'value:8'), (b'field:bar:10', b'value:10')} with pytest.raises(TypeError): await redis.ihscan(None) aioredis-1.0.0/tests/list_commands_test.py0000644000175000017500000004101513203624357021524 0ustar alexeyalexey00000000000000import pytest import asyncio from aioredis import ReplyError async def push_data_with_sleep(redis, loop, key, *values): await asyncio.sleep(0.2, loop=loop) result = await redis.lpush(key, *values) return result @pytest.mark.run_loop async def test_blpop(redis): key1, value1 = b'key:blpop:1', b'blpop:value:1' key2, value2 = b'key:blpop:2', b'blpop:value:2' # setup list result = await redis.rpush(key1, value1, value2) assert result == 2 # make sure that left value poped test_value = await redis.blpop(key1) assert test_value == [key1, value1] # pop remaining value, so list should become empty test_value = await redis.blpop(key1) assert test_value == [key1, value2] with pytest.raises(TypeError): await redis.blpop(None) with pytest.raises(TypeError): await redis.blpop(key1, None) with pytest.raises(TypeError): await redis.blpop(key1, timeout=b'one') with pytest.raises(ValueError): await redis.blpop(key2, timeout=-10) # test encoding param await redis.rpush(key2, value1) test_value = await redis.blpop(key2, encoding='utf-8') assert test_value == ['key:blpop:2', 'blpop:value:1'] @pytest.mark.run_loop async def test_blpop_blocking_features(redis, create_redis, loop, server): key1, key2 = b'key:blpop:1', b'key:blpop:2' value = b'blpop:value:2' other_redis = await create_redis( server.tcp_address, loop=loop) # create blocking task in separate connection consumer = 
other_redis.blpop(key1, key2) producer_task = asyncio.Task( push_data_with_sleep(redis, loop, key2, value), loop=loop) results = await asyncio.gather( consumer, producer_task, loop=loop) assert results[0] == [key2, value] assert results[1] == 1 # wait for data with timeout, list is emtpy, so blpop should # return None in 1 sec waiter = redis.blpop(key1, key2, timeout=1) test_value = await waiter assert test_value is None other_redis.close() @pytest.mark.run_loop async def test_brpop(redis): key1, value1 = b'key:brpop:1', b'brpop:value:1' key2, value2 = b'key:brpop:2', b'brpop:value:2' # setup list result = await redis.rpush(key1, value1, value2) assert result == 2 # make sure that right value poped test_value = await redis.brpop(key1) assert test_value == [key1, value2] # pop remaining value, so list should become empty test_value = await redis.brpop(key1) assert test_value == [key1, value1] with pytest.raises(TypeError): await redis.brpop(None) with pytest.raises(TypeError): await redis.brpop(key1, None) with pytest.raises(TypeError): await redis.brpop(key1, timeout=b'one') with pytest.raises(ValueError): await redis.brpop(key2, timeout=-10) # test encoding param await redis.rpush(key2, value1) test_value = await redis.brpop(key2, encoding='utf-8') assert test_value == ['key:brpop:2', 'brpop:value:1'] @pytest.mark.run_loop async def test_brpop_blocking_features(redis, create_redis, server, loop): key1, key2 = b'key:brpop:1', b'key:brpop:2' value = b'brpop:value:2' other_redis = await create_redis( server.tcp_address, loop=loop) # create blocking task in separate connection consumer_task = other_redis.brpop(key1, key2) producer_task = asyncio.Task( push_data_with_sleep(redis, loop, key2, value), loop=loop) results = await asyncio.gather( consumer_task, producer_task, loop=loop) assert results[0] == [key2, value] assert results[1] == 1 # wait for data with timeout, list is emtpy, so brpop should # return None in 1 sec waiter = redis.brpop(key1, key2, timeout=1) 
test_value = await waiter assert test_value is None @pytest.mark.run_loop async def test_brpoplpush(redis): key = b'key:brpoplpush:1' value1, value2 = b'brpoplpush:value:1', b'brpoplpush:value:2' destkey = b'destkey:brpoplpush:1' # setup list await redis.rpush(key, value1, value2) # move value in into head of new list result = await redis.brpoplpush(key, destkey) assert result == value2 # move last value result = await redis.brpoplpush(key, destkey) assert result == value1 # make sure that all values stored in new destkey list test_value = await redis.lrange(destkey, 0, -1) assert test_value == [value1, value2] with pytest.raises(TypeError): await redis.brpoplpush(None, destkey) with pytest.raises(TypeError): await redis.brpoplpush(key, None) with pytest.raises(TypeError): await redis.brpoplpush(key, destkey, timeout=b'one') with pytest.raises(ValueError): await redis.brpoplpush(key, destkey, timeout=-10) # test encoding param result = await redis.brpoplpush( destkey, key, encoding='utf-8') assert result == 'brpoplpush:value:2' @pytest.mark.run_loop async def test_brpoplpush_blocking_features(redis, create_redis, server, loop): source = b'key:brpoplpush:12' value = b'brpoplpush:value:2' destkey = b'destkey:brpoplpush:2' other_redis = await create_redis( server.tcp_address, loop=loop) # create blocking task consumer_task = other_redis.brpoplpush(source, destkey) producer_task = asyncio.Task( push_data_with_sleep(redis, loop, source, value), loop=loop) results = await asyncio.gather( consumer_task, producer_task, loop=loop) assert results[0] == value assert results[1] == 1 # make sure that all values stored in new destkey list test_value = await redis.lrange(destkey, 0, -1) assert test_value == [value] # wait for data with timeout, list is emtpy, so brpoplpush should # return None in 1 sec waiter = redis.brpoplpush(source, destkey, timeout=1) test_value = await waiter assert test_value is None other_redis.close() @pytest.mark.run_loop async def test_lindex(redis): 
key, value = b'key:lindex:1', 'value:{}' # setup list values = [value.format(i).encode('utf-8') for i in range(0, 10)] await redis.rpush(key, *values) # make sure that all indexes are correct for i in range(0, 10): test_value = await redis.lindex(key, i) assert test_value == values[i] # get last element test_value = await redis.lindex(key, -1) assert test_value == b'value:9' # index of element if key does not exists test_value = await redis.lindex(b'not:' + key, 5) assert test_value is None # test encoding param await redis.rpush(key, 'one', 'two') test_value = await redis.lindex(key, 10, encoding='utf-8') assert test_value == 'one' test_value = await redis.lindex(key, 11, encoding='utf-8') assert test_value == 'two' with pytest.raises(TypeError): await redis.lindex(None, -1) with pytest.raises(TypeError): await redis.lindex(key, b'one') @pytest.mark.run_loop async def test_linsert(redis): key = b'key:linsert:1' value1, value2, value3, value4 = b'Hello', b'World', b'foo', b'bar' await redis.rpush(key, value1, value2) # insert element before pivot test_value = await redis.linsert( key, value2, value3, before=True) assert test_value == 3 # insert element after pivot test_value = await redis.linsert( key, value2, value4, before=False) assert test_value == 4 # make sure that values actually inserted in right placed test_value = await redis.lrange(key, 0, -1) expected = [value1, value3, value2, value4] assert test_value == expected # try to insert something when pivot value does not exits test_value = await redis.linsert( key, b'not:pivot', value3, before=True) assert test_value == -1 with pytest.raises(TypeError): await redis.linsert(None, value1, value3) @pytest.mark.run_loop async def test_llen(redis): key = b'key:llen:1' value1, value2 = b'Hello', b'World' await redis.rpush(key, value1, value2) test_value = await redis.llen(key) assert test_value == 2 test_value = await redis.llen(b'not:' + key) assert test_value == 0 with pytest.raises(TypeError): await 
redis.llen(None) @pytest.mark.run_loop async def test_lpop(redis): key = b'key:lpop:1' value1, value2 = b'lpop:value:1', b'lpop:value:2' # setup list result = await redis.rpush(key, value1, value2) assert result == 2 # make sure that left value poped test_value = await redis.lpop(key) assert test_value == value1 # pop remaining value, so list should become empty test_value = await redis.lpop(key) assert test_value == value2 # pop from empty list test_value = await redis.lpop(key) assert test_value is None # test encoding param await redis.rpush(key, 'value') test_value = await redis.lpop(key, encoding='utf-8') assert test_value == 'value' with pytest.raises(TypeError): await redis.lpop(None) @pytest.mark.run_loop async def test_lpush(redis): key = b'key:lpush' value1, value2 = b'value:1', b'value:2' # add multiple values to the list, with key that does not exists result = await redis.lpush(key, value1, value2) assert result == 2 # make sure that values actually inserted in right placed and order test_value = await redis.lrange(key, 0, -1) assert test_value == [value2, value1] # test encoding param test_value = await redis.lrange(key, 0, -1, encoding='utf-8') assert test_value == ['value:2', 'value:1'] with pytest.raises(TypeError): await redis.lpush(None, value1) @pytest.mark.run_loop async def test_lpushx(redis): key = b'key:lpushx' value1, value2 = b'value:1', b'value:2' # add multiple values to the list, with key that does not exists # so value should not be pushed result = await redis.lpushx(key, value2) assert result == 0 # init key with list by using regular lpush result = await redis.lpush(key, value1) assert result == 1 result = await redis.lpushx(key, value2) assert result == 2 # make sure that values actually inserted in right placed and order test_value = await redis.lrange(key, 0, -1) assert test_value == [value2, value1] with pytest.raises(TypeError): await redis.lpushx(None, value1) @pytest.mark.run_loop async def test_lrange(redis): key, value = 
b'key:lrange:1', 'value:{}' values = [value.format(i).encode('utf-8') for i in range(0, 10)] await redis.rpush(key, *values) test_value = await redis.lrange(key, 0, 2) assert test_value == values[0:3] test_value = await redis.lrange(key, 0, -1) assert test_value == values test_value = await redis.lrange(key, -2, -1) assert test_value == values[-2:] # range of elements if key does not exists test_value = await redis.lrange(b'not:' + key, 0, -1) assert test_value == [] with pytest.raises(TypeError): await redis.lrange(None, 0, -1) with pytest.raises(TypeError): await redis.lrange(key, b'zero', -1) with pytest.raises(TypeError): await redis.lrange(key, 0, b'one') @pytest.mark.run_loop async def test_lrem(redis): key, value = b'key:lrem:1', 'value:{}' values = [value.format(i % 2).encode('utf-8') for i in range(0, 10)] await redis.rpush(key, *values) # remove elements from tail to head test_value = await redis.lrem(key, -4, b'value:0') assert test_value == 4 # remove element from head to tail test_value = await redis.lrem(key, 4, b'value:1') assert test_value == 4 # remove values that not in list test_value = await redis.lrem(key, 4, b'value:other') assert test_value == 0 # make sure that only two values left in the list test_value = await redis.lrange(key, 0, -1) assert test_value == [b'value:0', b'value:1'] # remove all instance of value:0 test_value = await redis.lrem(key, 0, b'value:0') assert test_value == 1 # make sure that only one values left in the list test_value = await redis.lrange(key, 0, -1) assert test_value == [b'value:1'] with pytest.raises(TypeError): await redis.lrem(None, 0, b'value:0') with pytest.raises(TypeError): await redis.lrem(key, b'ten', b'value:0') @pytest.mark.run_loop async def test_lset(redis): key, value = b'key:lset', 'value:{}' values = [value.format(i).encode('utf-8') for i in range(0, 3)] await redis.rpush(key, *values) await redis.lset(key, 0, b'foo') await redis.lset(key, -1, b'baz') await redis.lset(key, -2, b'zap') test_value = 
await redis.lrange(key, 0, -1) assert test_value == [b'foo', b'zap', b'baz'] with pytest.raises(TypeError): await redis.lset(None, 0, b'value:0') with pytest.raises(ReplyError): await redis.lset(key, 100, b'value:0') with pytest.raises(TypeError): await redis.lset(key, b'one', b'value:0') @pytest.mark.run_loop async def test_ltrim(redis): key, value = b'key:ltrim', 'value:{}' values = [value.format(i).encode('utf-8') for i in range(0, 10)] await redis.rpush(key, *values) # trim with negative indexes await redis.ltrim(key, 0, -5) test_value = await redis.lrange(key, 0, -1) assert test_value == values[:-4] # trim with positive indexes await redis.ltrim(key, 0, 2) test_value = await redis.lrange(key, 0, -1) assert test_value == values[:3] # try to trim out of range indexes res = await redis.ltrim(key, 100, 110) assert res is True test_value = await redis.lrange(key, 0, -1) assert test_value == [] with pytest.raises(TypeError): await redis.ltrim(None, 0, -1) with pytest.raises(TypeError): await redis.ltrim(key, b'zero', -1) with pytest.raises(TypeError): await redis.ltrim(key, 0, b'one') @pytest.mark.run_loop async def test_rpop(redis): key = b'key:rpop:1' value1, value2 = b'rpop:value:1', b'rpop:value:2' # setup list result = await redis.rpush(key, value1, value2) assert result == 2 # make sure that left value poped test_value = await redis.rpop(key) assert test_value == value2 # pop remaining value, so list should become empty test_value = await redis.rpop(key) assert test_value == value1 # pop from empty list test_value = await redis.rpop(key) assert test_value is None # test encoding param await redis.rpush(key, 'value') test_value = await redis.rpop(key, encoding='utf-8') assert test_value == 'value' with pytest.raises(TypeError): await redis.rpop(None) @pytest.mark.run_loop async def test_rpoplpush(redis): key = b'key:rpoplpush:1' value1, value2 = b'rpoplpush:value:1', b'rpoplpush:value:2' destkey = b'destkey:rpoplpush:1' # setup list await redis.rpush(key, 
value1, value2) # move value in into head of new list result = await redis.rpoplpush(key, destkey) assert result == value2 # move last value result = await redis.rpoplpush(key, destkey) assert result == value1 # make sure that all values stored in new destkey list result = await redis.lrange(destkey, 0, -1) assert result == [value1, value2] # test encoding param result = await redis.rpoplpush( destkey, key, encoding='utf-8') assert result == 'rpoplpush:value:2' with pytest.raises(TypeError): await redis.rpoplpush(None, destkey) with pytest.raises(TypeError): await redis.rpoplpush(key, None) @pytest.mark.run_loop async def test_rpush(redis): key = b'key:rpush' value1, value2 = b'value:1', b'value:2' # add multiple values to the list, with key that does not exists result = await redis.rpush(key, value1, value2) assert result == 2 # make sure that values actually inserted in right placed and order test_value = await redis.lrange(key, 0, -1) assert test_value == [value1, value2] with pytest.raises(TypeError): await redis.rpush(None, value1) @pytest.mark.run_loop async def test_rpushx(redis): key = b'key:rpushx' value1, value2 = b'value:1', b'value:2' # add multiple values to the list, with key that does not exists # so value should not be pushed result = await redis.rpushx(key, value2) assert result == 0 # init key with list by using regular rpush result = await redis.rpush(key, value1) assert result == 1 result = await redis.rpushx(key, value2) assert result == 2 # make sure that values actually inserted in right placed and order test_value = await redis.lrange(key, 0, -1) assert test_value == [value1, value2] with pytest.raises(TypeError): await redis.rpushx(None, value1) aioredis-1.0.0/tests/sentinel_failover_test.py0000644000175000017500000001403413203624357022401 0ustar alexeyalexey00000000000000import pytest import asyncio import sys from aioredis import ( SlaveNotFoundError, ReadOnlyError, ) pytestmark = pytest.redis_version(2, 8, 12, reason="Sentinel v2 
required") if sys.platform == 'win32': pytestmark = pytest.mark.skip(reason="unstable on windows") @pytest.mark.xfail @pytest.mark.run_loop(timeout=40) async def test_auto_failover(start_sentinel, start_server, create_sentinel, create_connection, loop): server1 = start_server('master-failover', ['slave-read-only yes']) start_server('slave-failover1', ['slave-read-only yes'], slaveof=server1) start_server('slave-failover2', ['slave-read-only yes'], slaveof=server1) sentinel1 = start_sentinel('sentinel-failover1', server1, quorum=2) sentinel2 = start_sentinel('sentinel-failover2', server1, quorum=2) sp = await create_sentinel([sentinel1.tcp_address, sentinel2.tcp_address]) _, old_port = await sp.master_address(server1.name) # ignoring host assert old_port == server1.tcp_address.port master = sp.master_for(server1.name) res = await master.role() assert res.role == 'master' assert master.address is not None assert master.address[1] == old_port # wait failover conn = await create_connection(server1.tcp_address) await conn.execute("debug", "sleep", 6) await asyncio.sleep(3, loop=loop) # _, new_port = await sp.master_address(server1.name) # assert new_port != old_port # assert new_port == server2.tcp_address.port assert (await master.set("key", "val")) assert master.address is not None assert master.address[1] != old_port @pytest.mark.run_loop async def test_sentinel_normal(sentinel, create_sentinel): redis_sentinel = await create_sentinel([sentinel.tcp_address]) redis = redis_sentinel.master_for('masterA') info = await redis.role() assert info.role == 'master' key, field, value = b'key:hset', b'bar', b'zap' exists = await redis.hexists(key, field) if exists: ret = await redis.hdel(key, field) assert ret != 1 ret = await redis.hset(key, field, value) assert ret == 1 ret = await redis.hset(key, field, value) assert ret == 0 @pytest.mark.xfail(reason="same sentinel; single master;") @pytest.mark.run_loop async def test_sentinel_slave(sentinel, create_sentinel): 
redis_sentinel = await create_sentinel([sentinel.tcp_address]) redis = redis_sentinel.slave_for('masterA') info = await redis.role() assert info.role == 'slave' key, field, value = b'key:hset', b'bar', b'zap' # redis = await get_slave_connection() exists = await redis.hexists(key, field) if exists: with pytest.raises(ReadOnlyError): await redis.hdel(key, field) with pytest.raises(ReadOnlyError): await redis.hset(key, field, value) @pytest.mark.xfail(reason="Need proper sentinel configuration") @pytest.mark.run_loop # (timeout=600) async def test_sentinel_slave_fail(sentinel, create_sentinel, loop): redis_sentinel = await create_sentinel([sentinel.tcp_address]) key, field, value = b'key:hset', b'bar', b'zap' redis = redis_sentinel.slave_for('masterA') exists = await redis.hexists(key, field) if exists: with pytest.raises(ReadOnlyError): await redis.hdel(key, field) with pytest.raises(ReadOnlyError): await redis.hset(key, field, value) ret = await redis_sentinel.failover('masterA') assert ret is True await asyncio.sleep(2, loop=loop) with pytest.raises(ReadOnlyError): await redis.hset(key, field, value) ret = await redis_sentinel.failover('masterA') assert ret is True await asyncio.sleep(2, loop=loop) while True: try: await asyncio.sleep(1, loop=loop) await redis.hset(key, field, value) except SlaveNotFoundError: continue except ReadOnlyError: break @pytest.mark.xfail(reason="Need proper sentinel configuration") @pytest.mark.run_loop async def test_sentinel_normal_fail(sentinel, create_sentinel, loop): redis_sentinel = await create_sentinel([sentinel.tcp_address]) key, field, value = b'key:hset', b'bar', b'zap' redis = redis_sentinel.master_for('masterA') exists = await redis.hexists(key, field) if exists: ret = await redis.hdel(key, field) assert ret == 1 ret = await redis.hset(key, field, value) assert ret == 1 ret = await redis_sentinel.failover('masterA') assert ret is True await asyncio.sleep(2, loop=loop) ret = await redis.hset(key, field, value) assert ret == 
0 ret = await redis_sentinel.failover('masterA') assert ret is True await asyncio.sleep(2, loop=loop) redis = redis_sentinel.slave_for('masterA') while True: try: await redis.hset(key, field, value) await asyncio.sleep(1, loop=loop) # redis = await get_slave_connection() except ReadOnlyError: break @pytest.mark.xfail(reason="same sentinel; single master;") @pytest.mark.run_loop async def test_failover_command(sentinel, create_sentinel, loop): master_name = 'masterA' redis_sentinel = await create_sentinel([sentinel.tcp_address]) orig_master = await redis_sentinel.master_address(master_name) ret = await redis_sentinel.failover(master_name) assert ret is True await asyncio.sleep(2, loop=loop) new_master = await redis_sentinel.master_address(master_name) assert orig_master != new_master ret = await redis_sentinel.failover(master_name) assert ret is True await asyncio.sleep(2, loop=loop) new_master = await redis_sentinel.master_address(master_name) assert orig_master == new_master redis = redis_sentinel.slave_for(master_name) key, field, value = b'key:hset', b'bar', b'zap' while True: try: await asyncio.sleep(1, loop=loop) await redis.hset(key, field, value) except SlaveNotFoundError: pass except ReadOnlyError: break aioredis-1.0.0/tests/encode_command_test.py0000644000175000017500000000276613203624357021635 0ustar alexeyalexey00000000000000import pytest from aioredis.util import encode_command def test_encode_bytes(): res = encode_command(b'Hello') assert res == b'*1\r\n$5\r\nHello\r\n' res = encode_command(b'Hello', b'World') assert res == b'*2\r\n$5\r\nHello\r\n$5\r\nWorld\r\n' res = encode_command(b'\0') assert res == b'*1\r\n$1\r\n\0\r\n' res = encode_command(bytearray(b'Hello\r\n')) assert res == b'*1\r\n$7\r\nHello\r\n\r\n' def test_encode_bytearray(): res = encode_command(bytearray(b'Hello')) assert res == b'*1\r\n$5\r\nHello\r\n' res = encode_command(bytearray(b'Hello'), bytearray(b'world')) assert res == b'*2\r\n$5\r\nHello\r\n$5\r\nworld\r\n' def 
test_encode_str(): res = encode_command('Hello') assert res == b'*1\r\n$5\r\nHello\r\n' res = encode_command('Hello', 'world') assert res == b'*2\r\n$5\r\nHello\r\n$5\r\nworld\r\n' def test_encode_int(): res = encode_command(1) assert res == b'*1\r\n$1\r\n1\r\n' res = encode_command(-1) assert res == b'*1\r\n$2\r\n-1\r\n' def test_encode_float(): res = encode_command(1.0) assert res == b'*1\r\n$3\r\n1.0\r\n' res = encode_command(-1.0) assert res == b'*1\r\n$4\r\n-1.0\r\n' def test_encode_empty(): res = encode_command() assert res == b'*0\r\n' def test_encode_errors(): with pytest.raises(TypeError): encode_command(dict()) with pytest.raises(TypeError): encode_command(list()) with pytest.raises(TypeError): encode_command(None) aioredis-1.0.0/tests/locks_test.py0000644000175000017500000000151713203624357020006 0ustar alexeyalexey00000000000000import asyncio import pytest from aioredis.locks import Lock @pytest.mark.run_loop async def test_finished_waiter_cancelled(loop): lock = Lock(loop=loop) ta = asyncio.ensure_future(lock.acquire(), loop=loop) await asyncio.sleep(0, loop=loop) assert lock.locked() tb = asyncio.ensure_future(lock.acquire(), loop=loop) await asyncio.sleep(0, loop=loop) assert len(lock._waiters) == 1 # Create a second waiter, wake up the first, and cancel it. 
# ---- aioredis-1.0.0/tests/pubsub_receiver_test.py ----
import pytest
import asyncio
import json
import sys

from unittest import mock

from aioredis import ChannelClosedError
from aioredis.abc import AbcChannel
from aioredis.pubsub import Receiver, _Sender


def test_listener_channel(loop):
    mpsc = Receiver(loop=loop)
    assert not mpsc.is_active

    ch_a = mpsc.channel("channel:1")
    assert isinstance(ch_a, AbcChannel)
    assert mpsc.is_active

    # Asking for the same channel name returns the same sender object.
    ch_b = mpsc.channel('channel:1')
    assert ch_a is ch_b
    assert ch_a.name == ch_b.name
    assert ch_a.is_pattern == ch_b.is_pattern
    assert mpsc.is_active

    # remember id; drop refs to objects and create new one;
    ch_a.close()
    assert not ch_a.is_active

    assert not mpsc.is_active
    ch = mpsc.channel("channel:1")
    assert ch is not ch_a

    assert dict(mpsc.channels) == {b'channel:1': ch}
    assert dict(mpsc.patterns) == {}


def test_listener_pattern(loop):
    mpsc = Receiver(loop=loop)
    assert not mpsc.is_active

    ch_a = mpsc.pattern("*")
    assert isinstance(ch_a, AbcChannel)
    assert mpsc.is_active

    # Asking for the same pattern returns the same sender object.
    ch_b = mpsc.pattern('*')
    assert ch_a is ch_b
    assert ch_a.name == ch_b.name
    assert ch_a.is_pattern == ch_b.is_pattern
    assert mpsc.is_active

    # remember id; drop refs to objects and create new one;
    ch_a.close()
    assert not ch_a.is_active

    assert not mpsc.is_active
    ch = mpsc.pattern("*")
    assert ch is not ch_a

    assert dict(mpsc.channels) == {}
    assert dict(mpsc.patterns) == {b'*': ch}


@pytest.mark.run_loop
async def test_sender(loop):
    receiver = mock.Mock()

    sender = _Sender(receiver, 'name', is_pattern=False, loop=loop)
    assert isinstance(sender, AbcChannel)
    assert sender.name == b'name'
    assert sender.is_pattern is False
    assert sender.is_active is True

    # Reading directly from a sender is not supported.
    with pytest.raises(RuntimeError):
        await sender.get()
    assert receiver.mock_calls == []

    sender.put_nowait(b'some data')
    assert receiver.mock_calls == [
        mock.call._put_nowait(b'some data', sender=sender),
    ]


def test_sender_close():
    receiver = mock.Mock()
    loop = mock.Mock()
    sender = _Sender(receiver, 'name', is_pattern=False, loop=loop)
    sender.close()
    assert receiver.mock_calls == [mock.call._close(sender)]
    # Closing twice must not notify the receiver a second time.
    sender.close()
    assert receiver.mock_calls == [mock.call._close(sender)]
    receiver.reset_mock()
    assert receiver.mock_calls == []
    sender.close()
    assert receiver.mock_calls == []


@pytest.mark.run_loop
async def test_subscriptions(create_connection, server, loop):
    sub = await create_connection(server.tcp_address, loop=loop)
    pub = await create_connection(server.tcp_address, loop=loop)

    mpsc = Receiver(loop=loop)
    await sub.execute_pubsub('subscribe',
                             mpsc.channel('channel:1'),
                             mpsc.channel('channel:3'))
    res = await pub.execute("publish", "channel:3", "Hello world")
    assert res == 1
    res = await pub.execute("publish", "channel:1", "Hello world")
    assert res == 1
    assert mpsc.is_active

    ch, msg = await mpsc.get()
    assert ch.name == b'channel:3'
    assert not ch.is_pattern
    assert msg == b"Hello world"

    ch, msg = await mpsc.get()
    assert ch.name == b'channel:1'
    assert not ch.is_pattern
    assert msg == b"Hello world"


@pytest.mark.run_loop
async def test_unsubscribe(create_connection, server, loop):
    sub = await create_connection(server.tcp_address, loop=loop)
    pub = await create_connection(server.tcp_address, loop=loop)

    mpsc = Receiver(loop=loop)
    await sub.execute_pubsub('subscribe',
                             mpsc.channel('channel:1'),
                             mpsc.channel('channel:3'))
    res = await pub.execute("publish", "channel:3", "Hello world")
    assert res == 1
    res = await pub.execute("publish", "channel:1", "Hello world")
    assert res == 1
    assert mpsc.is_active

    assert (await mpsc.wait_message()) is True
    ch, msg = await mpsc.get()
    assert ch.name == b'channel:3'
    assert not ch.is_pattern
    assert msg == b"Hello world"

    assert (await mpsc.wait_message()) is True
    ch, msg = await mpsc.get()
    assert ch.name == b'channel:1'
    assert not ch.is_pattern
    assert msg == b"Hello world"

    await sub.execute_pubsub('unsubscribe', 'channel:1')
    # One subscription still remains, receiver stays active.
    assert mpsc.is_active

    res = await pub.execute("publish", "channel:3", "message")
    assert res == 1
    assert (await mpsc.wait_message()) is True
    ch, msg = await mpsc.get()
    assert ch.name == b'channel:3'
    assert not ch.is_pattern
    assert msg == b"message"

    await sub.execute_pubsub('unsubscribe', 'channel:3')
    assert not mpsc.is_active
    res = await mpsc.get()
    assert res is None


@pytest.mark.run_loop
async def test_stopped(create_connection, server, loop):
    sub = await create_connection(server.tcp_address, loop=loop)
    pub = await create_connection(server.tcp_address, loop=loop)

    mpsc = Receiver(loop=loop)
    await sub.execute_pubsub('subscribe', mpsc.channel('channel:1'))
    assert mpsc.is_active
    mpsc.stop()

    with pytest.logs('aioredis', 'DEBUG') as cm:
        await pub.execute('publish', 'channel:1', b'Hello')
        await asyncio.sleep(0, loop=loop)
    assert len(cm.output) == 1
    # Receiver must have 1 EndOfStream message
    warn_message = (
        "WARNING:aioredis:Pub/Sub listener message after stop: "
        "sender: <_Sender name:b'channel:1', is_pattern:False, receiver:"
        ">, data: b'Hello'"
    )
    assert cm.output == [warn_message]

    assert (await mpsc.get()) is None
    with pytest.raises(ChannelClosedError):
        await mpsc.get()
    res = await mpsc.wait_message()
    assert res is False


@pytest.mark.run_loop
async def test_wait_message(create_connection, server, loop):
    sub = await create_connection(server.tcp_address, loop=loop)
    pub = await create_connection(server.tcp_address, loop=loop)

    mpsc = Receiver(loop=loop)
    await sub.execute_pubsub('subscribe', mpsc.channel('channel:1'))
    fut = asyncio.ensure_future(mpsc.wait_message(), loop=loop)
    assert not fut.done()
    await asyncio.sleep(0, loop=loop)
    assert not fut.done()
    await pub.execute('publish', 'channel:1', 'hello')
    await asyncio.sleep(0, loop=loop)  # read in connection
    await asyncio.sleep(0, loop=loop)  # call Future.set_result
    assert fut.done()
    res = await fut
    assert res is True


@pytest.mark.run_loop
async def test_decode_message(loop):
    mpsc = Receiver(loop)
    ch = mpsc.channel('channel:1')
    ch.put_nowait(b'Some data')

    res = await mpsc.get(encoding='utf-8')
    assert isinstance(res[0], _Sender)
    assert res[1] == 'Some data'

    ch.put_nowait('{"hello": "world"}')
    res = await mpsc.get(decoder=json.loads)
    assert isinstance(res[0], _Sender)
    assert res[1] == {'hello': 'world'}

    ch.put_nowait(b'{"hello": "world"}')
    res = await mpsc.get(encoding='utf-8', decoder=json.loads)
    assert isinstance(res[0], _Sender)
    assert res[1] == {'hello': 'world'}


@pytest.mark.skipif(sys.version_info >= (3, 6),
                    reason="json.loads accept bytes since Python 3.6")
@pytest.mark.run_loop
async def test_decode_message_error(loop):
    mpsc = Receiver(loop)
    ch = mpsc.channel('channel:1')

    ch.put_nowait(b'{"hello": "world"}')
    unexpected = (mock.ANY, {'hello': 'world'})
    # json.loads cannot decode bytes on Python < 3.6
    with pytest.raises(TypeError):
        assert (await mpsc.get(decoder=json.loads)) == unexpected

    ch = mpsc.pattern('*')
    ch.put_nowait((b'channel', b'{"hello": "world"}'))
    unexpected = (mock.ANY, b'channel', {'hello': 'world'})
    with pytest.raises(TypeError):
        assert (await mpsc.get(decoder=json.loads)) == unexpected


@pytest.mark.run_loop
async def test_decode_message_for_pattern(loop):
    mpsc = Receiver(loop)
    ch = mpsc.pattern('*')
    ch.put_nowait((b'channel', b'Some data'))

    res = await mpsc.get(encoding='utf-8')
    assert isinstance(res[0], _Sender)
    assert res[1] == (b'channel', 'Some data')

    ch.put_nowait((b'channel', '{"hello": "world"}'))
    res = await mpsc.get(decoder=json.loads)
    assert isinstance(res[0], _Sender)
    assert res[1] == (b'channel', {'hello': 'world'})

    ch.put_nowait((b'channel', b'{"hello": "world"}'))
    res = await mpsc.get(encoding='utf-8', decoder=json.loads)
    assert isinstance(res[0], _Sender)
    assert res[1] == (b'channel', {'hello': 'world'})
@pytest.mark.run_loop
async def test_pubsub_receiver_iter(create_redis, server, loop):
    sub = await create_redis(server.tcp_address, loop=loop)
    pub = await create_redis(server.tcp_address, loop=loop)

    mpsc = Receiver(loop=loop)

    async def coro(mpsc):
        lst = []
        async for msg in mpsc.iter():
            lst.append(msg)
        return lst

    tsk = asyncio.ensure_future(coro(mpsc), loop=loop)
    snd1, = await sub.subscribe(mpsc.channel('chan:1'))
    snd2, = await sub.subscribe(mpsc.channel('chan:2'))
    snd3, = await sub.psubscribe(mpsc.pattern('chan:*'))

    subscribers = await pub.publish_json('chan:1', {'Hello': 'World'})
    assert subscribers > 1
    subscribers = await pub.publish_json('chan:2', ['message'])
    assert subscribers > 1
    loop.call_later(0, mpsc.stop)
    # await asyncio.sleep(0, loop=loop)
    assert await tsk == [
        (snd1, b'{"Hello": "World"}'),
        (snd3, (b'chan:1', b'{"Hello": "World"}')),
        (snd2, b'["message"]'),
        (snd3, (b'chan:2', b'["message"]')),
    ]
    assert not mpsc.is_active


@pytest.mark.run_loop(timeout=5)
async def test_pubsub_receiver_call_stop_with_empty_queue(
        create_redis, server, loop):
    sub = await create_redis(server.tcp_address, loop=loop)

    mpsc = Receiver(loop=loop)

    # FIXME: currently at least one subscriber is needed
    snd1, = await sub.subscribe(mpsc.channel('chan:1'))

    now = loop.time()
    loop.call_later(.5, mpsc.stop)
    async for i in mpsc.iter():  # noqa (flake8 bug with async for)
        assert False, "StopAsyncIteration not raised"
    dt = loop.time() - now
    assert dt <= 1.5
    assert not mpsc.is_active


# ---- aioredis-1.0.0/tests/transaction_commands_test.py ----
import asyncio

import pytest

from aioredis import ReplyError, MultiExecError, WatchVariableError
from aioredis import ConnectionClosedError


@pytest.mark.run_loop
async def test_multi_exec(redis, loop):
    await redis.delete('foo', 'bar')

    tr = redis.multi_exec()
    f1 = tr.incr('foo')
    f2 = tr.incr('bar')
    res = await tr.execute()
    assert res == [1, 1]
    res2 = await asyncio.gather(f1, f2, loop=loop)
    assert res == res2

    tr = redis.multi_exec()
    f1 = tr.incr('foo')
    f2 = tr.incr('bar')
    await tr.execute()
    assert (await f1) == 2
    assert (await f2) == 2

    tr = redis.multi_exec()
    f1 = tr.set('foo', 1.0)
    f2 = tr.incrbyfloat('foo', 1.2)
    res = await tr.execute()
    assert res == [True, 2.2]
    res2 = await asyncio.gather(f1, f2, loop=loop)
    assert res == res2

    tr = redis.multi_exec()
    f1 = tr.incrby('foo', 1.0)
    with pytest.raises(MultiExecError, match="increment must be .* int"):
        await tr.execute()
    with pytest.raises(TypeError):
        await f1


@pytest.mark.run_loop
async def test_empty(redis):
    # Executing an empty transaction yields an empty result list.
    tr = redis.multi_exec()
    res = await tr.execute()
    assert res == []


@pytest.mark.run_loop
async def test_double_execute(redis):
    tr = redis.multi_exec()
    await tr.execute()
    # A transaction object cannot be reused after execute().
    with pytest.raises(AssertionError):
        await tr.execute()
    with pytest.raises(AssertionError):
        await tr.incr('foo')


@pytest.mark.run_loop
async def test_connection_closed(redis):
    tr = redis.multi_exec()
    fut1 = tr.quit()
    fut2 = tr.incrby('foo', 1.0)
    fut3 = tr.incrby('foo', 1)
    with pytest.raises(MultiExecError):
        await tr.execute()

    assert fut1.done() is True
    assert fut2.done() is True
    assert fut3.done() is True
    assert fut1.exception() is not None
    assert fut2.exception() is not None
    assert fut3.exception() is not None
    assert not fut1.cancelled()
    assert not fut2.cancelled()
    assert not fut3.cancelled()

    try:
        assert (await fut1) == b'OK'
    except Exception as err:
        assert isinstance(err, (ConnectionClosedError, ConnectionError))
    assert fut2.cancelled() is False
    assert isinstance(fut2.exception(), TypeError)

    # assert fut3.cancelled() is True
    assert fut3.done() and not fut3.cancelled()
    assert isinstance(fut3.exception(),
                      (ConnectionClosedError, ConnectionError))


@pytest.mark.run_loop
async def test_discard(redis):
    await redis.delete('foo')
    tr = redis.multi_exec()
    fut1 = tr.incrby('foo', 1.0)
    fut2 = tr.connection.execute('MULTI')
    fut3 = tr.connection.execute('incr', 'foo')

    with pytest.raises(MultiExecError):
        await tr.execute()
    with pytest.raises(TypeError):
        await fut1
    with pytest.raises(ReplyError):
        await fut2
    # with pytest.raises(ReplyError):
    res = await fut3
    assert res == 1


@pytest.mark.run_loop
async def test_exec_error(redis):
    tr = redis.multi_exec()
    fut = tr.connection.execute('INCRBY', 'key', '1.0')
    with pytest.raises(MultiExecError):
        await tr.execute()
    with pytest.raises(ReplyError):
        await fut

    await redis.set('foo', 'bar')
    tr = redis.multi_exec()
    fut = tr.incrbyfloat('foo', 1.1)
    res = await tr.execute(return_exceptions=True)
    assert isinstance(res[0], ReplyError)
    with pytest.raises(ReplyError):
        await fut


@pytest.mark.run_loop
async def test_command_errors(redis):
    tr = redis.multi_exec()
    fut = tr.incrby('key', 1.0)
    with pytest.raises(MultiExecError):
        await tr.execute()
    with pytest.raises(TypeError):
        await fut


@pytest.mark.run_loop
async def test_several_command_errors(redis):
    tr = redis.multi_exec()
    fut1 = tr.incrby('key', 1.0)
    fut2 = tr.rename('bar', 'bar')
    with pytest.raises(MultiExecError):
        await tr.execute()
    with pytest.raises(TypeError):
        await fut1
    with pytest.raises(ValueError):
        await fut2


@pytest.mark.run_loop
async def test_error_in_connection(redis):
    await redis.set('foo', 1)
    tr = redis.multi_exec()
    fut1 = tr.mget('foo', None)
    fut2 = tr.incr('foo')
    with pytest.raises(MultiExecError):
        await tr.execute()
    with pytest.raises(TypeError):
        await fut1
    await fut2


@pytest.mark.run_loop
async def test_watch_unwatch(redis):
    res = await redis.watch('key')
    assert res is True
    res = await redis.watch('key', 'key')
    assert res is True

    with pytest.raises(TypeError):
        await redis.watch(None)
    with pytest.raises(TypeError):
        await redis.watch('key', None)
    with pytest.raises(TypeError):
        await redis.watch('key', 'key', None)

    res = await redis.unwatch()
    assert res is True


@pytest.mark.run_loop
async def test_encoding(redis):
    res = await redis.set('key', 'value')
    assert res is True
    res = await redis.hmset(
        'hash-key', 'foo', 'val1', 'bar', 'val2')
    assert res is True

    tr = redis.multi_exec()
    fut1 = tr.get('key')
    fut2 = tr.get('key', encoding='utf-8')
    fut3 = tr.hgetall('hash-key', encoding='utf-8')
    await tr.execute()
    res = await fut1
    assert res == b'value'
    res = await fut2
    assert res == 'value'
    res = await fut3
    assert res == {'foo': 'val1', 'bar': 'val2'}


@pytest.mark.run_loop
async def test_global_encoding(redis, create_redis, server, loop):
    redis = await create_redis(
        server.tcp_address,
        loop=loop, encoding='utf-8')
    res = await redis.set('key', 'value')
    assert res is True
    res = await redis.hmset(
        'hash-key', 'foo', 'val1', 'bar', 'val2')
    assert res is True

    tr = redis.multi_exec()
    fut1 = tr.get('key')
    fut2 = tr.get('key', encoding='utf-8')
    fut3 = tr.hgetall('hash-key', encoding='utf-8')
    await tr.execute()
    res = await fut1
    assert res == 'value'
    res = await fut2
    assert res == 'value'
    res = await fut3
    assert res == {'foo': 'val1', 'bar': 'val2'}


@pytest.mark.run_loop
async def test_transaction__watch_error(redis, create_redis, server, loop):
    other = await create_redis(
        server.tcp_address, loop=loop)

    ok = await redis.set('foo', 'bar')
    assert ok is True

    ok = await redis.watch('foo')
    assert ok is True

    # A second client modifies the watched key -> EXEC must fail.
    ok = await other.set('foo', 'baz')
    assert ok is True

    tr = redis.multi_exec()
    fut1 = tr.set('foo', 'foo')
    fut2 = tr.get('bar')
    with pytest.raises(MultiExecError):
        await tr.execute()
    with pytest.raises(WatchVariableError):
        await fut1
    with pytest.raises(WatchVariableError):
        await fut2


@pytest.mark.run_loop
async def test_multi_exec_and_pool_release(redis):
    # Test the case when pool connection is released before
    # `exec` result is received.
    slow_script = """
    local a = tonumber(redis.call('time')[1])
    local b = a + 1
    while (a < b)
    do
        a = tonumber(redis.call('time')[1])
    end
    """
    tr = redis.multi_exec()
    fut1 = tr.eval(slow_script)
    ret, = await tr.execute()
    assert ret is None
    assert (await fut1) is None
# ---- aioredis-1.0.0/tests/set_commands_test.py ----
import pytest


async def add(redis, key, members):
    # Helper: add a single member via the raw connection.
    ok = await redis.connection.execute(b'sadd', key, members)
    assert ok == 1


@pytest.mark.run_loop
async def test_sadd(redis):
    key, member = b'key:sadd', b'hello'
    # add member to the set, expected result: 1
    test_result = await redis.sadd(key, member)
    assert test_result == 1

    # add other value, expected result: 1
    test_result = await redis.sadd(key, b'world')
    assert test_result == 1

    # add existing member to the set, expected result: 0
    test_result = await redis.sadd(key, member)
    assert test_result == 0

    with pytest.raises(TypeError):
        await redis.sadd(None, 10)


@pytest.mark.run_loop
async def test_scard(redis):
    key, member = b'key:scard', b'hello'

    # check that our set is empty one
    empty_size = await redis.scard(key)
    assert empty_size == 0

    # add more members to the set and check, set size on every step
    for i in range(1, 11):
        incr = str(i).encode('utf-8')
        await add(redis, key, member + incr)
        current_size = await redis.scard(key)
        assert current_size == i

    with pytest.raises(TypeError):
        await redis.scard(None)


@pytest.mark.run_loop
async def test_sdiff(redis):
    key1 = b'key:sdiff:1'
    key2 = b'key:sdiff:2'
    key3 = b'key:sdiff:3'

    members1 = (b'a', b'b', b'c', b'd')
    members2 = (b'c',)
    members3 = (b'a', b'c', b'e')

    await redis.sadd(key1, *members1)
    await redis.sadd(key2, *members2)
    await redis.sadd(key3, *members3)

    # test multiple keys
    test_result = await redis.sdiff(key1, key2, key3)
    assert set(test_result) == {b'b', b'd'}

    # test single key
    test_result = await redis.sdiff(key2)
    assert set(test_result) == {b'c'}

    with pytest.raises(TypeError):
        await redis.sdiff(None)
    with pytest.raises(TypeError):
        await redis.sdiff(key1, None)


@pytest.mark.run_loop
async def test_sdiffstore(redis):
    key1 = b'key:sdiffstore:1'
    key2 = b'key:sdiffstore:2'
    destkey = b'key:sdiffstore:destkey'
    members1 = (b'a', b'b', b'c')
    members2 = (b'c', b'd', b'e')

    await redis.sadd(key1, *members1)
    await redis.sadd(key2, *members2)

    # test basic use case, expected: since diff contains only two members
    test_result = await redis.sdiffstore(destkey, key1, key2)
    assert test_result == 2

    # make sure that destkey contains 2 members
    test_result = await redis.scard(destkey)
    assert test_result == 2

    # try sdiffstore in case none of sets exists
    test_result = await redis.sdiffstore(
        b'not:' + destkey, b'not:' + key1, b'not:' + key2)
    assert test_result == 0

    with pytest.raises(TypeError):
        await redis.sdiffstore(None, key1)
    with pytest.raises(TypeError):
        await redis.sdiffstore(destkey, None)
    with pytest.raises(TypeError):
        await redis.sdiffstore(destkey, key1, None)


@pytest.mark.run_loop
async def test_sinter(redis):
    key1 = b'key:sinter:1'
    key2 = b'key:sinter:2'
    key3 = b'key:sinter:3'

    members1 = (b'a', b'b', b'c', b'd')
    members2 = (b'c',)
    members3 = (b'a', b'c', b'e')

    await redis.sadd(key1, *members1)
    await redis.sadd(key2, *members2)
    await redis.sadd(key3, *members3)

    # test multiple keys
    test_result = await redis.sinter(key1, key2, key3)
    assert set(test_result) == {b'c'}

    # test single key
    test_result = await redis.sinter(key2)
    assert set(test_result) == {b'c'}

    with pytest.raises(TypeError):
        await redis.sinter(None)
    with pytest.raises(TypeError):
        await redis.sinter(key1, None)


@pytest.mark.run_loop
async def test_sinterstore(redis):
    key1 = b'key:sinterstore:1'
    key2 = b'key:sinterstore:2'
    destkey = b'key:sinterstore:destkey'
    members1 = (b'a', b'b', b'c')
    members2 = (b'c', b'd', b'e')

    await redis.sadd(key1, *members1)
    await redis.sadd(key2, *members2)

    # test basic use case, expected: since inter contains only one member
    test_result = await redis.sinterstore(destkey, key1, key2)
    assert test_result == 1

    # make sure that destkey contains only one member
    test_result = await redis.scard(destkey)
    assert test_result == 1

    # try sinterstore in case none of sets exists
    test_result = await redis.sinterstore(
        b'not:' + destkey, b'not:' + key1, b'not:' + key2)
    assert test_result == 0

    with pytest.raises(TypeError):
        await redis.sinterstore(None, key1)
    with pytest.raises(TypeError):
        await redis.sinterstore(destkey, None)
    with pytest.raises(TypeError):
        await redis.sinterstore(destkey, key1, None)


@pytest.mark.run_loop
async def test_sismember(redis):
    key, member = b'key:sismember', b'hello'
    # add member to the set, expected result: 1
    test_result = await redis.sadd(key, member)
    assert test_result == 1

    # test that value in set
    test_result = await redis.sismember(key, member)
    assert test_result == 1
    # test that value not in set
    test_result = await redis.sismember(key, b'world')
    assert test_result == 0

    with pytest.raises(TypeError):
        await redis.sismember(None, b'world')


@pytest.mark.run_loop
async def test_smembers(redis):
    key = b'key:smembers'
    member1 = b'hello'
    member2 = b'world'

    await redis.sadd(key, member1)
    await redis.sadd(key, member2)

    # test not empty set
    test_result = await redis.smembers(key)
    assert set(test_result) == {member1, member2}

    # test empty set
    test_result = await redis.smembers(b'not:' + key)
    assert test_result == []

    # test encoding param
    test_result = await redis.smembers(key, encoding='utf-8')
    assert set(test_result) == {'hello', 'world'}

    with pytest.raises(TypeError):
        await redis.smembers(None)


@pytest.mark.run_loop
async def test_smove(redis):
    key1 = b'key:smove:1'
    key2 = b'key:smove:2'
    member1 = b'one'
    member2 = b'two'
    member3 = b'three'

    await redis.sadd(key1, member1, member2)
    await redis.sadd(key2, member3)

    # move member2 to second set
    test_result = await redis.smove(key1, key2, member2)
    assert test_result == 1

    # check first set, member should be removed
    test_result = await redis.smembers(key1)
    assert test_result == [member1]

    # check second set, member should be added
    test_result = await redis.smembers(key2)
    assert set(test_result) == {member2, member3}

    # move to empty set
    test_result = await redis.smove(
        key1, b'not:' + key2, member1)
    assert test_result == 1

    # move from empty set (set with under key1 is empty now
    test_result = await redis.smove(
        key1, b'not:' + key2, member1)
    assert test_result == 0

    # move from set that does not exist to set that does not exist too
    test_result = await redis.smove(
        b'not:' + key1, b'other:not:' + key2, member1)
    assert test_result == 0

    with pytest.raises(TypeError):
        await redis.smove(None, key1, member1)
    with pytest.raises(TypeError):
        await redis.smove(key1, None, member1)


@pytest.mark.run_loop
async def test_spop(redis):
    key = b'key:spop:1'
    members = b'one', b'two', b'three'
    await redis.sadd(key, *members)

    for _ in members:
        test_result = await redis.spop(key)
        assert test_result in members

    # test with encoding
    members = 'four', 'five', 'six'
    await redis.sadd(key, *members)

    for _ in members:
        test_result = await redis.spop(key, encoding='utf-8')
        assert test_result in members

    # make sure set is empty, after all values poped
    test_result = await redis.smembers(key)
    assert test_result == []

    # try to pop data from empty set
    test_result = await redis.spop(b'not:' + key)
    assert test_result is None

    with pytest.raises(TypeError):
        await redis.spop(None)


@pytest.mark.run_loop
async def test_srandmember(redis):
    key = b'key:srandmember:1'
    members = b'one', b'two', b'three', b'four', b'five', b'six', b'seven'
    await redis.sadd(key, *members)

    for _ in members:
        test_result = await redis.srandmember(key)
        assert test_result in members

    # test with encoding
    test_result = await redis.srandmember(key, encoding='utf-8')
    strings = {'one', 'two', 'three', 'four', 'five', 'six', 'seven'}
    assert test_result in strings

    # make sure set contains all values, and nothing missing
    test_result = await redis.smembers(key)
    assert set(test_result) == set(members)

    # fetch 4 elements for the first time, as result 4 distinct values
    test_result1 = await redis.srandmember(key, 4)
    assert len(test_result1) == 4
    assert set(test_result1).issubset(members) is True

    # test negative count, same element may be returned multiple times
    test_result2 = await redis.srandmember(key, -10)
    assert len(test_result2) == 10
    assert set(test_result2).issubset(members) is True
    assert len(set(test_result2)) <= len(members)

    # pull member from empty set
    test_result = await redis.srandmember(b'not' + key)
    assert test_result is None

    with pytest.raises(TypeError):
        await redis.srandmember(None)


@pytest.mark.run_loop
async def test_srem(redis):
    key = b'key:srem:1'
    members = b'one', b'two', b'three', b'four', b'five', b'six', b'seven'
    await redis.sadd(key, *members)

    # remove one element from set
    test_result = await redis.srem(key, members[-1])
    assert test_result == 1

    # remove not existing element
    test_result = await redis.srem(key, b'foo')
    assert test_result == 0

    # remove not existing element from not existing set
    test_result = await redis.srem(b'not:' + key, b'foo')
    assert test_result == 0

    # remove multiple elements from set
    test_result = await redis.srem(key, *members[:-1])
    assert test_result == 6

    with pytest.raises(TypeError):
        await redis.srem(None, members)


@pytest.mark.run_loop
async def test_sunion(redis):
    key1 = b'key:sunion:1'
    key2 = b'key:sunion:2'
    key3 = b'key:sunion:3'

    members1 = [b'a', b'b', b'c', b'd']
    members2 = [b'c']
    members3 = [b'a', b'c', b'e']

    await redis.sadd(key1, *members1)
    await redis.sadd(key2, *members2)
    await redis.sadd(key3, *members3)

    # test multiple keys
    test_result = await redis.sunion(key1, key2, key3)
    assert set(test_result) == set(members1 + members2 + members3)

    # test single key
    test_result = await redis.sunion(key2)
    assert set(test_result) == {b'c'}

    with pytest.raises(TypeError):
        await redis.sunion(None)
    with pytest.raises(TypeError):
        await redis.sunion(key1, None)


@pytest.mark.run_loop
async def test_sunionstore(redis):
    key1 = b'key:sunionstore:1'
    key2 = b'key:sunionstore:2'
    destkey = b'key:sunionstore:destkey'
    members1 = (b'a', b'b', b'c')
    members2 = (b'c', b'd', b'e')

    await redis.sadd(key1, *members1)
    await redis.sadd(key2, *members2)

    # test basic use case
    test_result = await redis.sunionstore(destkey, key1, key2)
    assert test_result == 5

    # make sure that destkey contains 5 members
    test_result = await redis.scard(destkey)
    assert test_result == 5

    # try sunionstore in case none of sets exists
    test_result = await redis.sunionstore(
        b'not:' + destkey, b'not:' + key1, b'not:' + key2)
    assert test_result == 0

    with pytest.raises(TypeError):
        await redis.sunionstore(None, key1)
    with pytest.raises(TypeError):
        await redis.sunionstore(destkey, None)
    with pytest.raises(TypeError):
        await redis.sunionstore(destkey, key1, None)


@pytest.redis_version(2, 8, 0, reason='SSCAN is available since redis>=2.8.0')
@pytest.mark.run_loop
async def test_sscan(redis):
    key = b'key:sscan'
    for i in range(1, 11):
        foo_or_bar = 'bar' if i % 3 else 'foo'
        member = 'member:{}:{}'.format(foo_or_bar, i).encode('utf-8')
        await add(redis, key, member)

    cursor, values = await redis.sscan(
        key, match=b'member:foo:*')
    assert len(values) == 3

    cursor, values = await redis.sscan(
        key, match=b'member:bar:*')
    assert len(values) == 7

    # SCAN family functions do not guarantee that the number (count) of
    # elements returned per call are in a given range. So here
    # just dummy test, that *count* argument does not break something
    cursor = b'0'
    test_values = []
    while cursor:
        cursor, values = await redis.sscan(key, cursor, count=2)
        test_values.extend(values)
    assert len(test_values) == 10

    with pytest.raises(TypeError):
        await redis.sscan(None)


@pytest.redis_version(2, 8, 0, reason='SSCAN is available since redis>=2.8.0')
@pytest.mark.run_loop
async def test_isscan(redis):
    key = b'key:sscan'
    for i in range(1, 11):
        foo_or_bar = 'bar' if i % 3 else 'foo'
        member = 'member:{}:{}'.format(foo_or_bar, i).encode('utf-8')
        assert await redis.sadd(key, member) == 1

    async def coro(cmd):
        lst = []
        async for i in cmd:
            lst.append(i)
        return lst

    ret = await coro(redis.isscan(key, match=b'member:foo:*'))
    assert set(ret) == {b'member:foo:3', b'member:foo:6', b'member:foo:9'}

    ret = await coro(redis.isscan(key, match=b'member:bar:*'))
    assert set(ret) == {b'member:bar:1', b'member:bar:2', b'member:bar:4',
                        b'member:bar:5', b'member:bar:7', b'member:bar:8',
                        b'member:bar:10'}

    # SCAN family functions do not guarantee that the number (count) of
    # elements returned per call are in a given range. So here
    # just dummy test, that *count* argument does not break something
    ret = await coro(redis.isscan(key, count=2))
    assert set(ret) == {b'member:foo:3', b'member:foo:6', b'member:foo:9',
                        b'member:bar:1', b'member:bar:2', b'member:bar:4',
                        b'member:bar:5', b'member:bar:7', b'member:bar:8',
                        b'member:bar:10'}

    with pytest.raises(TypeError):
        await redis.isscan(None)
# ---- aioredis-1.0.0/tests/pool_test.py ----
import asyncio

import pytest
import async_timeout
from unittest.mock import patch

from aioredis import (
    ReplyError,
    PoolClosedError,
    ConnectionClosedError,
    ConnectionsPool,
    MaxClientsError,
    )


def _assert_defaults(pool):
    assert isinstance(pool, ConnectionsPool)
    assert pool.minsize == 1
    assert pool.maxsize == 10
    assert pool.size == 1
    assert pool.freesize == 1
    assert pool._close_waiter is None


def test_connect(pool):
    _assert_defaults(pool)


def test_global_loop(create_pool, loop, server):
    asyncio.set_event_loop(loop)

    pool = loop.run_until_complete(create_pool(
        server.tcp_address))
    _assert_defaults(pool)


@pytest.mark.run_loop
async def test_clear(pool):
    _assert_defaults(pool)

    await pool.clear()
    assert pool.freesize == 0


@pytest.mark.run_loop
@pytest.mark.parametrize('minsize', [None, -100, 0.0, 100])
async def test_minsize(minsize, create_pool, loop, server):
    with pytest.raises(AssertionError):
        await create_pool(
            server.tcp_address,
            minsize=minsize, maxsize=10, loop=loop)


@pytest.mark.run_loop
@pytest.mark.parametrize('maxsize', [None, -100, 0.0, 1])
async def test_maxsize(maxsize, create_pool, loop, server):
    with pytest.raises(AssertionError):
        await create_pool(
            server.tcp_address,
            minsize=2, maxsize=maxsize, loop=loop)


@pytest.mark.run_loop
async def test_create_connection_timeout(create_pool, loop, server):
    with patch.object(loop, 'create_connection') as open_conn_mock:
        open_conn_mock.side_effect = lambda *a, **kw: asyncio.sleep(
            0.2, loop=loop)
        with pytest.raises(asyncio.TimeoutError):
            await create_pool(
                server.tcp_address, loop=loop,
                create_connection_timeout=0.1)


def test_no_yield_from(pool):
    with pytest.raises(RuntimeError):
        with pool:
            pass  # pragma: no cover


@pytest.mark.run_loop
async def test_simple_command(create_pool, loop, server):
    pool = await create_pool(
        server.tcp_address,
        minsize=10, loop=loop)

    with (await pool) as conn:
        msg = await conn.execute('echo', 'hello')
        assert msg == b'hello'
        assert pool.size == 10
        assert pool.freesize == 9
    assert pool.size == 10
    assert pool.freesize == 10


@pytest.mark.run_loop
async def test_create_new(create_pool, loop, server):
    pool = await create_pool(
        server.tcp_address,
        minsize=1, loop=loop)
    assert pool.size == 1
    assert pool.freesize == 1

    with (await pool):
        assert pool.size == 1
        assert pool.freesize == 0

        with (await pool):
            assert pool.size == 2
            assert pool.freesize == 0

    assert pool.size == 2
    assert pool.freesize == 2


@pytest.mark.run_loop
async def test_create_constraints(create_pool, loop, server):
    pool = await create_pool(
        server.tcp_address,
        minsize=1, maxsize=1, loop=loop)
    assert pool.size == 1
    assert pool.freesize == 1

    with (await pool):
        assert pool.size == 1
        assert pool.freesize == 0

        with pytest.raises(asyncio.TimeoutError):
            await asyncio.wait_for(pool.acquire(),
                                   timeout=0.2,
                                   loop=loop)


@pytest.mark.run_loop
async def test_create_no_minsize(create_pool, loop, server):
    pool = await create_pool(
        server.tcp_address,
        minsize=0, maxsize=1, loop=loop)
    assert pool.size == 0
    assert pool.freesize == 0

    with (await pool):
        assert pool.size == 1
        assert pool.freesize == 0

        with pytest.raises(asyncio.TimeoutError):
            await asyncio.wait_for(pool.acquire(),
                                   timeout=0.2,
                                   loop=loop)
    assert pool.size == 1
    assert pool.freesize == 1


@pytest.mark.run_loop
async def test_create_pool_cls(create_pool, loop, server):

    class MyPool(ConnectionsPool):
        pass

    pool = await create_pool(
        server.tcp_address,
        loop=loop,
        pool_cls=MyPool)

    assert isinstance(pool, MyPool)


@pytest.mark.run_loop
async def test_create_pool_cls_invalid(create_pool, loop, server):
    with pytest.raises(AssertionError):
        await create_pool(
            server.tcp_address,
            loop=loop,
            pool_cls=type)


@pytest.mark.run_loop
async def test_release_closed(create_pool, loop, server):
    pool = await create_pool(
        server.tcp_address,
        minsize=1, loop=loop)
    assert pool.size == 1
    assert pool.freesize == 1

    with (await pool) as conn:
        conn.close()
        await conn.wait_closed()
    assert pool.size == 0
    assert pool.freesize == 0


@pytest.mark.run_loop
async def test_release_pending(create_pool, loop, server):
    pool = await create_pool(
        server.tcp_address,
        minsize=1, loop=loop)
    assert pool.size == 1
    assert pool.freesize == 1

    with pytest.logs('aioredis', 'WARNING') as cm:
        with (await pool) as conn:
            try:
                await asyncio.wait_for(
                    conn.execute(
                        b'blpop',
                        b'somekey:not:exists',
                        b'0'),
                    0.1,
                    loop=loop)
            except asyncio.TimeoutError:
                pass
    assert pool.size == 0
    assert pool.freesize == 0
    assert cm.output == [
        'WARNING:aioredis:Connection '
        ' has pending commands, closing it.'
    ]


@pytest.mark.run_loop
async def test_release_bad_connection(create_pool, create_redis,
                                      loop, server):
    pool = await create_pool(
        server.tcp_address,
        loop=loop)
    conn = await pool.acquire()
    assert conn.address[0] in ('127.0.0.1', '::1')
    assert conn.address[1] == server.tcp_address.port
    other_conn = await create_redis(
        server.tcp_address,
        loop=loop)
    with pytest.raises(AssertionError):
        pool.release(other_conn)

    pool.release(conn)
    other_conn.close()
    await other_conn.wait_closed()


@pytest.mark.run_loop
async def test_select_db(create_pool, loop, server):
    pool = await create_pool(
        server.tcp_address,
        loop=loop)

    await pool.select(1)
    with (await pool) as conn:
        assert conn.db == 1


@pytest.mark.run_loop
async def test_change_db(create_pool, loop, server):
    pool = await create_pool(
        server.tcp_address,
        minsize=1, db=0,
        loop=loop)
    assert pool.size == 1
    assert pool.freesize == 1

    with (await pool) as conn:
        await conn.select(1)
    assert pool.size == 0
    assert pool.freesize == 0

    with (await pool):
        assert pool.size == 1
        assert pool.freesize == 0

        await pool.select(1)
        assert pool.db == 1
        assert pool.size == 1
        assert pool.freesize == 0
    assert pool.size == 0
    assert pool.freesize == 0
    assert pool.db == 1


@pytest.mark.run_loop
async def test_change_db_errors(create_pool, loop, server):
    pool = await create_pool(
        server.tcp_address,
        minsize=1, db=0,
        loop=loop)

    with pytest.raises(TypeError):
        await pool.select(None)
    assert pool.db == 0

    with (await pool):
        pass
    assert pool.size == 1
    assert pool.freesize == 1

    with pytest.raises(TypeError):
        await pool.select(None)
    assert pool.db == 0
    with pytest.raises(ValueError):
        await pool.select(-1)
    assert pool.db == 0
    with pytest.raises(ReplyError):
        await pool.select(100000)
    assert pool.db == 0


@pytest.mark.xfail(reason="Need to refactor this test")
@pytest.mark.run_loop
async def test_select_and_create(create_pool, loop, server):
    # trying to model situation when select and acquire
    # called simultaneously
    # but acquire freezes on _wait_select and
    # then continues with proper db
    # TODO: refactor this test as there's no _wait_select any more.
    with async_timeout.timeout(10, loop=loop):
        pool = await create_pool(
            server.tcp_address,
            minsize=1, db=0,
            loop=loop)
        db = 0
        while True:
            db = (db + 1) & 1
            _, conn = await asyncio.gather(pool.select(db),
                                           pool.acquire(),
                                           loop=loop)
            assert pool.db == db
            pool.release(conn)
            if conn.db == db:
                break
    # await asyncio.wait_for(test(), 3, loop=loop)


@pytest.mark.run_loop
async def test_response_decoding(create_pool, loop, server):
    pool = await create_pool(
        server.tcp_address,
        encoding='utf-8', loop=loop)

    assert pool.encoding == 'utf-8'
    with (await pool) as conn:
        await conn.execute('set', 'key', 'value')
    with (await pool) as conn:
        res = await conn.execute('get', 'key')
        assert res == 'value'


@pytest.mark.run_loop
async def test_hgetall_response_decoding(create_pool, loop, server):
    pool = await create_pool(
        server.tcp_address,
        encoding='utf-8', loop=loop)

    assert pool.encoding == 'utf-8'
    with (await pool) as conn:
        await conn.execute('del', 'key1')
        await conn.execute('hmset', 'key1', 'foo', 'bar')
        await conn.execute('hmset', 'key1', 'baz', 'zap')
    with (await pool) as conn:
        res = await conn.execute('hgetall', 'key1')
        assert res == ['foo', 'bar', 'baz', 'zap']


@pytest.mark.run_loop
async def test_crappy_multiexec(create_pool, loop, server):
    pool = await create_pool(
        server.tcp_address,
        encoding='utf-8', loop=loop,
        minsize=1, maxsize=1)

    with (await pool) as conn:
        await conn.execute('set', 'abc', 'def')
        await conn.execute('multi')
        await conn.execute('set', 'abc', 'fgh')
    assert conn.closed is True
    with (await pool) as conn:
        value = await conn.execute('get', 'abc')
    assert value == 'def'


@pytest.mark.run_loop
async def test_pool_size_growth(create_pool, server, loop):
    pool = await create_pool(
        server.tcp_address,
        loop=loop,
        minsize=1, maxsize=1)

    done = set()
    tasks = []

    async def task1(i):
        with (await pool):
            assert pool.size <= pool.maxsize
            assert pool.freesize == 0
            await asyncio.sleep(0.2, loop=loop)
            done.add(i)

    async def task2():
        with (await pool):
            assert pool.size <= pool.maxsize
            # NOTE(review): the remainder of this test is truncated in this
            # chunk of the source; the visible portion ends here.
pool.freesize >= 0 assert done == {0, 1} for _ in range(2): tasks.append(asyncio.ensure_future(task1(_), loop=loop)) tasks.append(asyncio.ensure_future(task2(), loop=loop)) await asyncio.gather(*tasks, loop=loop) @pytest.mark.run_loop async def test_pool_with_closed_connections(create_pool, server, loop): pool = await create_pool( server.tcp_address, loop=loop, minsize=1, maxsize=2) assert 1 == pool.freesize conn1 = pool._pool[0] conn1.close() assert conn1.closed is True assert 1 == pool.freesize with (await pool) as conn2: assert conn2.closed is False assert conn1 is not conn2 @pytest.mark.run_loop async def test_pool_close(create_pool, server, loop): pool = await create_pool( server.tcp_address, loop=loop) assert pool.closed is False with (await pool) as conn: assert (await conn.execute('ping')) == b'PONG' pool.close() await pool.wait_closed() assert pool.closed is True with pytest.raises(PoolClosedError): with (await pool) as conn: assert (await conn.execute('ping')) == b'PONG' @pytest.mark.run_loop async def test_pool_close__used(create_pool, server, loop): pool = await create_pool( server.tcp_address, loop=loop) assert pool.closed is False with (await pool) as conn: pool.close() await pool.wait_closed() assert pool.closed is True with pytest.raises(ConnectionClosedError): await conn.execute('ping') @pytest.mark.run_loop @pytest.redis_version(2, 8, 0, reason="maxclients config setting") @pytest.mark.xfail(reason="Redis returns 'Err max clients reached'") async def test_pool_check_closed_when_exception( create_pool, create_redis, start_server, loop): server = start_server('server-small') redis = await create_redis(server.tcp_address, loop=loop) await redis.config_set('maxclients', 2) with pytest.logs('aioredis', 'DEBUG') as cm: with pytest.raises((MaxClientsError, ConnectionError)): await create_pool(address=tuple(server.tcp_address), minsize=3, loop=loop) assert len(cm.output) >= 3 connect_msg = ( "DEBUG:aioredis:Creating tcp connection" " to ('localhost', 
{})".format(server.tcp_address.port)) assert cm.output[:2] == [connect_msg, connect_msg] assert cm.output[-1] == "DEBUG:aioredis:Closed 1 connection(s)" @pytest.mark.run_loop async def test_pool_get_connection(create_pool, server, loop): pool = await create_pool(server.tcp_address, minsize=1, maxsize=2, loop=loop) res = await pool.execute("set", "key", "val") assert res == b'OK' res = await pool.execute_pubsub("subscribe", "channel:1") assert res == [[b"subscribe", b"channel:1", 1]] res = await pool.execute("getset", "key", "value") assert res == b'val' res = await pool.execute_pubsub("subscribe", "channel:2") assert res == [[b"subscribe", b"channel:2", 2]] res = await pool.execute("get", "key") assert res == b'value' @pytest.mark.run_loop async def test_pool_get_connection_with_pipelining(create_pool, server, loop): pool = await create_pool(server.tcp_address, minsize=1, maxsize=2, loop=loop) fut1 = pool.execute('set', 'key', 'val') fut2 = pool.execute_pubsub("subscribe", "channel:1") fut3 = pool.execute('getset', 'key', 'next') fut4 = pool.execute_pubsub("subscribe", "channel:2") fut5 = pool.execute('get', 'key') res = await fut1 assert res == b'OK' res = await fut2 assert res == [[b"subscribe", b"channel:1", 1]] res = await fut3 assert res == b'val' res = await fut4 assert res == [[b"subscribe", b"channel:2", 2]] res = await fut5 assert res == b'next' @pytest.mark.run_loop async def test_pool_idle_close(create_pool, start_server, loop): server = start_server('idle') conn = await create_pool(server.tcp_address, minsize=2, loop=loop) ok = await conn.execute("config", "set", "timeout", 1) assert ok == b'OK' await asyncio.sleep(2, loop=loop) assert (await conn.execute('ping')) == b'PONG' @pytest.mark.run_loop async def test_await(create_pool, server, loop): pool = await create_pool( server.tcp_address, minsize=10, loop=loop) with await pool as conn: msg = await conn.execute('echo', 'hello') assert msg == b'hello' @pytest.mark.run_loop async def 
test_async_with(create_pool, server, loop): pool = await create_pool( server.tcp_address, minsize=10, loop=loop) async with pool.get() as conn: msg = await conn.execute('echo', 'hello') assert msg == b'hello' aioredis-1.0.0/tests/parse_url_test.py0000644000175000017500000001250013203624357020661 0ustar alexeyalexey00000000000000import pytest from aioredis.util import parse_url @pytest.mark.parametrize('url,expected_address,expected_options', [ # redis scheme ('redis://', ('localhost', 6379), {}), ('redis://localhost:6379', ('localhost', 6379), {}), ('redis://localhost:6379/', ('localhost', 6379), {}), ('redis://localhost:6379/0', ('localhost', 6379), {'db': 0}), ('redis://localhost:6379/1', ('localhost', 6379), {'db': 1}), ('redis://localhost:6379?db=1', ('localhost', 6379), {'db': 1}), ('redis://localhost:6379/?db=1', ('localhost', 6379), {'db': 1}), ('redis://redis-host', ('redis-host', 6379), {}), ('redis://redis-host', ('redis-host', 6379), {}), ('redis://host:1234', ('host', 1234), {}), ('redis://user@localhost', ('localhost', 6379), {}), ('redis://:secret@localhost', ('localhost', 6379), {'password': 'secret'}), ('redis://user:secret@localhost', ('localhost', 6379), {'password': 'secret'}), ('redis://localhost?password=secret', ('localhost', 6379), {'password': 'secret'}), ('redis://localhost?encoding=utf-8', ('localhost', 6379), {'encoding': 'utf-8'}), ('redis://localhost?ssl=true', ('localhost', 6379), {'ssl': True}), ('redis://localhost?timeout=1.0', ('localhost', 6379), {'timeout': 1.0}), ('redis://localhost?timeout=10', ('localhost', 6379), {'timeout': 10.0}), # rediss scheme ('rediss://', ('localhost', 6379), {'ssl': True}), ('rediss://localhost:6379', ('localhost', 6379), {'ssl': True}), ('rediss://localhost:6379/', ('localhost', 6379), {'ssl': True}), ('rediss://localhost:6379/0', ('localhost', 6379), {'ssl': True, 'db': 0}), ('rediss://localhost:6379/1', ('localhost', 6379), {'ssl': True, 'db': 1}), ('rediss://localhost:6379?db=1', ('localhost', 
6379), {'ssl': True, 'db': 1}), ('rediss://localhost:6379/?db=1', ('localhost', 6379), {'ssl': True, 'db': 1}), ('rediss://redis-host', ('redis-host', 6379), {'ssl': True}), ('rediss://redis-host', ('redis-host', 6379), {'ssl': True}), ('rediss://host:1234', ('host', 1234), {'ssl': True}), ('rediss://user@localhost', ('localhost', 6379), {'ssl': True}), ('rediss://:secret@localhost', ('localhost', 6379), {'ssl': True, 'password': 'secret'}), ('rediss://user:secret@localhost', ('localhost', 6379), {'ssl': True, 'password': 'secret'}), ('rediss://localhost?password=secret', ('localhost', 6379), {'ssl': True, 'password': 'secret'}), ('rediss://localhost?encoding=utf-8', ('localhost', 6379), {'ssl': True, 'encoding': 'utf-8'}), ('rediss://localhost?timeout=1.0', ('localhost', 6379), {'ssl': True, 'timeout': 1.0}), ('rediss://localhost?timeout=10', ('localhost', 6379), {'ssl': True, 'timeout': 10.0}), # unix scheme ('unix:///', '/', {}), ('unix:///redis.sock?db=12', '/redis.sock', {'db': 12}), ('unix:///redis.sock?encoding=utf-8', '/redis.sock', {'encoding': 'utf-8'}), ('unix:///redis.sock?ssl=true', '/redis.sock', {'ssl': True}), ('unix:///redis.sock?timeout=12', '/redis.sock', {'timeout': 12}), # no scheme ('/some/path/to/socket', '/some/path/to/socket', {}), ('/some/path/to/socket?db=1', '/some/path/to/socket?db=1', {}), ]) def test_good_url(url, expected_address, expected_options): address, options = parse_url(url) assert address == expected_address assert options == expected_options @pytest.mark.parametrize('url,expected_error', [ ('bad-scheme://localhost:6379/', ("Unsupported URI scheme", 'bad-scheme')), ('redis:///?db=1&db=2', ("Multiple parameters are not allowed", "db", "2")), ('redis:///?db=', ("Empty parameters are not allowed", "db", "")), ('redis:///?foo=', ("Empty parameters are not allowed", "foo", "")), ('unix://', ('Empty path is not allowed', 'unix://')), ('unix://host:123/', ('Netlocation is not allowed for unix scheme', 'host:123')), 
('unix://user:pass@host:123/', ('Netlocation is not allowed for unix scheme', 'user:pass@host:123')), ('unix://user:pass@/', ('Netlocation is not allowed for unix scheme', 'user:pass@')), ('redis:///01', ('Expected integer without leading zeroes', '01')), ('rediss:///01', ('Expected integer without leading zeroes', '01')), ('redis:///?db=01', ('Expected integer without leading zeroes', '01')), ('rediss:///?db=01', ('Expected integer without leading zeroes', '01')), ('redis:///1?db=2', ('Single DB value expected, got path and query', 1, 2)), ('rediss:///1?db=2', ('Single DB value expected, got path and query', 1, 2)), ('redis://:passwd@localhost/?password=passwd', ('Single password value is expected, got in net location and query')), ('redis:///?ssl=1', ("Expected 'ssl' param to be 'true' or 'false' only", '1')), ('redis:///?ssl=True', ("Expected 'ssl' param to be 'true' or 'false' only", 'True')), ]) def test_url_assertions(url, expected_error): with pytest.raises(AssertionError) as exc_info: parse_url(url) assert exc_info.value.args == (expected_error,) @pytest.mark.parametrize('url', [ 'redis:///bad-db-num', 'redis:///0/1', 'redis:///?db=bad-num', 'redis:///?db=-1', ]) def test_db_num_assertions(url): with pytest.raises(AssertionError, match="Invalid decimal integer"): parse_url(url) aioredis-1.0.0/tests/sorted_set_commands_test.py0000644000175000017500000006257413203624357022741 0ustar alexeyalexey00000000000000import itertools import pytest @pytest.mark.run_loop async def test_zadd(redis): key = b'key:zadd' res = await redis.zadd(key, 1, b'one') assert res == 1 res = await redis.zadd(key, 1, b'one') assert res == 0 res = await redis.zadd(key, 1, b'uno') assert res == 1 res = await redis.zadd(key, 2.5, b'two') assert res == 1 res = await redis.zadd(key, 3, b'three', 4, b'four') assert res == 2 res = await redis.zrange(key, 0, -1, withscores=False) assert res == [b'one', b'uno', b'two', b'three', b'four'] with pytest.raises(TypeError): await redis.zadd(None, 1, 
b'one') with pytest.raises(TypeError): await redis.zadd(key, b'two', b'one') with pytest.raises(TypeError): await redis.zadd(key, 3, b'three', 4) with pytest.raises(TypeError): await redis.zadd(key, 3, b'three', 'four', 4) @pytest.redis_version( 3, 0, 2, reason='ZADD options is available since redis>=3.0.2', ) @pytest.mark.run_loop async def test_zadd_options(redis): key = b'key:zaddopt' res = await redis.zadd(key, 0, b'one') assert res == 1 res = await redis.zadd( key, 1, b'one', 2, b'two', exist=redis.ZSET_IF_EXIST, ) assert res == 0 res = await redis.zscore(key, b'one') assert res == 1 res = await redis.zscore(key, b'two') assert res is None res = await redis.zadd( key, 1, b'one', 2, b'two', exist=redis.ZSET_IF_NOT_EXIST, ) assert res == 1 res = await redis.zscore(key, b'one') assert res == 1 res = await redis.zscore(key, b'two') assert res == 2 res = await redis.zrange(key, 0, -1, withscores=False) assert res == [b'one', b'two'] @pytest.mark.run_loop async def test_zcard(redis): key = b'key:zcard' pairs = [1, b'one', 2, b'two', 3, b'three'] res = await redis.zadd(key, *pairs) assert res == 3 res = await redis.zcard(key) assert res == 3 res = await redis.zadd(key, 1, b'ein') assert res == 1 res = await redis.zcard(key) assert res == 4 with pytest.raises(TypeError): await redis.zcard(None) @pytest.mark.run_loop async def test_zcount(redis): key = b'key:zcount' pairs = [1, b'one', 1, b'uno', 2.5, b'two', 3, b'three', 7, b'seven'] res = await redis.zadd(key, *pairs) assert res == 5 res_zcount = await redis.zcount(key) res_zcard = await redis.zcard(key) assert res_zcount == res_zcard res = await redis.zcount(key, 1, 3) assert res == 4 res = await redis.zcount(key, 3, 10) assert res == 2 res = await redis.zcount(key, 100, 200) assert res == 0 res = await redis.zcount( key, 1, 3, exclude=redis.ZSET_EXCLUDE_BOTH) assert res == 1 res = await redis.zcount( key, 1, 3, exclude=redis.ZSET_EXCLUDE_MIN) assert res == 2 res = await redis.zcount( key, 1, 3, 
exclude=redis.ZSET_EXCLUDE_MAX) assert res == 3 res = await redis.zcount( key, 1, exclude=redis.ZSET_EXCLUDE_MAX) assert res == 5 res = await redis.zcount( key, float('-inf'), 3, exclude=redis.ZSET_EXCLUDE_MIN) assert res == 4 with pytest.raises(TypeError): await redis.zcount(None) with pytest.raises(TypeError): await redis.zcount(key, 'one', 2) with pytest.raises(TypeError): await redis.zcount(key, 1.1, b'two') with pytest.raises(ValueError): await redis.zcount(key, 10, 1) @pytest.mark.run_loop async def test_zincrby(redis): key = b'key:zincrby' pairs = [1, b'one', 1, b'uno', 2.5, b'two', 3, b'three'] res = await redis.zadd(key, *pairs) res = await redis.zincrby(key, 1, b'one') assert res == 2 res = await redis.zincrby(key, -5, b'uno') assert res == -4 res = await redis.zincrby(key, 3.14, b'two') assert abs(res - 5.64) <= 0.00001 res = await redis.zincrby(key, -3.14, b'three') assert abs(res - -0.14) <= 0.00001 with pytest.raises(TypeError): await redis.zincrby(None, 5, 'one') with pytest.raises(TypeError): await redis.zincrby(key, 'one', 5) @pytest.mark.run_loop async def test_zinterstore(redis): zset1 = [2, 'one', 2, 'two'] zset2 = [3, 'one', 3, 'three'] await redis.zadd('zset1', *zset1) await redis.zadd('zset2', *zset2) res = await redis.zinterstore('zout', 'zset1', 'zset2') assert res == 1 res = await redis.zrange('zout', withscores=True) assert res == [(b'one', 5)] res = await redis.zinterstore( 'zout', 'zset1', 'zset2', aggregate=redis.ZSET_AGGREGATE_SUM) assert res == 1 res = await redis.zrange('zout', withscores=True) assert res == [(b'one', 5)] res = await redis.zinterstore( 'zout', 'zset1', 'zset2', aggregate=redis.ZSET_AGGREGATE_MIN) assert res == 1 res = await redis.zrange('zout', withscores=True) assert res == [(b'one', 2)] res = await redis.zinterstore( 'zout', 'zset1', 'zset2', aggregate=redis.ZSET_AGGREGATE_MAX) assert res == 1 res = await redis.zrange('zout', withscores=True) assert res == [(b'one', 3)] # weights with 
pytest.raises(AssertionError): await redis.zinterstore('zout', 'zset1', 'zset2', with_weights=True) res = await redis.zinterstore('zout', ('zset1', 2), ('zset2', 2), with_weights=True) assert res == 1 res = await redis.zrange('zout', withscores=True) assert res == [(b'one', 10)] @pytest.redis_version( 2, 8, 9, reason='ZLEXCOUNT is available since redis>=2.8.9') @pytest.mark.run_loop async def test_zlexcount(redis): key = b'key:zlexcount' pairs = [0, b'a', 0, b'b', 0, b'c', 0, b'd', 0, b'e'] res = await redis.zadd(key, *pairs) assert res == 5 res = await redis.zlexcount(key) assert res == 5 res = await redis.zlexcount(key, min=b'-', max=b'e') assert res == 5 res = await redis.zlexcount(key, min=b'a', max=b'e', include_min=False, include_max=False) assert res == 3 with pytest.raises(TypeError): await redis.zlexcount(None, b'a', b'e') with pytest.raises(TypeError): await redis.zlexcount(key, 10, b'e') with pytest.raises(TypeError): await redis.zlexcount(key, b'a', 20) @pytest.mark.parametrize('encoding', [None, 'utf-8']) @pytest.mark.run_loop async def test_zrange(redis, encoding): key = b'key:zrange' scores = [1, 1, 2.5, 3, 7] if encoding: members = ['one', 'uno', 'two', 'three', 'seven'] else: members = [b'one', b'uno', b'two', b'three', b'seven'] pairs = list(itertools.chain(*zip(scores, members))) rev_pairs = list(zip(members, scores)) res = await redis.zadd(key, *pairs) assert res == 5 res = await redis.zrange(key, 0, -1, withscores=False, encoding=encoding) assert res == members res = await redis.zrange(key, 0, -1, withscores=True, encoding=encoding) assert res == rev_pairs res = await redis.zrange(key, -2, -1, withscores=False, encoding=encoding) assert res == members[-2:] res = await redis.zrange(key, 1, 2, withscores=False, encoding=encoding) assert res == members[1:3] with pytest.raises(TypeError): await redis.zrange(None, 1, b'one') with pytest.raises(TypeError): await redis.zrange(key, b'first', -1) with pytest.raises(TypeError): await redis.zrange(key, 0, 
'last') @pytest.redis_version( 2, 8, 9, reason='ZRANGEBYLEX is available since redis>=2.8.9') @pytest.mark.run_loop async def test_zrangebylex(redis): key = b'key:zrangebylex' scores = [0] * 5 members = [b'a', b'b', b'c', b'd', b'e'] strings = [x.decode('utf-8') for x in members] pairs = list(itertools.chain(*zip(scores, members))) res = await redis.zadd(key, *pairs) assert res == 5 res = await redis.zrangebylex(key) assert res == members res = await redis.zrangebylex(key, encoding='utf-8') assert res == strings res = await redis.zrangebylex(key, min=b'-', max=b'd') assert res == members[:-1] res = await redis.zrangebylex(key, min=b'a', max=b'e', include_min=False, include_max=False) assert res == members[1:-1] res = await redis.zrangebylex(key, min=b'x', max=b'z') assert res == [] res = await redis.zrangebylex(key, min=b'e', max=b'a') assert res == [] res = await redis.zrangebylex(key, offset=1, count=2) assert res == members[1:3] with pytest.raises(TypeError): await redis.zrangebylex(None, b'a', b'e') with pytest.raises(TypeError): await redis.zrangebylex(key, 10, b'e') with pytest.raises(TypeError): await redis.zrangebylex(key, b'a', 20) with pytest.raises(TypeError): await redis.zrangebylex(key, b'a', b'e', offset=1) with pytest.raises(TypeError): await redis.zrangebylex(key, b'a', b'e', count=1) with pytest.raises(TypeError): await redis.zrangebylex(key, b'a', b'e', offset='one', count=1) with pytest.raises(TypeError): await redis.zrangebylex(key, b'a', b'e', offset=1, count='one') @pytest.mark.run_loop async def test_zrank(redis): key = b'key:zrank' scores = [1, 1, 2.5, 3, 7] members = [b'one', b'uno', b'two', b'three', b'seven'] pairs = list(itertools.chain(*zip(scores, members))) res = await redis.zadd(key, *pairs) assert res == 5 for i, m in enumerate(members): res = await redis.zrank(key, m) assert res == i res = await redis.zrank(key, b'not:exists') assert res is None with pytest.raises(TypeError): await redis.zrank(None, b'one') 
@pytest.mark.parametrize('encoding', [None, 'utf-8']) @pytest.mark.run_loop async def test_zrangebyscore(redis, encoding): key = b'key:zrangebyscore' scores = [1, 1, 2.5, 3, 7] if encoding: members = ['one', 'uno', 'two', 'three', 'seven'] else: members = [b'one', b'uno', b'two', b'three', b'seven'] pairs = list(itertools.chain(*zip(scores, members))) rev_pairs = list(zip(members, scores)) res = await redis.zadd(key, *pairs) assert res == 5 res = await redis.zrangebyscore(key, 1, 7, withscores=False, encoding=encoding) assert res == members res = await redis.zrangebyscore( key, 1, 7, withscores=False, exclude=redis.ZSET_EXCLUDE_BOTH, encoding=encoding) assert res == members[2:-1] res = await redis.zrangebyscore(key, 1, 7, withscores=True, encoding=encoding) assert res == rev_pairs res = await redis.zrangebyscore(key, 1, 10, offset=2, count=2, encoding=encoding) assert res == members[2:4] with pytest.raises(TypeError): await redis.zrangebyscore(None, 1, 7) with pytest.raises(TypeError): await redis.zrangebyscore(key, 10, b'e') with pytest.raises(TypeError): await redis.zrangebyscore(key, b'a', 20) with pytest.raises(TypeError): await redis.zrangebyscore(key, 1, 7, offset=1) with pytest.raises(TypeError): await redis.zrangebyscore(key, 1, 7, count=1) with pytest.raises(TypeError): await redis.zrangebyscore(key, 1, 7, offset='one', count=1) with pytest.raises(TypeError): await redis.zrangebyscore(key, 1, 7, offset=1, count='one') @pytest.mark.run_loop async def test_zrem(redis): key = b'key:zrem' scores = [1, 1, 2.5, 3, 7] members = [b'one', b'uno', b'two', b'three', b'seven'] pairs = list(itertools.chain(*zip(scores, members))) res = await redis.zadd(key, *pairs) assert res == 5 res = await redis.zrem(key, b'uno', b'one') assert res == 2 res = await redis.zrange(key, 0, -1) assert res == members[2:] res = await redis.zrem(key, b'not:exists') assert res == 0 res = await redis.zrem(b'not:' + key, b'not:exists') assert res == 0 with pytest.raises(TypeError): await 
redis.zrem(None, b'one') @pytest.redis_version( 2, 8, 9, reason='ZREMRANGEBYLEX is available since redis>=2.8.9') @pytest.mark.run_loop async def test_zremrangebylex(redis): key = b'key:zremrangebylex' members = [b'aaaa', b'b', b'c', b'd', b'e', b'foo', b'zap', b'zip', b'ALPHA', b'alpha'] scores = [0] * len(members) pairs = list(itertools.chain(*zip(scores, members))) res = await redis.zadd(key, *pairs) assert res == 10 res = await redis.zremrangebylex(key, b'alpha', b'omega', include_max=True, include_min=True) assert res == 6 res = await redis.zrange(key, 0, -1) assert res == [b'ALPHA', b'aaaa', b'zap', b'zip'] res = await redis.zremrangebylex(key, b'zap', b'zip', include_max=False, include_min=False) assert res == 0 res = await redis.zrange(key, 0, -1) assert res == [b'ALPHA', b'aaaa', b'zap', b'zip'] res = await redis.zremrangebylex(key) assert res == 4 res = await redis.zrange(key, 0, -1) assert res == [] with pytest.raises(TypeError): await redis.zremrangebylex(None, b'a', b'e') with pytest.raises(TypeError): await redis.zremrangebylex(key, 10, b'e') with pytest.raises(TypeError): await redis.zremrangebylex(key, b'a', 20) @pytest.mark.run_loop async def test_zremrangebyrank(redis): key = b'key:zremrangebyrank' scores = [0, 1, 2, 3, 4, 5] members = [b'zero', b'one', b'two', b'three', b'four', b'five'] pairs = list(itertools.chain(*zip(scores, members))) res = await redis.zadd(key, *pairs) assert res == 6 res = await redis.zremrangebyrank(key, 0, 1) assert res == 2 res = await redis.zrange(key, 0, -1) assert res == members[2:] res = await redis.zremrangebyrank(key, -2, -1) assert res == 2 res = await redis.zrange(key, 0, -1) assert res == members[2:-2] with pytest.raises(TypeError): await redis.zremrangebyrank(None, 1, 2) with pytest.raises(TypeError): await redis.zremrangebyrank(key, b'first', -1) with pytest.raises(TypeError): await redis.zremrangebyrank(key, 0, 'last') @pytest.mark.run_loop async def test_zremrangebyscore(redis): key = 
b'key:zremrangebyscore' scores = [1, 1, 2.5, 3, 7] members = [b'one', b'uno', b'two', b'three', b'seven'] pairs = list(itertools.chain(*zip(scores, members))) res = await redis.zadd(key, *pairs) assert res == 5 res = await redis.zremrangebyscore( key, 3, 7.5, exclude=redis.ZSET_EXCLUDE_MIN) assert res == 1 res = await redis.zrange(key, 0, -1) assert res == members[:-1] res = await redis.zremrangebyscore( key, 1, 3, exclude=redis.ZSET_EXCLUDE_BOTH) assert res == 1 res = await redis.zrange(key, 0, -1) assert res == [b'one', b'uno', b'three'] res = await redis.zremrangebyscore(key) assert res == 3 res = await redis.zrange(key, 0, -1) assert res == [] with pytest.raises(TypeError): await redis.zremrangebyscore(None, 1, 2) with pytest.raises(TypeError): await redis.zremrangebyscore(key, b'first', -1) with pytest.raises(TypeError): await redis.zremrangebyscore(key, 0, 'last') @pytest.mark.parametrize('encoding', [None, 'utf-8']) @pytest.mark.run_loop async def test_zrevrange(redis, encoding): key = b'key:zrevrange' scores = [1, 1, 2.5, 3, 7] if encoding: members = ['one', 'uno', 'two', 'three', 'seven'] else: members = [b'one', b'uno', b'two', b'three', b'seven'] pairs = list(itertools.chain(*zip(scores, members))) rev_pairs = list(zip(members, scores)) res = await redis.zadd(key, *pairs) assert res == 5 res = await redis.zrevrange(key, 0, -1, withscores=False, encoding=encoding) assert res == members[::-1] res = await redis.zrevrange(key, 0, -1, withscores=True, encoding=encoding) assert res == rev_pairs[::-1] res = await redis.zrevrange(key, -2, -1, withscores=False, encoding=encoding) assert res == members[1::-1] res = await redis.zrevrange(key, 1, 2, withscores=False, encoding=encoding) assert res == members[3:1:-1] with pytest.raises(TypeError): await redis.zrevrange(None, 1, b'one') with pytest.raises(TypeError): await redis.zrevrange(key, b'first', -1) with pytest.raises(TypeError): await redis.zrevrange(key, 0, 'last') @pytest.mark.run_loop async def 
test_zrevrank(redis): key = b'key:zrevrank' scores = [1, 1, 2.5, 3, 7] members = [b'one', b'uno', b'two', b'three', b'seven'] pairs = list(itertools.chain(*zip(scores, members))) res = await redis.zadd(key, *pairs) assert res == 5 for i, m in enumerate(members): res = await redis.zrevrank(key, m) assert res == len(members) - i - 1 res = await redis.zrevrank(key, b'not:exists') assert res is None with pytest.raises(TypeError): await redis.zrevrank(None, b'one') @pytest.mark.run_loop async def test_zscore(redis): key = b'key:zscore' scores = [1, 1, 2.5, 3, 7] members = [b'one', b'uno', b'two', b'three', b'seven'] pairs = list(itertools.chain(*zip(scores, members))) res = await redis.zadd(key, *pairs) assert res == 5 for s, m in zip(scores, members): res = await redis.zscore(key, m) assert res == s with pytest.raises(TypeError): await redis.zscore(None, b'one') # Check None on undefined members res = await redis.zscore(key, "undefined") assert res is None @pytest.mark.run_loop async def test_zunionstore(redis): zset1 = [2, 'one', 2, 'two'] zset2 = [3, 'one', 3, 'three'] await redis.zadd('zset1', *zset1) await redis.zadd('zset2', *zset2) res = await redis.zunionstore('zout', 'zset1', 'zset2') assert res == 3 res = await redis.zrange('zout', withscores=True) assert res == [(b'two', 2), (b'three', 3), (b'one', 5)] res = await redis.zunionstore( 'zout', 'zset1', 'zset2', aggregate=redis.ZSET_AGGREGATE_SUM) assert res == 3 res = await redis.zrange('zout', withscores=True) assert res == [(b'two', 2), (b'three', 3), (b'one', 5)] res = await redis.zunionstore( 'zout', 'zset1', 'zset2', aggregate=redis.ZSET_AGGREGATE_MIN) assert res == 3 res = await redis.zrange('zout', withscores=True) assert res == [(b'one', 2), (b'two', 2), (b'three', 3)] res = await redis.zunionstore( 'zout', 'zset1', 'zset2', aggregate=redis.ZSET_AGGREGATE_MAX) assert res == 3 res = await redis.zrange('zout', withscores=True) assert res == [(b'two', 2), (b'one', 3), (b'three', 3)] # weights with 
pytest.raises(AssertionError): await redis.zunionstore('zout', 'zset1', 'zset2', with_weights=True) res = await redis.zunionstore('zout', ('zset1', 2), ('zset2', 2), with_weights=True) assert res == 3 res = await redis.zrange('zout', withscores=True) assert res == [(b'two', 4), (b'three', 6), (b'one', 10)] @pytest.mark.parametrize('encoding', [None, 'utf-8']) @pytest.mark.run_loop async def test_zrevrangebyscore(redis, encoding): key = b'key:zrevrangebyscore' scores = [1, 1, 2.5, 3, 7] if encoding: members = ['one', 'uno', 'two', 'three', 'seven'] else: members = [b'one', b'uno', b'two', b'three', b'seven'] pairs = list(itertools.chain(*zip(scores, members))) rev_pairs = list(zip(members[::-1], scores[::-1])) res = await redis.zadd(key, *pairs) assert res == 5 res = await redis.zrevrangebyscore(key, 7, 1, withscores=False, encoding=encoding) assert res == members[::-1] res = await redis.zrevrangebyscore( key, 7, 1, withscores=False, exclude=redis.ZSET_EXCLUDE_BOTH, encoding=encoding) assert res == members[-2:1:-1] res = await redis.zrevrangebyscore(key, 7, 1, withscores=True, encoding=encoding) assert res == rev_pairs res = await redis.zrevrangebyscore(key, 10, 1, offset=2, count=2, encoding=encoding) assert res == members[-3:-5:-1] with pytest.raises(TypeError): await redis.zrevrangebyscore(None, 1, 7) with pytest.raises(TypeError): await redis.zrevrangebyscore(key, 10, b'e') with pytest.raises(TypeError): await redis.zrevrangebyscore(key, b'a', 20) with pytest.raises(TypeError): await redis.zrevrangebyscore(key, 1, 7, offset=1) with pytest.raises(TypeError): await redis.zrevrangebyscore(key, 1, 7, count=1) with pytest.raises(TypeError): await redis.zrevrangebyscore(key, 1, 7, offset='one', count=1) with pytest.raises(TypeError): await redis.zrevrangebyscore(key, 1, 7, offset=1, count='one') @pytest.redis_version( 2, 8, 9, reason='ZREVRANGEBYLEX is available since redis>=2.8.9') @pytest.mark.run_loop async def test_zrevrangebylex(redis): key = 
b'key:zrevrangebylex' scores = [0] * 5 members = [b'a', b'b', b'c', b'd', b'e'] strings = [x.decode('utf-8') for x in members] rev_members = members[::-1] rev_strings = strings[::-1] pairs = list(itertools.chain(*zip(scores, members))) res = await redis.zadd(key, *pairs) assert res == 5 res = await redis.zrevrangebylex(key) assert res == rev_members res = await redis.zrevrangebylex(key, encoding='utf-8') assert res == rev_strings res = await redis.zrevrangebylex(key, min=b'-', max=b'd') assert res == rev_members[1:] res = await redis.zrevrangebylex(key, min=b'a', max=b'e', include_min=False, include_max=False) assert res == rev_members[1:-1] res = await redis.zrevrangebylex(key, min=b'x', max=b'z') assert res == [] res = await redis.zrevrangebylex(key, min=b'e', max=b'a') assert res == [] res = await redis.zrevrangebylex(key, offset=1, count=2) assert res == rev_members[1:3] with pytest.raises(TypeError): await redis.zrevrangebylex(None, b'a', b'e') with pytest.raises(TypeError): await redis.zrevrangebylex(key, 10, b'e') with pytest.raises(TypeError): await redis.zrevrangebylex(key, b'a', 20) with pytest.raises(TypeError): await redis.zrevrangebylex(key, b'a', b'e', offset=1) with pytest.raises(TypeError): await redis.zrevrangebylex(key, b'a', b'e', count=1) with pytest.raises(TypeError): await redis.zrevrangebylex(key, b'a', b'e', offset='one', count=1) with pytest.raises(TypeError): await redis.zrevrangebylex(key, b'a', b'e', offset=1, count='one') @pytest.redis_version(2, 8, 0, reason='ZSCAN is available since redis>=2.8.0') @pytest.mark.run_loop async def test_zscan(redis): key = b'key:zscan' scores, members = [], [] for i in range(1, 11): foo_or_bar = 'bar' if i % 3 else 'foo' members.append('zmem:{}:{}'.format(foo_or_bar, i).encode('utf-8')) scores.append(i) pairs = list(itertools.chain(*zip(scores, members))) rev_pairs = set(zip(members, scores)) await redis.zadd(key, *pairs) cursor, values = await redis.zscan(key, match=b'zmem:foo:*') assert len(values) == 
3 cursor, values = await redis.zscan(key, match=b'zmem:bar:*') assert len(values) == 7 # SCAN family functions do not guarantee that the number (count) of # elements returned per call are in a given range. So here # just dummy test, that *count* argument does not break something cursor = b'0' test_values = set() while cursor: cursor, values = await redis.zscan(key, cursor, count=2) test_values.update(values) assert test_values == rev_pairs with pytest.raises(TypeError): await redis.zscan(None) @pytest.redis_version(2, 8, 0, reason='ZSCAN is available since redis>=2.8.0') @pytest.mark.run_loop async def test_izscan(redis): key = b'key:zscan' scores, members = [], [] for i in range(1, 11): foo_or_bar = 'bar' if i % 3 else 'foo' members.append('zmem:{}:{}'.format(foo_or_bar, i).encode('utf-8')) scores.append(i) pairs = list(itertools.chain(*zip(scores, members))) await redis.zadd(key, *pairs) vals = set(zip(members, scores)) async def coro(cmd): res = set() async for key, score in cmd: res.add((key, score)) return res ret = await coro(redis.izscan(key)) assert set(ret) == set(vals) ret = await coro(redis.izscan(key, match=b'zmem:foo:*')) assert set(ret) == set(v for v in vals if b'foo' in v[0]) ret = await coro(redis.izscan(key, match=b'zmem:bar:*')) assert set(ret) == set(v for v in vals if b'bar' in v[0]) # SCAN family functions do not guarantee that the number (count) of # elements returned per call are in a given range. 
So here # just dummy test, that *count* argument does not break something ret = await coro(redis.izscan(key, count=2)) assert set(ret) == set(vals) with pytest.raises(TypeError): await redis.izscan(None) aioredis-1.0.0/tests/server_commands_test.py0000644000175000017500000002001713203624357022056 0ustar alexeyalexey00000000000000import time import pytest import sys from unittest import mock from aioredis import ReplyError @pytest.mark.run_loop async def test_client_list(redis, server, request): name = request.node.callspec.id assert (await redis.client_setname(name)) res = await redis.client_list() assert isinstance(res, list) res = [dict(i._asdict()) for i in res] expected = { 'addr': mock.ANY, 'fd': mock.ANY, 'age': mock.ANY, 'idle': mock.ANY, 'flags': 'N', 'db': '0', 'sub': '0', 'psub': '0', 'multi': '-1', 'qbuf': '0', 'qbuf_free': mock.ANY, 'obl': '0', 'oll': '0', 'omem': '0', 'events': 'r', 'cmd': 'client', 'name': name, } if server.version >= (2, 8, 12): expected['id'] = mock.ANY assert expected in res @pytest.mark.run_loop @pytest.mark.skipif(sys.platform == 'win32', reason="No unixsocket on Windows") async def test_client_list__unixsocket(create_redis, loop, server, request): redis = await create_redis(server.unixsocket, loop=loop) name = request.node.callspec.id assert (await redis.client_setname(name)) res = await redis.client_list() info = [dict(i._asdict()) for i in res] expected = { 'addr': '{}:0'.format(server.unixsocket), 'fd': mock.ANY, 'age': mock.ANY, 'idle': mock.ANY, 'flags': 'U', # Conneted via unix socket 'db': '0', 'sub': '0', 'psub': '0', 'multi': '-1', 'qbuf': '0', 'qbuf_free': mock.ANY, 'obl': '0', 'oll': '0', 'omem': '0', 'events': 'r', 'cmd': 'client', 'name': name, } if server.version >= (2, 8, 12): expected['id'] = mock.ANY assert expected in info @pytest.mark.run_loop @pytest.redis_version( 2, 9, 50, reason='CLIENT PAUSE is available since redis >= 2.9.50') async def test_client_pause(redis): ts = time.time() res = await 
redis.client_pause(2000) assert res is True await redis.ping() assert int(time.time() - ts) >= 2 with pytest.raises(TypeError): await redis.client_pause(2.0) with pytest.raises(ValueError): await redis.client_pause(-1) @pytest.mark.run_loop async def test_client_getname(redis): res = await redis.client_getname() assert res is None ok = await redis.client_setname('TestClient') assert ok is True res = await redis.client_getname() assert res == b'TestClient' res = await redis.client_getname(encoding='utf-8') assert res == 'TestClient' @pytest.redis_version(2, 8, 13, reason="available since Redis 2.8.13") @pytest.mark.run_loop async def test_command(redis): res = await redis.command() assert isinstance(res, list) assert len(res) > 0 @pytest.redis_version(2, 8, 13, reason="available since Redis 2.8.13") @pytest.mark.run_loop async def test_command_count(redis): res = await redis.command_count() assert res > 0 @pytest.redis_version(3, 0, 0, reason="available since Redis 3.0.0") @pytest.mark.run_loop async def test_command_getkeys(redis): res = await redis.command_getkeys('get', 'key') assert res == ['key'] res = await redis.command_getkeys('get', 'key', encoding=None) assert res == [b'key'] res = await redis.command_getkeys('mset', 'k1', 'v1', 'k2', 'v2') assert res == ['k1', 'k2'] res = await redis.command_getkeys('mset', 'k1', 'v1', 'k2') assert res == ['k1', 'k2'] with pytest.raises(ReplyError): assert (await redis.command_getkeys('get')) with pytest.raises(TypeError): assert not (await redis.command_getkeys(None)) @pytest.redis_version(2, 8, 13, reason="available since Redis 2.8.13") @pytest.mark.run_loop async def test_command_info(redis): res = await redis.command_info('get') assert res == [ ['get', 2, ['readonly', 'fast'], 1, 1, 1], ] res = await redis.command_info("unknown-command") assert res == [None] res = await redis.command_info("unknown-command", "unknown-commnad") assert res == [None, None] @pytest.mark.run_loop async def test_config_get(redis, server): 
res = await redis.config_get('port') assert res == {'port': str(server.tcp_address.port)} res = await redis.config_get() assert len(res) > 0 res = await redis.config_get('unknown_parameter') assert res == {} with pytest.raises(TypeError): await redis.config_get(b'port') @pytest.mark.run_loop async def test_config_rewrite(redis): with pytest.raises(ReplyError): await redis.config_rewrite() @pytest.mark.run_loop async def test_config_set(redis): cur_value = await redis.config_get('slave-read-only') res = await redis.config_set('slave-read-only', 'no') assert res is True res = await redis.config_set( 'slave-read-only', cur_value['slave-read-only']) assert res is True with pytest.raises(ReplyError, match="Unsupported CONFIG parameter"): await redis.config_set('databases', 100) with pytest.raises(TypeError): await redis.config_set(100, 'databases') # @pytest.mark.run_loop # @pytest.mark.skip("Not implemented") # def test_config_resetstat(): # pass @pytest.mark.run_loop async def test_debug_object(redis): with pytest.raises(ReplyError): assert (await redis.debug_object('key')) is None ok = await redis.set('key', 'value') assert ok res = await redis.debug_object('key') assert res is not None @pytest.mark.run_loop async def test_debug_sleep(redis): t1 = await redis.time() ok = await redis.debug_sleep(2) assert ok t2 = await redis.time() assert t2 - t1 >= 2 @pytest.mark.run_loop async def test_dbsize(redis): res = await redis.dbsize() assert res == 0 await redis.set('key', 'value') res = await redis.dbsize() assert res > 0 await redis.flushdb() res = await redis.dbsize() assert res == 0 await redis.set('key', 'value') res = await redis.dbsize() assert res == 1 @pytest.mark.run_loop async def test_info(redis): res = await redis.info() assert isinstance(res, dict) res = await redis.info('all') assert isinstance(res, dict) with pytest.raises(ValueError): await redis.info('') @pytest.mark.run_loop async def test_lastsave(redis): res = await redis.lastsave() assert res > 0 
@pytest.mark.run_loop @pytest.redis_version(2, 8, 12, reason='ROLE is available since redis>=2.8.12') async def test_role(redis): res = await redis.role() assert dict(res._asdict()) == { 'role': 'master', 'replication_offset': mock.ANY, 'slaves': [], } @pytest.mark.run_loop async def test_save(redis): res = await redis.dbsize() assert res == 0 t1 = await redis.lastsave() ok = await redis.save() assert ok t2 = await redis.lastsave() assert t2 >= t1 @pytest.mark.run_loop async def test_time(redis): res = await redis.time() assert isinstance(res, float) pytest.assert_almost_equal(int(res), int(time.time()), delta=10) @pytest.mark.run_loop async def test_time_with_encoding(create_redis, server, loop): redis = await create_redis(server.tcp_address, loop=loop, encoding='utf-8') res = await redis.time() assert isinstance(res, float) pytest.assert_almost_equal(int(res), int(time.time()), delta=10) @pytest.mark.run_loop async def test_slowlog_len(redis): res = await redis.slowlog_len() assert res >= 0 @pytest.mark.run_loop async def test_slowlog_get(redis): res = await redis.slowlog_get() assert isinstance(res, list) assert len(res) >= 0 res = await redis.slowlog_get(2) assert isinstance(res, list) assert 0 <= len(res) <= 2 with pytest.raises(TypeError): assert not (await redis.slowlog_get(1.2)) with pytest.raises(TypeError): assert not (await redis.slowlog_get('1')) @pytest.mark.run_loop async def test_slowlog_reset(redis): ok = await redis.slowlog_reset() assert ok is True aioredis-1.0.0/tests/connection_commands_test.py0000644000175000017500000000543313203624357022714 0ustar alexeyalexey00000000000000import pytest import asyncio from aioredis import ConnectionClosedError, ReplyError from aioredis.pool import ConnectionsPool from aioredis import Redis @pytest.mark.run_loop async def test_repr(create_redis, loop, server): redis = await create_redis( server.tcp_address, db=1, loop=loop) assert repr(redis) in { '>', '>', } redis = await create_redis( server.tcp_address, 
db=0, loop=loop) assert repr(redis) in { '>', '>', } @pytest.mark.run_loop async def test_auth(redis): expected_message = "ERR Client sent AUTH, but no password is set" with pytest.raises(ReplyError, match=expected_message): await redis.auth('') @pytest.mark.run_loop async def test_echo(redis): resp = await redis.echo('ECHO') assert resp == b'ECHO' with pytest.raises(TypeError): await redis.echo(None) @pytest.mark.run_loop async def test_ping(redis): assert await redis.ping() == b'PONG' @pytest.mark.run_loop async def test_quit(redis, loop): expected = (ConnectionClosedError, ConnectionError) try: assert b'OK' == await redis.quit() except expected: pass if not isinstance(redis.connection, ConnectionsPool): # reader task may not yet been cancelled and _do_close not called # so the ConnectionClosedError may be raised (or ConnectionError) with pytest.raises(expected): try: await redis.ping() except asyncio.CancelledError: assert False, "Cancelled error must not be raised" # wait one loop iteration until it get surely closed await asyncio.sleep(0, loop=loop) assert redis.connection.closed with pytest.raises(ConnectionClosedError): await redis.ping() @pytest.mark.run_loop async def test_select(redis): assert redis.db == 0 resp = await redis.select(1) assert resp is True assert redis.db == 1 assert redis.connection.db == 1 @pytest.mark.run_loop async def test_encoding(create_redis, loop, server): redis = await create_redis( server.tcp_address, db=1, encoding='utf-8', loop=loop) assert redis.encoding == 'utf-8' @pytest.mark.run_loop async def test_yield_from_backwards_compatability(create_redis, server, loop): redis = await create_redis(server.tcp_address, loop=loop) assert isinstance(redis, Redis) # TODO: there should not be warning # with pytest.warns(UserWarning): with await redis as client: assert isinstance(client, Redis) assert client is not redis assert await client.ping() 
aioredis-1.0.0/tests/task_cancellation_test.py0000644000175000017500000000115413203624357022346 0ustar alexeyalexey00000000000000import pytest import asyncio @pytest.mark.run_loop async def test_future_cancellation(create_connection, loop, server): conn = await create_connection( server.tcp_address, loop=loop) ts = loop.time() fut = conn.execute('BLPOP', 'some-list', 5) with pytest.raises(asyncio.TimeoutError): await asyncio.wait_for(fut, 1, loop=loop) assert fut.cancelled() # NOTE: Connection becomes available only after timeout expires await conn.execute('TIME') dt = int(loop.time() - ts) assert dt in {4, 5, 6} # self.assertAlmostEqual(dt, 5.0, delta=1) # this fails too often aioredis-1.0.0/tests/geo_commands_test.py0000644000175000017500000003476713203624357021343 0ustar alexeyalexey00000000000000import pytest from aioredis import GeoPoint, GeoMember @pytest.mark.run_loop @pytest.redis_version( 3, 2, 0, reason='GEOADD is available since redis >= 3.2.0') async def test_geoadd(redis): res = await redis.geoadd('geodata', 13.361389, 38.115556, 'Palermo') assert res == 1 res = await redis.geoadd( 'geodata', 15.087269, 37.502669, 'Catania', 12.424315, 37.802105, 'Marsala' ) assert res == 2 @pytest.mark.run_loop @pytest.redis_version( 3, 2, 0, reason='GEODIST is available since redis >= 3.2.0') async def test_geodist(redis): res = await redis.geoadd( 'geodata', 13.361389, 38.115556, 'Palermo', 15.087269, 37.502669, 'Catania' ) assert res == 2 res = await redis.geodist('geodata', 'Palermo', 'Catania') assert res == 166274.1516 res = await redis.geodist('geodata', 'Palermo', 'Catania', 'km') assert res == 166.2742 @pytest.mark.run_loop @pytest.redis_version( 3, 2, 0, reason='GEOHASH is available since redis >= 3.2.0') async def test_geohash(redis): res = await redis.geoadd( 'geodata', 13.361389, 38.115556, 'Palermo', 15.087269, 37.502669, 'Catania' ) assert res == 2 res = await redis.geohash( 'geodata', 'Palermo', encoding='utf-8' ) assert res == ['sqc8b49rny0'] res = 
await redis.geohash( 'geodata', 'Palermo', 'Catania', encoding='utf-8' ) assert res == ['sqc8b49rny0', 'sqdtr74hyu0'] @pytest.mark.run_loop @pytest.redis_version( 3, 2, 0, reason='GEOPOS is available since redis >= 3.2.0') async def test_geopos(redis): res = await redis.geoadd( 'geodata', 13.361389, 38.115556, 'Palermo', 15.087269, 37.502669, 'Catania' ) assert res == 2 res = await redis.geopos('geodata', 'Palermo') assert res == [ GeoPoint(longitude=13.36138933897018433, latitude=38.11555639549629859) ] res = await redis.geopos('geodata', 'Catania', 'Palermo') assert res == [ GeoPoint(longitude=15.087267458438873, latitude=37.50266842333162), GeoPoint(longitude=13.36138933897018433, latitude=38.11555639549629859) ] @pytest.mark.run_loop @pytest.redis_version( 3, 2, 0, reason='GEO* is available since redis >= 3.2.0') async def test_geo_not_exist_members(redis): res = await redis.geoadd('geodata', 13.361389, 38.115556, 'Palermo') assert res == 1 res = await redis.geoadd( 'geodata', 15.087269, 37.502669, 'Catania', 12.424315, 37.802105, 'Marsala' ) assert res == 2 res = await redis.geohash( 'geodata', 'NotExistMember' ) assert res == [None] res = await redis.geodist('geodata', 'NotExistMember', 'Catania') assert res is None res = await redis.geopos( 'geodata', 'Palermo', 'NotExistMember', 'Catania' ) assert res == [ GeoPoint( longitude=13.36138933897018433, latitude=38.11555639549629859 ), None, GeoPoint(longitude=15.087267458438873, latitude=37.50266842333162) ] @pytest.mark.run_loop @pytest.redis_version( 3, 2, 0, reason='GEORADIUS is available since redis >= 3.2.0') async def test_georadius_validation(redis): res = await redis.geoadd( 'geodata', 13.361389, 38.115556, 'Palermo', 15.087269, 37.502669, 'Catania' ) assert res == 2 with pytest.raises(TypeError): res = await redis.georadius( 'geodata', 15, 37, 200, 'km', count=1.3, encoding='utf-8' ) with pytest.raises(TypeError): res = await redis.georadius( 'geodata', 15, 37, '200', 'km', encoding='utf-8' ) with 
pytest.raises(ValueError): res = await redis.georadius( 'geodata', 15, 37, 200, 'k', encoding='utf-8' ) with pytest.raises(ValueError): res = await redis.georadius( 'geodata', 15, 37, 200, 'km', sort='DESV', encoding='utf-8' ) @pytest.mark.run_loop @pytest.redis_version( 3, 2, 0, reason='GEORADIUS is available since redis >= 3.2.0') async def test_georadius(redis): res = await redis.geoadd( 'geodata', 13.361389, 38.115556, 'Palermo', 15.087269, 37.502669, 'Catania' ) assert res == 2 res = await redis.georadius( 'geodata', 15, 37, 200, 'km', encoding='utf-8' ) assert res == ['Palermo', 'Catania'] res = await redis.georadius( 'geodata', 15, 37, 200, 'km', count=1, encoding='utf-8' ) assert res == ['Catania'] res = await redis.georadius( 'geodata', 15, 37, 200, 'km', sort='ASC', encoding='utf-8' ) assert res == ['Catania', 'Palermo'] res = await redis.georadius( 'geodata', 15, 37, 200, 'km', with_dist=True, encoding='utf-8' ) assert res == [ GeoMember(member='Palermo', dist=190.4424, coord=None, hash=None), GeoMember(member='Catania', dist=56.4413, coord=None, hash=None) ] res = await redis.georadius( 'geodata', 15, 37, 200, 'km', with_dist=True, with_coord=True, encoding='utf-8' ) assert res == [ GeoMember( member='Palermo', dist=190.4424, hash=None, coord=GeoPoint( longitude=13.36138933897018433, latitude=38.11555639549629859 ) ), GeoMember( member='Catania', dist=56.4413, hash=None, coord=GeoPoint( longitude=15.087267458438873, latitude=37.50266842333162 ), ) ] res = await redis.georadius( 'geodata', 15, 37, 200, 'km', with_dist=True, with_coord=True, with_hash=True, encoding='utf-8' ) assert res == [ GeoMember( member='Palermo', dist=190.4424, hash=3479099956230698, coord=GeoPoint( longitude=13.36138933897018433, latitude=38.11555639549629859 ) ), GeoMember( member='Catania', dist=56.4413, hash=3479447370796909, coord=GeoPoint( longitude=15.087267458438873, latitude=37.50266842333162 ), ) ] res = await redis.georadius( 'geodata', 15, 37, 200, 'km', 
with_coord=True, with_hash=True, encoding='utf-8' ) assert res == [ GeoMember( member='Palermo', dist=None, hash=3479099956230698, coord=GeoPoint( longitude=13.36138933897018433, latitude=38.11555639549629859 ) ), GeoMember( member='Catania', dist=None, hash=3479447370796909, coord=GeoPoint( longitude=15.087267458438873, latitude=37.50266842333162 ), ) ] res = await redis.georadius( 'geodata', 15, 37, 200, 'km', with_coord=True, encoding='utf-8' ) assert res == [ GeoMember( member='Palermo', dist=None, hash=None, coord=GeoPoint( longitude=13.36138933897018433, latitude=38.11555639549629859 ) ), GeoMember( member='Catania', dist=None, hash=None, coord=GeoPoint( longitude=15.087267458438873, latitude=37.50266842333162 ), ) ] res = await redis.georadius( 'geodata', 15, 37, 200, 'km', count=1, sort='DESC', with_hash=True, encoding='utf-8' ) assert res == [ GeoMember( member='Palermo', dist=None, hash=3479099956230698, coord=None ) ] @pytest.mark.run_loop @pytest.redis_version( 3, 2, 0, reason='GEORADIUSBYMEMBER is available since redis >= 3.2.0') async def test_georadiusbymember(redis): res = await redis.geoadd( 'geodata', 13.361389, 38.115556, 'Palermo', 15.087269, 37.502669, 'Catania' ) assert res == 2 res = await redis.georadiusbymember( 'geodata', 'Palermo', 200, 'km', with_dist=True, encoding='utf-8' ) assert res == [ GeoMember(member='Palermo', dist=0.0, coord=None, hash=None), GeoMember(member='Catania', dist=166.2742, coord=None, hash=None) ] res = await redis.georadiusbymember( 'geodata', 'Palermo', 200, 'km', encoding='utf-8' ) assert res == ['Palermo', 'Catania'] res = await redis.georadiusbymember( 'geodata', 'Palermo', 200, 'km', with_dist=True, with_coord=True, encoding='utf-8' ) assert res == [ GeoMember( member='Palermo', dist=0.0, hash=None, coord=GeoPoint(13.361389338970184, 38.1155563954963) ), GeoMember( member='Catania', dist=166.2742, hash=None, coord=GeoPoint(15.087267458438873, 37.50266842333162) ) ] res = await redis.georadiusbymember( 
'geodata', 'Palermo', 200, 'km', with_dist=True, with_coord=True, with_hash=True, encoding='utf-8' ) assert res == [ GeoMember( member='Palermo', dist=0.0, hash=3479099956230698, coord=GeoPoint(13.361389338970184, 38.1155563954963) ), GeoMember( member='Catania', dist=166.2742, hash=3479447370796909, coord=GeoPoint(15.087267458438873, 37.50266842333162) ) ] @pytest.mark.run_loop @pytest.redis_version( 3, 2, 0, reason='GEOHASH is available since redis >= 3.2.0') async def test_geohash_binary(redis): res = await redis.geoadd( 'geodata', 13.361389, 38.115556, 'Palermo', 15.087269, 37.502669, 'Catania' ) assert res == 2 res = await redis.geohash( 'geodata', 'Palermo' ) assert res == [b'sqc8b49rny0'] res = await redis.geohash( 'geodata', 'Palermo', 'Catania' ) assert res == [b'sqc8b49rny0', b'sqdtr74hyu0'] @pytest.mark.run_loop @pytest.redis_version( 3, 2, 0, reason='GEORADIUS is available since redis >= 3.2.0') async def test_georadius_binary(redis): res = await redis.geoadd( 'geodata', 13.361389, 38.115556, 'Palermo', 15.087269, 37.502669, 'Catania' ) assert res == 2 res = await redis.georadius( 'geodata', 15, 37, 200, 'km' ) assert res == [b'Palermo', b'Catania'] res = await redis.georadius( 'geodata', 15, 37, 200, 'km', count=1 ) assert res == [b'Catania'] res = await redis.georadius( 'geodata', 15, 37, 200, 'km', sort='ASC' ) assert res == [b'Catania', b'Palermo'] res = await redis.georadius( 'geodata', 15, 37, 200, 'km', with_dist=True ) assert res == [ GeoMember(member=b'Palermo', dist=190.4424, coord=None, hash=None), GeoMember(member=b'Catania', dist=56.4413, coord=None, hash=None) ] res = await redis.georadius( 'geodata', 15, 37, 200, 'km', with_dist=True, with_coord=True ) assert res == [ GeoMember( member=b'Palermo', dist=190.4424, hash=None, coord=GeoPoint( longitude=13.36138933897018433, latitude=38.11555639549629859 ) ), GeoMember( member=b'Catania', dist=56.4413, hash=None, coord=GeoPoint( longitude=15.087267458438873, latitude=37.50266842333162 ), ) ] 
res = await redis.georadius( 'geodata', 15, 37, 200, 'km', with_dist=True, with_coord=True, with_hash=True ) assert res == [ GeoMember( member=b'Palermo', dist=190.4424, hash=3479099956230698, coord=GeoPoint( longitude=13.36138933897018433, latitude=38.11555639549629859 ) ), GeoMember( member=b'Catania', dist=56.4413, hash=3479447370796909, coord=GeoPoint( longitude=15.087267458438873, latitude=37.50266842333162 ), ) ] res = await redis.georadius( 'geodata', 15, 37, 200, 'km', with_coord=True, with_hash=True ) assert res == [ GeoMember( member=b'Palermo', dist=None, hash=3479099956230698, coord=GeoPoint( longitude=13.36138933897018433, latitude=38.11555639549629859 ) ), GeoMember( member=b'Catania', dist=None, hash=3479447370796909, coord=GeoPoint( longitude=15.087267458438873, latitude=37.50266842333162 ), ) ] res = await redis.georadius( 'geodata', 15, 37, 200, 'km', with_coord=True ) assert res == [ GeoMember( member=b'Palermo', dist=None, hash=None, coord=GeoPoint( longitude=13.36138933897018433, latitude=38.11555639549629859 ) ), GeoMember( member=b'Catania', dist=None, hash=None, coord=GeoPoint( longitude=15.087267458438873, latitude=37.50266842333162 ), ) ] res = await redis.georadius( 'geodata', 15, 37, 200, 'km', count=1, sort='DESC', with_hash=True ) assert res == [ GeoMember( member=b'Palermo', dist=None, hash=3479099956230698, coord=None ) ] @pytest.mark.run_loop @pytest.redis_version( 3, 2, 0, reason='GEORADIUSBYMEMBER is available since redis >= 3.2.0') async def test_georadiusbymember_binary(redis): res = await redis.geoadd( 'geodata', 13.361389, 38.115556, 'Palermo', 15.087269, 37.502669, 'Catania' ) assert res == 2 res = await redis.georadiusbymember( 'geodata', 'Palermo', 200, 'km', with_dist=True ) assert res == [ GeoMember(member=b'Palermo', dist=0.0, coord=None, hash=None), GeoMember(member=b'Catania', dist=166.2742, coord=None, hash=None) ] res = await redis.georadiusbymember( 'geodata', 'Palermo', 200, 'km', with_dist=True, with_coord=True ) 
assert res == [ GeoMember( member=b'Palermo', dist=0.0, hash=None, coord=GeoPoint(13.361389338970184, 38.1155563954963) ), GeoMember( member=b'Catania', dist=166.2742, hash=None, coord=GeoPoint(15.087267458438873, 37.50266842333162) ) ] res = await redis.georadiusbymember( 'geodata', 'Palermo', 200, 'km', with_dist=True, with_coord=True, with_hash=True ) assert res == [ GeoMember( member=b'Palermo', dist=0.0, hash=3479099956230698, coord=GeoPoint(13.361389338970184, 38.1155563954963) ), GeoMember( member=b'Catania', dist=166.2742, hash=3479447370796909, coord=GeoPoint(15.087267458438873, 37.50266842333162) ) ] aioredis-1.0.0/tests/string_commands_test.py0000644000175000017500000004572513203624357022073 0ustar alexeyalexey00000000000000import asyncio import pytest from aioredis import ReplyError async def add(redis, key, value): ok = await redis.set(key, value) assert ok is True @pytest.mark.run_loop async def test_append(redis): len_ = await redis.append('my-key', 'Hello') assert len_ == 5 len_ = await redis.append('my-key', ', world!') assert len_ == 13 val = await redis.connection.execute('GET', 'my-key') assert val == b'Hello, world!' 
with pytest.raises(TypeError): await redis.append(None, 'value') with pytest.raises(TypeError): await redis.append('none-key', None) @pytest.mark.run_loop async def test_bitcount(redis): await add(redis, 'my-key', b'\x00\x10\x01') ret = await redis.bitcount('my-key') assert ret == 2 ret = await redis.bitcount('my-key', 0, 0) assert ret == 0 ret = await redis.bitcount('my-key', 1, 1) assert ret == 1 ret = await redis.bitcount('my-key', 2, 2) assert ret == 1 ret = await redis.bitcount('my-key', 0, 1) assert ret == 1 ret = await redis.bitcount('my-key', 0, 2) assert ret == 2 ret = await redis.bitcount('my-key', 1, 2) assert ret == 2 ret = await redis.bitcount('my-key', 2, 3) assert ret == 1 ret = await redis.bitcount('my-key', 0, -1) assert ret == 2 with pytest.raises(TypeError): await redis.bitcount(None, 2, 2) with pytest.raises(TypeError): await redis.bitcount('my-key', None, 2) with pytest.raises(TypeError): await redis.bitcount('my-key', 2, None) @pytest.mark.run_loop async def test_bitop_and(redis): key1, value1 = b'key:bitop:and:1', 5 key2, value2 = b'key:bitop:and:2', 7 await add(redis, key1, value1) await add(redis, key2, value2) destkey = b'key:bitop:dest' await redis.bitop_and(destkey, key1, key2) test_value = await redis.get(destkey) assert test_value == b'5' with pytest.raises(TypeError): await redis.bitop_and(None, key1, key2) with pytest.raises(TypeError): await redis.bitop_and(destkey, None) with pytest.raises(TypeError): await redis.bitop_and(destkey, key1, None) @pytest.mark.run_loop async def test_bitop_or(redis): key1, value1 = b'key:bitop:or:1', 5 key2, value2 = b'key:bitop:or:2', 7 await add(redis, key1, value1) await add(redis, key2, value2) destkey = b'key:bitop:dest' await redis.bitop_or(destkey, key1, key2) test_value = await redis.get(destkey) assert test_value == b'7' with pytest.raises(TypeError): await redis.bitop_or(None, key1, key2) with pytest.raises(TypeError): await redis.bitop_or(destkey, None) with pytest.raises(TypeError): await 
redis.bitop_or(destkey, key1, None) @pytest.mark.run_loop async def test_bitop_xor(redis): key1, value1 = b'key:bitop:xor:1', 5 key2, value2 = b'key:bitop:xor:2', 7 await add(redis, key1, value1) await add(redis, key2, value2) destkey = b'key:bitop:dest' await redis.bitop_xor(destkey, key1, key2) test_value = await redis.get(destkey) assert test_value == b'\x02' with pytest.raises(TypeError): await redis.bitop_xor(None, key1, key2) with pytest.raises(TypeError): await redis.bitop_xor(destkey, None) with pytest.raises(TypeError): await redis.bitop_xor(destkey, key1, None) @pytest.mark.run_loop async def test_bitop_not(redis): key1, value1 = b'key:bitop:not:1', 5 await add(redis, key1, value1) destkey = b'key:bitop:dest' await redis.bitop_not(destkey, key1) res = await redis.get(destkey) assert res == b'\xca' with pytest.raises(TypeError): await redis.bitop_not(None, key1) with pytest.raises(TypeError): await redis.bitop_not(destkey, None) @pytest.redis_version(2, 8, 0, reason='BITPOS is available since redis>=2.8.0') @pytest.mark.run_loop async def test_bitpos(redis): key, value = b'key:bitop', b'\xff\xf0\x00' await add(redis, key, value) test_value = await redis.bitpos(key, 0, end=3) assert test_value == 12 test_value = await redis.bitpos(key, 0, 2, 3) assert test_value == 16 key, value = b'key:bitop', b'\x00\xff\xf0' await add(redis, key, value) test_value = await redis.bitpos(key, 1, 0) assert test_value == 8 test_value = await redis.bitpos(key, 1, 1) assert test_value == 8 key, value = b'key:bitop', b'\x00\x00\x00' await add(redis, key, value) test_value = await redis.bitpos(key, 1, 0) assert test_value == -1 test_value = await redis.bitpos(b'not:' + key, 1) assert test_value == -1 with pytest.raises(TypeError): test_value = await redis.bitpos(None, 1) with pytest.raises(ValueError): test_value = await redis.bitpos(key, 7) @pytest.mark.run_loop async def test_decr(redis): await redis.delete('key') res = await redis.decr('key') assert res == -1 res = await 
redis.decr('key') assert res == -2 with pytest.raises(ReplyError): await add(redis, 'key', 'val') await redis.decr('key') with pytest.raises(ReplyError): await add(redis, 'key', 1.0) await redis.decr('key') with pytest.raises(TypeError): await redis.decr(None) @pytest.mark.run_loop async def test_decrby(redis): await redis.delete('key') res = await redis.decrby('key', 1) assert res == -1 res = await redis.decrby('key', 10) assert res == -11 res = await redis.decrby('key', -1) assert res == -10 with pytest.raises(ReplyError): await add(redis, 'key', 'val') await redis.decrby('key', 1) with pytest.raises(ReplyError): await add(redis, 'key', 1.0) await redis.decrby('key', 1) with pytest.raises(TypeError): await redis.decrby(None, 1) with pytest.raises(TypeError): await redis.decrby('key', None) @pytest.mark.run_loop async def test_get(redis): await add(redis, 'my-key', 'value') ret = await redis.get('my-key') assert ret == b'value' await add(redis, 'my-key', 123) ret = await redis.get('my-key') assert ret == b'123' ret = await redis.get('bad-key') assert ret is None with pytest.raises(TypeError): await redis.get(None) @pytest.mark.run_loop async def test_getbit(redis): key, value = b'key:getbit', 10 await add(redis, key, value) result = await redis.setbit(key, 7, 1) assert result == 1 test_value = await redis.getbit(key, 0) assert test_value == 0 test_value = await redis.getbit(key, 7) assert test_value == 1 test_value = await redis.getbit(b'not:' + key, 7) assert test_value == 0 test_value = await redis.getbit(key, 100) assert test_value == 0 with pytest.raises(TypeError): await redis.getbit(None, 0) with pytest.raises(TypeError): await redis.getbit(key, b'one') with pytest.raises(ValueError): await redis.getbit(key, -7) @pytest.mark.run_loop async def test_getrange(redis): key, value = b'key:getrange', b'This is a string' await add(redis, key, value) test_value = await redis.getrange(key, 0, 3) assert test_value == b'This' test_value = await redis.getrange(key, -3, 
-1) assert test_value == b'ing' test_value = await redis.getrange(key, 0, -1) assert test_value == b'This is a string' test_value = await redis.getrange( key, 0, -1, encoding='utf-8') assert test_value == 'This is a string' test_value = await redis.getrange(key, 10, 100) assert test_value == b'string' test_value = await redis.getrange( key, 10, 100, encoding='utf-8') assert test_value == 'string' test_value = await redis.getrange(key, 50, 100) assert test_value == b'' with pytest.raises(TypeError): await redis.getrange(None, 0, 3) with pytest.raises(TypeError): await redis.getrange(key, b'one', 3) with pytest.raises(TypeError): await redis.getrange(key, 0, b'seven') @pytest.mark.run_loop async def test_getset(redis): key, value = b'key:getset', b'hello' await add(redis, key, value) test_value = await redis.getset(key, b'asyncio') assert test_value == b'hello' test_value = await redis.get(key) assert test_value == b'asyncio' test_value = await redis.getset( key, 'world', encoding='utf-8') assert test_value == 'asyncio' test_value = await redis.getset(b'not:' + key, b'asyncio') assert test_value is None test_value = await redis.get(b'not:' + key) assert test_value == b'asyncio' with pytest.raises(TypeError): await redis.getset(None, b'asyncio') @pytest.mark.run_loop async def test_incr(redis): await redis.delete('key') res = await redis.incr('key') assert res == 1 res = await redis.incr('key') assert res == 2 with pytest.raises(ReplyError): await add(redis, 'key', 'val') await redis.incr('key') with pytest.raises(ReplyError): await add(redis, 'key', 1.0) await redis.incr('key') with pytest.raises(TypeError): await redis.incr(None) @pytest.mark.run_loop async def test_incrby(redis): await redis.delete('key') res = await redis.incrby('key', 1) assert res == 1 res = await redis.incrby('key', 10) assert res == 11 res = await redis.incrby('key', -1) assert res == 10 with pytest.raises(ReplyError): await add(redis, 'key', 'val') await redis.incrby('key', 1) with 
pytest.raises(ReplyError): await add(redis, 'key', 1.0) await redis.incrby('key', 1) with pytest.raises(TypeError): await redis.incrby(None, 1) with pytest.raises(TypeError): await redis.incrby('key', None) @pytest.mark.run_loop async def test_incrbyfloat(redis): await redis.delete('key') res = await redis.incrbyfloat('key', 1.0) assert res == 1.0 res = await redis.incrbyfloat('key', 10.5) assert res == 11.5 res = await redis.incrbyfloat('key', -1.0) assert res == 10.5 await add(redis, 'key', 2) res = await redis.incrbyfloat('key', 0.5) assert res == 2.5 with pytest.raises(ReplyError): await add(redis, 'key', 'val') await redis.incrbyfloat('key', 1.0) with pytest.raises(TypeError): await redis.incrbyfloat(None, 1.0) with pytest.raises(TypeError): await redis.incrbyfloat('key', None) with pytest.raises(TypeError): await redis.incrbyfloat('key', 1) with pytest.raises(TypeError): await redis.incrbyfloat('key', '1.0') @pytest.mark.run_loop async def test_mget(redis): key1, value1 = b'foo', b'bar' key2, value2 = b'baz', b'bzz' await add(redis, key1, value1) await add(redis, key2, value2) res = await redis.mget('key') assert res == [None] res = await redis.mget('key', 'key') assert res == [None, None] res = await redis.mget(key1, key2) assert res == [value1, value2] # test encoding param res = await redis.mget(key1, key2, encoding='utf-8') assert res == ['bar', 'bzz'] with pytest.raises(TypeError): await redis.mget(None, key2) with pytest.raises(TypeError): await redis.mget(key1, None) @pytest.mark.run_loop async def test_mset(redis): key1, value1 = b'key:mset:1', b'hello' key2, value2 = b'key:mset:2', b'world' await redis.mset(key1, value1, key2, value2) test_value = await redis.mget(key1, key2) assert test_value == [value1, value2] await redis.mset(b'other:' + key1, b'other:' + value1) test_value = await redis.get(b'other:' + key1) assert test_value == b'other:' + value1 with pytest.raises(TypeError): await redis.mset(None, value1) with pytest.raises(TypeError): await 
redis.mset(key1, value1, key1) @pytest.mark.run_loop async def test_msetnx(redis): key1, value1 = b'key:msetnx:1', b'Hello' key2, value2 = b'key:msetnx:2', b'there' key3, value3 = b'key:msetnx:3', b'world' res = await redis.msetnx(key1, value1, key2, value2) assert res == 1 res = await redis.mget(key1, key2) assert res == [value1, value2] res = await redis.msetnx(key2, value2, key3, value3) assert res == 0 res = await redis.mget(key1, key2, key3) assert res == [value1, value2, None] with pytest.raises(TypeError): await redis.msetnx(None, value1) with pytest.raises(TypeError): await redis.msetnx(key1, value1, key2) @pytest.mark.run_loop async def test_psetex(redis, loop): key, value = b'key:psetex:1', b'Hello' # test expiration in milliseconds tr = redis.multi_exec() fut1 = tr.psetex(key, 10, value) fut2 = tr.get(key) await tr.execute() await fut1 test_value = await fut2 assert test_value == value await asyncio.sleep(0.050, loop=loop) test_value = await redis.get(key) assert test_value is None with pytest.raises(TypeError): await redis.psetex(None, 10, value) with pytest.raises(TypeError): await redis.psetex(key, 7.5, value) @pytest.mark.run_loop async def test_set(redis): ok = await redis.set('my-key', 'value') assert ok is True ok = await redis.set(b'my-key', b'value') assert ok is True ok = await redis.set(bytearray(b'my-key'), bytearray(b'value')) assert ok is True with pytest.raises(TypeError): await redis.set(None, 'value') @pytest.mark.run_loop async def test_set_expire(redis, loop): key, value = b'key:set:expire', b'foo' # test expiration in milliseconds tr = redis.multi_exec() fut1 = tr.set(key, value, pexpire=10) fut2 = tr.get(key) await tr.execute() await fut1 result_1 = await fut2 assert result_1 == value await asyncio.sleep(0.050, loop=loop) result_2 = await redis.get(key) assert result_2 is None # same thing but timeout in seconds tr = redis.multi_exec() fut1 = tr.set(key, value, expire=1) fut2 = tr.get(key) await tr.execute() await fut1 result_3 = 
await fut2 assert result_3 == value await asyncio.sleep(1.050, loop=loop) result_4 = await redis.get(key) assert result_4 is None @pytest.mark.run_loop async def test_set_only_if_not_exists(redis): key, value = b'key:set:only_if_not_exists', b'foo' await redis.set( key, value, exist=redis.SET_IF_NOT_EXIST) result_1 = await redis.get(key) assert result_1 == value # new values not set cos, values exists await redis.set( key, "foo2", exist=redis.SET_IF_NOT_EXIST) result_2 = await redis.get(key) # nothing changed result is same "foo" assert result_2 == value @pytest.mark.run_loop async def test_set_only_if_exists(redis): key, value = b'key:set:only_if_exists', b'only_if_exists:foo' # ensure that such key does not exits, and value not sets await redis.delete(key) await redis.set(key, value, exist=redis.SET_IF_EXIST) result_1 = await redis.get(key) assert result_1 is None # ensure key exits, and value updates await redis.set(key, value) await redis.set(key, b'foo', exist=redis.SET_IF_EXIST) result_2 = await redis.get(key) assert result_2 == b'foo' @pytest.mark.run_loop async def test_set_wrong_input(redis): key, value = b'key:set:', b'foo' with pytest.raises(TypeError): await redis.set(None, value) with pytest.raises(TypeError): await redis.set(key, value, expire=7.8) with pytest.raises(TypeError): await redis.set(key, value, pexpire=7.8) @pytest.mark.run_loop async def test_setbit(redis): key = b'key:setbit' result = await redis.setbit(key, 7, 1) assert result == 0 test_value = await redis.getbit(key, 7) assert test_value == 1 with pytest.raises(TypeError): await redis.setbit(None, 7, 1) with pytest.raises(TypeError): await redis.setbit(key, 7.5, 1) with pytest.raises(ValueError): await redis.setbit(key, -1, 1) with pytest.raises(ValueError): await redis.setbit(key, 1, 7) @pytest.mark.run_loop async def test_setex(redis, loop): key, value = b'key:setex:1', b'Hello' tr = redis.multi_exec() fut1 = tr.setex(key, 1, value) fut2 = tr.get(key) await tr.execute() await fut1 
test_value = await fut2 assert test_value == value await asyncio.sleep(1.050, loop=loop) test_value = await redis.get(key) assert test_value is None tr = redis.multi_exec() fut1 = tr.setex(key, 0.1, value) fut2 = tr.get(key) await tr.execute() await fut1 test_value = await fut2 assert test_value == value await asyncio.sleep(0.50, loop=loop) test_value = await redis.get(key) assert test_value is None with pytest.raises(TypeError): await redis.setex(None, 1, value) with pytest.raises(TypeError): await redis.setex(key, b'one', value) @pytest.mark.run_loop async def test_setnx(redis): key, value = b'key:setnx:1', b'Hello' # set fresh new value test_value = await redis.setnx(key, value) # 1 means value has been set assert test_value == 1 # fetch installed value just to be sure test_value = await redis.get(key) assert test_value == value # try to set new value on same key test_value = await redis.setnx(key, b'other:' + value) # 0 means value has not been set assert test_value == 0 # make sure that value was not changed test_value = await redis.get(key) assert test_value == value with pytest.raises(TypeError): await redis.setnx(None, value) @pytest.mark.run_loop async def test_setrange(redis): key, value = b'key:setrange', b'Hello World' await add(redis, key, value) test_value = await redis.setrange(key, 6, b'Redis') assert test_value == 11 test_value = await redis.get(key) assert test_value == b'Hello Redis' test_value = await redis.setrange(b'not:' + key, 6, b'Redis') assert test_value == 11 test_value = await redis.get(b'not:' + key) assert test_value == b'\x00\x00\x00\x00\x00\x00Redis' with pytest.raises(TypeError): await redis.setrange(None, 6, b'Redis') with pytest.raises(TypeError): await redis.setrange(key, 0.7, b'Redis') with pytest.raises(ValueError): await redis.setrange(key, -1, b'Redis') @pytest.mark.run_loop async def test_strlen(redis): key, value = b'key:strlen', b'asyncio' await add(redis, key, value) test_value = await redis.strlen(key) assert test_value 
== len(value) test_value = await redis.strlen(b'not:' + key) assert test_value == 0 with pytest.raises(TypeError): await redis.strlen(None) @pytest.mark.run_loop async def test_cancel_hang(redis): exists_coro = redis.execute("EXISTS", b"key:test1") exists_coro.cancel() exists_check = await redis.exists(b"key:test2") assert not exists_check @pytest.mark.run_loop async def test_set_enc(create_redis, loop, server): redis = await create_redis( server.tcp_address, loop=loop, encoding='utf-8') TEST_KEY = 'my-key' ok = await redis.set(TEST_KEY, 'value') assert ok is True with pytest.raises(TypeError): await redis.set(None, 'value') await redis.delete(TEST_KEY) aioredis-1.0.0/tests/connection_test.py0000644000175000017500000004547713203624357021047 0ustar alexeyalexey00000000000000import pytest import asyncio import sys from unittest import mock from unittest.mock import patch from aioredis import ( ConnectionClosedError, ProtocolError, RedisConnection, RedisError, ReplyError, Channel, MaxClientsError ) @pytest.mark.run_loop async def test_connect_tcp(request, create_connection, loop, server): conn = await create_connection( server.tcp_address, loop=loop) assert conn.db == 0 assert isinstance(conn.address, tuple) assert conn.address[0] in ('127.0.0.1', '::1') assert conn.address[1] == server.tcp_address.port assert str(conn) == '' conn = await create_connection( ['localhost', server.tcp_address.port], loop=loop) assert conn.db == 0 assert isinstance(conn.address, tuple) assert conn.address[0] in ('127.0.0.1', '::1') assert conn.address[1] == server.tcp_address.port assert str(conn) == '' @pytest.mark.run_loop async def test_connect_inject_connection_cls( request, create_connection, loop, server): class MyConnection(RedisConnection): pass conn = await create_connection( server.tcp_address, loop=loop, connection_cls=MyConnection) assert isinstance(conn, MyConnection) @pytest.mark.run_loop async def test_connect_inject_connection_cls_invalid( request, create_connection, loop, 
server): with pytest.raises(AssertionError): await create_connection( server.tcp_address, loop=loop, connection_cls=type) @pytest.mark.run_loop async def test_connect_tcp_timeout(request, create_connection, loop, server): with patch.object(loop, 'create_connection') as\ open_conn_mock: open_conn_mock.side_effect = lambda *a, **kw: asyncio.sleep(0.2, loop=loop) with pytest.raises(asyncio.TimeoutError): await create_connection( server.tcp_address, loop=loop, timeout=0.1) @pytest.mark.run_loop async def test_connect_tcp_invalid_timeout( request, create_connection, loop, server): with pytest.raises(ValueError): await create_connection( server.tcp_address, loop=loop, timeout=0) @pytest.mark.run_loop @pytest.mark.skipif(sys.platform == 'win32', reason="No unixsocket on Windows") async def test_connect_unixsocket(create_connection, loop, server): conn = await create_connection( server.unixsocket, db=0, loop=loop) assert conn.db == 0 assert conn.address == server.unixsocket assert str(conn) == '' @pytest.mark.run_loop @pytest.mark.skipif(sys.platform == 'win32', reason="No unixsocket on Windows") async def test_connect_unixsocket_timeout(create_connection, loop, server): with patch.object(loop, 'create_unix_connection') as open_conn_mock: open_conn_mock.side_effect = lambda *a, **kw: asyncio.sleep(0.2, loop=loop) with pytest.raises(asyncio.TimeoutError): await create_connection( server.unixsocket, db=0, loop=loop, timeout=0.1) @pytest.mark.run_loop @pytest.redis_version(2, 8, 0, reason="maxclients config setting") async def test_connect_maxclients(create_connection, loop, start_server): server = start_server('server-maxclients') conn = await create_connection( server.tcp_address, loop=loop) await conn.execute(b'CONFIG', b'SET', 'maxclients', 1) with pytest.raises((MaxClientsError, ConnectionError)): conn2 = await create_connection( server.tcp_address, loop=loop) await conn2.execute('ping') def test_global_loop(create_connection, loop, server): asyncio.set_event_loop(loop) 
conn = loop.run_until_complete(create_connection( server.tcp_address, db=0)) assert conn.db == 0 assert conn._loop is loop @pytest.mark.run_loop async def test_select_db(create_connection, loop, server): address = server.tcp_address conn = await create_connection(address, loop=loop) assert conn.db == 0 with pytest.raises(ValueError): await create_connection(address, db=-1, loop=loop) with pytest.raises(TypeError): await create_connection(address, db=1.0, loop=loop) with pytest.raises(TypeError): await create_connection( address, db='bad value', loop=loop) with pytest.raises(TypeError): conn = await create_connection( address, db=None, loop=loop) await conn.select(None) with pytest.raises(ReplyError): await create_connection( address, db=100000, loop=loop) await conn.select(1) assert conn.db == 1 await conn.select(2) assert conn.db == 2 await conn.execute('select', 0) assert conn.db == 0 await conn.execute(b'select', 1) assert conn.db == 1 @pytest.mark.run_loop async def test_protocol_error(create_connection, loop, server): conn = await create_connection( server.tcp_address, loop=loop) reader = conn._reader with pytest.raises(ProtocolError): reader.feed_data(b'not good redis protocol response') await conn.select(1) assert len(conn._waiters) == 0 def test_close_connection__tcp(create_connection, loop, server): conn = loop.run_until_complete(create_connection( server.tcp_address, loop=loop)) conn.close() with pytest.raises(ConnectionClosedError): loop.run_until_complete(conn.select(1)) conn = loop.run_until_complete(create_connection( server.tcp_address, loop=loop)) conn.close() fut = None with pytest.raises(ConnectionClosedError): fut = conn.select(1) assert fut is None conn = loop.run_until_complete(create_connection( server.tcp_address, loop=loop)) conn.close() with pytest.raises(ConnectionClosedError): conn.execute_pubsub('subscribe', 'channel:1') @pytest.mark.run_loop @pytest.mark.skipif(sys.platform == 'win32', reason="No unixsocket on Windows") async def 
test_close_connection__socket(create_connection, loop, server): conn = await create_connection( server.unixsocket, loop=loop) conn.close() with pytest.raises(ConnectionClosedError): await conn.select(1) conn = await create_connection( server.unixsocket, loop=loop) conn.close() with pytest.raises(ConnectionClosedError): await conn.execute_pubsub('subscribe', 'channel:1') @pytest.mark.run_loop async def test_closed_connection_with_none_reader( create_connection, loop, server): address = server.tcp_address conn = await create_connection(address, loop=loop) stored_reader = conn._reader conn._reader = None with pytest.raises(ConnectionClosedError): await conn.execute('blpop', 'test', 0) conn._reader = stored_reader conn.close() conn = await create_connection(address, loop=loop) stored_reader = conn._reader conn._reader = None with pytest.raises(ConnectionClosedError): await conn.execute_pubsub('subscribe', 'channel:1') conn._reader = stored_reader conn.close() @pytest.mark.run_loop async def test_wait_closed(create_connection, loop, server): address = server.tcp_address conn = await create_connection(address, loop=loop) reader_task = conn._reader_task conn.close() assert not reader_task.done() await conn.wait_closed() assert reader_task.done() @pytest.mark.run_loop async def test_cancel_wait_closed(create_connection, loop, server): # Regression test: Don't throw error if wait_closed() is cancelled. address = server.tcp_address conn = await create_connection(address, loop=loop) reader_task = conn._reader_task conn.close() task = asyncio.ensure_future(conn.wait_closed(), loop=loop) # Make sure the task is cancelled # after it has been started by the loop. 
loop.call_soon(task.cancel) await conn.wait_closed() assert reader_task.done() @pytest.mark.run_loop async def test_auth(create_connection, loop, server): conn = await create_connection( server.tcp_address, loop=loop) res = await conn.execute('CONFIG', 'SET', 'requirepass', 'pass') assert res == b'OK' conn2 = await create_connection( server.tcp_address, loop=loop) with pytest.raises(ReplyError): await conn2.select(1) res = await conn2.auth('pass') assert res is True res = await conn2.select(1) assert res is True conn3 = await create_connection( server.tcp_address, password='pass', loop=loop) res = await conn3.select(1) assert res is True res = await conn2.execute('CONFIG', 'SET', 'requirepass', '') assert res == b'OK' @pytest.mark.run_loop async def test_decoding(create_connection, loop, server): conn = await create_connection( server.tcp_address, encoding='utf-8', loop=loop) assert conn.encoding == 'utf-8' res = await conn.execute('set', '{prefix}:key1', 'value') assert res == 'OK' res = await conn.execute('get', '{prefix}:key1') assert res == 'value' res = await conn.execute('set', '{prefix}:key1', b'bin-value') assert res == 'OK' res = await conn.execute('get', '{prefix}:key1') assert res == 'bin-value' res = await conn.execute('get', '{prefix}:key1', encoding='ascii') assert res == 'bin-value' res = await conn.execute('get', '{prefix}:key1', encoding=None) assert res == b'bin-value' with pytest.raises(UnicodeDecodeError): await conn.execute('set', '{prefix}:key1', 'значение') await conn.execute('get', '{prefix}:key1', encoding='ascii') conn2 = await create_connection( server.tcp_address, loop=loop) res = await conn2.execute('get', '{prefix}:key1', encoding='utf-8') assert res == 'значение' @pytest.mark.run_loop async def test_execute_exceptions(create_connection, loop, server): conn = await create_connection( server.tcp_address, loop=loop) with pytest.raises(TypeError): await conn.execute(None) with pytest.raises(TypeError): await conn.execute("ECHO", None) 
with pytest.raises(TypeError): await conn.execute("GET", ('a', 'b')) assert len(conn._waiters) == 0 @pytest.mark.run_loop async def test_subscribe_unsubscribe(create_connection, loop, server): conn = await create_connection( server.tcp_address, loop=loop) assert conn.in_pubsub == 0 res = await conn.execute('subscribe', 'chan:1') assert res == [[b'subscribe', b'chan:1', 1]] assert conn.in_pubsub == 1 res = await conn.execute('unsubscribe', 'chan:1') assert res == [[b'unsubscribe', b'chan:1', 0]] assert conn.in_pubsub == 0 res = await conn.execute('subscribe', 'chan:1', 'chan:2') assert res == [[b'subscribe', b'chan:1', 1], [b'subscribe', b'chan:2', 2], ] assert conn.in_pubsub == 2 res = await conn.execute('unsubscribe', 'non-existent') assert res == [[b'unsubscribe', b'non-existent', 2]] assert conn.in_pubsub == 2 res = await conn.execute('unsubscribe', 'chan:1') assert res == [[b'unsubscribe', b'chan:1', 1]] assert conn.in_pubsub == 1 @pytest.mark.run_loop async def test_psubscribe_punsubscribe(create_connection, loop, server): conn = await create_connection( server.tcp_address, loop=loop) res = await conn.execute('psubscribe', 'chan:*') assert res == [[b'psubscribe', b'chan:*', 1]] assert conn.in_pubsub == 1 @pytest.mark.run_loop async def test_bad_command_in_pubsub(create_connection, loop, server): conn = await create_connection( server.tcp_address, loop=loop) res = await conn.execute('subscribe', 'chan:1') assert res == [[b'subscribe', b'chan:1', 1]] msg = "Connection in SUBSCRIBE mode" with pytest.raises(RedisError, match=msg): await conn.execute('select', 1) with pytest.raises(RedisError, match=msg): conn.execute('get') @pytest.mark.run_loop async def test_pubsub_messages(create_connection, loop, server): sub = await create_connection( server.tcp_address, loop=loop) pub = await create_connection( server.tcp_address, loop=loop) res = await sub.execute('subscribe', 'chan:1') assert res == [[b'subscribe', b'chan:1', 1]] assert b'chan:1' in sub.pubsub_channels 
chan = sub.pubsub_channels[b'chan:1'] assert str(chan) == "" assert chan.name == b'chan:1' assert chan.is_active is True res = await pub.execute('publish', 'chan:1', 'Hello!') assert res == 1 msg = await chan.get() assert msg == b'Hello!' res = await sub.execute('psubscribe', 'chan:*') assert res == [[b'psubscribe', b'chan:*', 2]] assert b'chan:*' in sub.pubsub_patterns chan2 = sub.pubsub_patterns[b'chan:*'] assert chan2.name == b'chan:*' assert chan2.is_active is True res = await pub.execute('publish', 'chan:1', 'Hello!') assert res == 2 msg = await chan.get() assert msg == b'Hello!' dest_chan, msg = await chan2.get() assert dest_chan == b'chan:1' assert msg == b'Hello!' @pytest.mark.run_loop async def test_multiple_subscribe_unsubscribe(create_connection, loop, server): sub = await create_connection(server.tcp_address, loop=loop) res = await sub.execute_pubsub('subscribe', 'chan:1') ch = sub.pubsub_channels['chan:1'] assert res == [[b'subscribe', b'chan:1', 1]] res = await sub.execute_pubsub('subscribe', b'chan:1') assert res == [[b'subscribe', b'chan:1', 1]] assert ch is sub.pubsub_channels['chan:1'] res = await sub.execute_pubsub('subscribe', ch) assert res == [[b'subscribe', b'chan:1', 1]] assert ch is sub.pubsub_channels['chan:1'] res = await sub.execute_pubsub('unsubscribe', 'chan:1') assert res == [[b'unsubscribe', b'chan:1', 0]] res = await sub.execute_pubsub('unsubscribe', 'chan:1') assert res == [[b'unsubscribe', b'chan:1', 0]] res = await sub.execute_pubsub('psubscribe', 'chan:*') assert res == [[b'psubscribe', b'chan:*', 1]] res = await sub.execute_pubsub('psubscribe', 'chan:*') assert res == [[b'psubscribe', b'chan:*', 1]] res = await sub.execute_pubsub('punsubscribe', 'chan:*') assert res == [[b'punsubscribe', b'chan:*', 0]] res = await sub.execute_pubsub('punsubscribe', 'chan:*') assert res == [[b'punsubscribe', b'chan:*', 0]] @pytest.mark.run_loop async def test_execute_pubsub_errors(create_connection, loop, server): sub = await create_connection( 
server.tcp_address, loop=loop) with pytest.raises(TypeError): sub.execute_pubsub('subscribe', "chan:1", None) with pytest.raises(TypeError): sub.execute_pubsub('subscribe') with pytest.raises(ValueError): sub.execute_pubsub( 'subscribe', Channel('chan:1', is_pattern=True, loop=loop)) with pytest.raises(ValueError): sub.execute_pubsub( 'unsubscribe', Channel('chan:1', is_pattern=True, loop=loop)) with pytest.raises(ValueError): sub.execute_pubsub( 'psubscribe', Channel('chan:1', is_pattern=False, loop=loop)) with pytest.raises(ValueError): sub.execute_pubsub( 'punsubscribe', Channel('chan:1', is_pattern=False, loop=loop)) @pytest.mark.run_loop async def test_multi_exec(create_connection, loop, server): conn = await create_connection(server.tcp_address, loop=loop) ok = await conn.execute('set', 'foo', 'bar') assert ok == b'OK' ok = await conn.execute("MULTI") assert ok == b'OK' queued = await conn.execute('getset', 'foo', 'baz') assert queued == b'QUEUED' res = await conn.execute("EXEC") assert res == [b'bar'] ok = await conn.execute("MULTI") assert ok == b'OK' queued = await conn.execute('getset', 'foo', 'baz') assert queued == b'QUEUED' res = await conn.execute("DISCARD") assert res == b'OK' @pytest.mark.run_loop async def test_multi_exec__enc(create_connection, loop, server): conn = await create_connection( server.tcp_address, loop=loop, encoding='utf-8') ok = await conn.execute('set', 'foo', 'bar') assert ok == 'OK' ok = await conn.execute("MULTI") assert ok == 'OK' queued = await conn.execute('getset', 'foo', 'baz') assert queued == 'QUEUED' res = await conn.execute("EXEC") assert res == ['bar'] ok = await conn.execute("MULTI") assert ok == 'OK' queued = await conn.execute('getset', 'foo', 'baz') assert queued == 'QUEUED' res = await conn.execute("DISCARD") assert res == 'OK' @pytest.mark.run_loop async def test_connection_parser_argument(create_connection, server, loop): klass = mock.MagicMock() klass.return_value = reader = mock.Mock() conn = await 
create_connection(server.tcp_address, parser=klass, loop=loop) assert klass.mock_calls == [ mock.call(protocolError=ProtocolError, replyError=ReplyError), ] response = [False] def feed_gets(data, **kwargs): response[0] = data reader.gets.side_effect = lambda *args, **kwargs: response[0] reader.feed.side_effect = feed_gets assert b'+PONG\r\n' == await conn.execute('ping') @pytest.mark.run_loop async def test_connection_idle_close(create_connection, start_server, loop): server = start_server('idle') conn = await create_connection(server.tcp_address, loop=loop) ok = await conn.execute("config", "set", "timeout", 1) assert ok == b'OK' await asyncio.sleep(3, loop=loop) with pytest.raises(ConnectionClosedError): assert await conn.execute('ping') is None @pytest.mark.parametrize('kwargs', [ {}, {'db': 1}, {'encoding': 'utf-8'}, ], ids=repr) @pytest.mark.run_loop async def test_create_connection__tcp_url( create_connection, server_tcp_url, loop, kwargs): url = server_tcp_url(**kwargs) db = kwargs.get('db', 0) enc = kwargs.get('encoding', None) conn = await create_connection(url, loop=loop) pong = b'PONG' if not enc else b'PONG'.decode(enc) assert await conn.execute('ping') == pong assert conn.db == db assert conn.encoding == enc @pytest.mark.skipif('sys.platform == "win32"', reason="No unix sockets on Windows") @pytest.mark.parametrize('kwargs', [ {}, {'db': 1}, {'encoding': 'utf-8'}, ], ids=repr) @pytest.mark.run_loop async def test_create_connection__unix_url( create_connection, server_unix_url, loop, kwargs): url = server_unix_url(**kwargs) db = kwargs.get('db', 0) enc = kwargs.get('encoding', None) conn = await create_connection(url, loop=loop) pong = b'PONG' if not enc else b'PONG'.decode(enc) assert await conn.execute('ping') == pong assert conn.db == db assert conn.encoding == enc aioredis-1.0.0/tests/integration_test.py0000644000175000017500000000542713203624357021222 0ustar alexeyalexey00000000000000import asyncio import pytest import aioredis @pytest.fixture def 
pool_or_redis(_closable, server, loop): version = tuple(map(int, aioredis.__version__.split('.')[:2])) if version >= (1, 0): factory = aioredis.create_redis_pool else: factory = aioredis.create_pool async def redis_factory(maxsize): redis = await factory(server.tcp_address, loop=loop, minsize=1, maxsize=maxsize) _closable(redis) return redis return redis_factory async def simple_get_set(pool, idx, loop): """A simple test to make sure Redis(pool) can be used as old Pool(Redis). """ val = 'val:{}'.format(idx) with await pool as redis: assert await redis.set('key', val) await redis.get('key', encoding='utf-8') async def pipeline(pool, val, loop): val = 'val:{}'.format(val) with await pool as redis: f1 = redis.set('key', val) f2 = redis.get('key', encoding='utf-8') ok, res = await asyncio.gather(f1, f2, loop=loop) async def transaction(pool, val, loop): val = 'val:{}'.format(val) with await pool as redis: tr = redis.multi_exec() tr.set('key', val) tr.get('key', encoding='utf-8') ok, res = await tr.execute() assert ok, ok assert res == val async def blocking_pop(pool, val, loop): async def lpush(): with await pool as redis: # here v0.3 has bound connection, v1.0 does not; await asyncio.sleep(.1, loop=loop) await redis.lpush('list-key', 'val') async def blpop(): with await pool as redis: # here v0.3 has bound connection, v1.0 does not; res = await redis.blpop( 'list-key', timeout=2, encoding='utf-8') assert res == ['list-key', 'val'], res await asyncio.gather(blpop(), lpush(), loop=loop) @pytest.mark.run_loop @pytest.mark.parametrize('test_case,pool_size', [ (simple_get_set, 1), (pipeline, 1), (transaction, 1), pytest.mark.xfail((blocking_pop, 1), reason="blpop gets connection first and blocks"), (simple_get_set, 10), (pipeline, 10), (transaction, 10), (blocking_pop, 10), ], ids=lambda o: o.__name__) async def test_operations(pool_or_redis, test_case, pool_size, loop): repeat = 100 redis = await pool_or_redis(pool_size) done, pending = await asyncio.wait( 
[asyncio.ensure_future(test_case(redis, i, loop), loop=loop) for i in range(repeat)], loop=loop) assert not pending success = 0 failures = [] for fut in done: exc = fut.exception() if exc is None: success += 1 else: failures.append(exc) assert repeat == success, failures assert not failures aioredis-1.0.0/tests/pyreader_test.py0000644000175000017500000001466613203624357020517 0ustar alexeyalexey00000000000000import pytest from aioredis.errors import ( ProtocolError, ReplyError, AuthError, MaxClientsError, ) from aioredis.parser import PyReader @pytest.fixture def reader(): return PyReader() def test_nothing(reader): assert reader.gets() is False def test_error_when_feeding_non_string(reader): with pytest.raises(TypeError): reader.feed(1) @pytest.mark.parametrize('data', [ b'x', b'$5\r\nHello world', b':None\r\n', b':1.2\r\n', b':1,2\r\n', ], ids=[ 'Bad control char', 'Invalid bulk length', 'Invalid int - none', 'Invalid int - dot', 'Invalid int - comma', ]) def test_protocol_error(reader, data): reader.feed(data) with pytest.raises(ProtocolError): reader.gets() # not functional any more with pytest.raises(ProtocolError): reader.gets() class CustomExc(Exception): pass @pytest.mark.parametrize('exc,arg', [ (RuntimeError, RuntimeError), (CustomExc, lambda e: CustomExc(e)), ], ids=['RuntimeError', 'callable']) def test_protocol_error_with_custom_class(exc, arg): reader = PyReader(protocolError=arg) reader.feed(b"x") with pytest.raises(exc): reader.gets() @pytest.mark.parametrize('init', [ dict(protocolError="wrong"), dict(replyError="wrong"), ], ids=['wrong protocolError', 'wrong replyError']) def test_fail_with_wrong_error_class(init): with pytest.raises(TypeError): PyReader(**init) def test_error_string(reader): reader.feed(b"-error\r\n") error = reader.gets() assert isinstance(error, ReplyError) assert error.args == ("error",) @pytest.mark.parametrize('error_kind,data', [ (AuthError, b"-NOAUTH auth required\r\n"), (AuthError, b"-ERR invalid password\r\n"), 
(MaxClientsError, b"-ERR max number of clients reached\r\n"), ]) def test_error_construction(reader, error_kind, data): reader.feed(data) error = reader.gets() assert isinstance(error, ReplyError) assert isinstance(error, error_kind) @pytest.mark.parametrize('exc,arg', [ (RuntimeError, RuntimeError), (CustomExc, lambda e: CustomExc(e)), ], ids=['RuntimeError', 'callable']) def test_error_string_with_custom_class(exc, arg): reader = PyReader(replyError=arg) reader.feed(b"-error\r\n") error = reader.gets() assert isinstance(error, exc) assert error.args == ("error",) def test_errors_in_nested_multi_bulk(reader): reader.feed(b"*2\r\n-err0\r\n-err1\r\n") for r, error in zip(("err0", "err1"), reader.gets()): assert isinstance(error, ReplyError) assert error.args == (r,) def test_integer(reader): value = 2**63-1 # Largest 64-bit signed integer reader.feed((":%d\r\n" % value).encode("ascii")) assert reader.gets() == value def test_status_string(reader): reader.feed(b"+ok\r\n") assert reader.gets() == b"ok" @pytest.mark.parametrize('data,expected', [ (b'$0\r\n\r\n', b''), (b'$-1\r\n', None), (b'$5\r\nhello\r\n', b'hello'), ], ids=['Empty', 'null', 'hello']) def test_bulk_string(reader, data, expected): reader.feed(data) assert reader.gets() == expected def test_bulk_string_without_encoding(reader): snowman = b"\xe2\x98\x83" reader.feed(b"$3\r\n" + snowman + b"\r\n") assert reader.gets() == snowman @pytest.mark.parametrize('encoding,expected', [ ('utf-8', b"\xe2\x98\x83".decode('utf-8')), ('utf-32', b"\xe2\x98\x83"), ], ids=['utf-8', 'utf-32']) def test_bulk_string_with_encoding(encoding, expected): snowman = b"\xe2\x98\x83" reader = PyReader(encoding=encoding) reader.feed(b"$3\r\n" + snowman + b"\r\n") assert reader.gets() == expected def test_bulk_string_with_invalid_encoding(): reader = PyReader(encoding="unknown") reader.feed(b"$5\r\nhello\r\n") with pytest.raises(LookupError): reader.gets() def test_bulk_string_wait_buffer(reader): reader.feed(b'$5\r\nH') assert not 
reader.gets() reader.feed(b'ello') assert not reader.gets() reader.feed(b'\r\n') assert reader.gets() == b'Hello' @pytest.mark.parametrize('data,expected', [ (b"*-1\r\n", None), (b"*0\r\n", []), (b"*2\r\n$5\r\nhello\r\n$5\r\nworld\r\n", [b'hello', b'world']), ], ids=['Null', 'Empty list', 'hello world']) def test_null_multi_bulk(reader, data, expected): reader.feed(data) assert reader.gets() == expected @pytest.mark.parametrize('data', [ (b"*2\r\n$5\r\nhello\r\n", b':1'), (b'*2\r\n:1\r\n*1\r\n', b'+hello'), (b'*2\r\n+hello\r\n+world',), (b'*2\r\n*1\r\n+hello\r\n*1\r\n+world',), ], ids=['First in bulk', 'Error in nested', 'Multiple errors', 'Multiple nested']) def test_multi_bulk_with_invalid_encoding_and_partial_reply(data): reader = PyReader(encoding="unknown") for chunk in data: reader.feed(chunk) assert reader.gets() is False reader.feed(b"\r\n") with pytest.raises(LookupError): reader.gets() reader.feed(b':1\r\n') assert reader.gets() == 1 def test_nested_multi_bulk(reader): reader.feed(b"*2\r\n*2\r\n$5\r\nhello\r\n$5\r\nworld\r\n$1\r\n!\r\n") assert reader.gets() == [[b"hello", b"world"], b"!"] def test_nested_multi_bulk_depth(reader): reader.feed(b"*1\r\n*1\r\n*1\r\n*1\r\n$1\r\n!\r\n") assert reader.gets() == [[[[b"!"]]]] @pytest.mark.parametrize('encoding,expected', [ ('utf-8', b"\xe2\x98\x83".decode('utf-8')), ('utf-32', b"\xe2\x98\x83"), ], ids=['utf-8', 'utf-32']) def test_simple_string_with_encoding(encoding, expected): snowman = b"\xe2\x98\x83" reader = PyReader(encoding=encoding) reader.feed(b"+" + snowman + b"\r\n") assert reader.gets() == expected def test_invalid_offset(reader): data = b"+ok\r\n" with pytest.raises(ValueError): reader.feed(data, 6) def test_invalid_length(reader): data = b"+ok\r\n" with pytest.raises(ValueError): reader.feed(data, 0, 6) def test_ok_offset(reader): data = b"blah+ok\r\n" reader.feed(data, 4) assert reader.gets() == b"ok" def test_ok_length(reader): data = b"blah+ok\r\n" reader.feed(data, 4, len(data)-4) assert 
reader.gets() == b"ok" @pytest.mark.xfail() def test_maxbuf(reader): defaultmaxbuf = reader.getmaxbuf() reader.setmaxbuf(0) assert 0 == reader.getmaxbuf() reader.setmaxbuf(10000) assert 10000 == reader.getmaxbuf() reader.setmaxbuf(None) assert defaultmaxbuf == reader.getmaxbuf() with pytest.raises(ValueError): reader.setmaxbuf(-4) aioredis-1.0.0/tests/task_cancellation.py0000664000175000017500000000213712724334020021303 0ustar alexeyalexey00000000000000import unittest import asyncio from ._testutil import BaseTest, run_until_complete class CancellationTest(BaseTest): @run_until_complete @unittest.expectedFailure def test_future_cancellation_but_blocks_connection(self): conn1 = yield from self.create_connection( ('localhost', 6379), loop=self.loop) conn2 = yield from self.create_connection( ('localhost', 6379), loop=self.loop) @asyncio.coroutine def task1(): yield from asyncio.sleep(2, loop=self.loop) yield from conn1.execute('LPUSH', 'a_list', 'value') sec1, ms = yield from conn2.execute('TIME') fut = conn2.execute('BLPOP', 'a_list', 3) asyncio.async(task1(), loop=self.loop) try: yield from asyncio.wait_for(fut, .1, loop=self.loop) except asyncio.TimeoutError: pass self.assertTrue(fut.cancelled()) # Future is cancelled but connection is blocked # with BLPOP timeout sec2, ms = yield from conn2.execute('TIME') self.assertEqual(int(sec2) - int(sec1), 1) aioredis-1.0.0/MANIFEST.in0000644000175000017500000000036513203624357015656 0ustar alexeyalexey00000000000000include LICENSE include CHANGES.txt include CONTRIBUTORS.txt include README.rst graft aioredis global-exclude *.pyc *.swp *.*~ recursive-include examples *.py recursive-include tests *.py recursive-include docs *.rst include docs/_build/man/*.* aioredis-1.0.0/examples/0000755000175000017500000000000013203634127015726 5ustar alexeyalexey00000000000000aioredis-1.0.0/examples/pubsub2.py0000644000175000017500000000311713203624357017670 0ustar alexeyalexey00000000000000import asyncio import aioredis async def 
pubsub(): sub = await aioredis.create_redis( 'redis://localhost') ch1, ch2 = await sub.subscribe('channel:1', 'channel:2') assert isinstance(ch1, aioredis.Channel) assert isinstance(ch2, aioredis.Channel) async def async_reader(channel): while await channel.wait_message(): msg = await channel.get(encoding='utf-8') # ... process message ... print("message in {}: {}".format(channel.name, msg)) tsk1 = asyncio.ensure_future(async_reader(ch1)) # Or alternatively: async def async_reader2(channel): while True: msg = await channel.get(encoding='utf-8') if msg is None: break # ... process message ... print("message in {}: {}".format(channel.name, msg)) tsk2 = asyncio.ensure_future(async_reader2(ch2)) # Publish messages and terminate pub = await aioredis.create_redis( 'redis://localhost') while True: channels = await pub.pubsub_channels('channel:*') if len(channels) == 2: break for msg in ("Hello", ",", "world!"): for ch in ('channel:1', 'channel:2'): await pub.publish(ch, msg) pub.close() sub.close() await asyncio.sleep(0) await pub.wait_closed() await sub.wait_closed() await asyncio.gather(tsk1, tsk2) if __name__ == '__main__': import os if 'redis_version:2.6' not in os.environ.get('REDIS_VERSION', ''): loop = asyncio.get_event_loop() loop.run_until_complete(pubsub()) aioredis-1.0.0/examples/iscan.py0000644000175000017500000000266613203624357017413 0ustar alexeyalexey00000000000000import asyncio import aioredis async def main(): redis = await aioredis.create_redis( 'redis://localhost') await redis.delete('something:hash', 'something:set', 'something:zset') await redis.mset('something', 'value', 'something:else', 'else') await redis.hmset('something:hash', 'something:1', 'value:1', 'something:2', 'value:2') await redis.sadd('something:set', 'something:1', 'something:2', 'something:else') await redis.zadd('something:zset', 1, 'something:1', 2, 'something:2', 3, 'something:else') await go(redis) redis.close() await redis.wait_closed() async def go(redis): async for key in 
redis.iscan(match='something*'): print('Matched:', key) key = 'something:hash' async for name, val in redis.ihscan(key, match='something*'): print('Matched:', name, '->', val) key = 'something:set' async for val in redis.isscan(key, match='something*'): print('Matched:', val) key = 'something:zset' async for val, score in redis.izscan(key, match='something*'): print('Matched:', val, ':', score) if __name__ == '__main__': import os if 'redis_version:2.6' not in os.environ.get('REDIS_VERSION', ''): loop = asyncio.get_event_loop() loop.run_until_complete(main()) aioredis-1.0.0/examples/sentinel.py0000644000175000017500000000070413203624357020126 0ustar alexeyalexey00000000000000import asyncio import aioredis async def main(): sentinel_client = await aioredis.create_sentinel( [('localhost', 26379)]) master_redis = sentinel_client.master_for('mymaster') info = await master_redis.role() print("Master role:", info) assert info.role == 'master' sentinel_client.close() await sentinel_client.wait_closed() if __name__ == '__main__': asyncio.get_event_loop().run_until_complete(main()) aioredis-1.0.0/examples/transaction2.py0000644000175000017500000000105513203624357020714 0ustar alexeyalexey00000000000000import asyncio import aioredis async def main(): redis = await aioredis.create_redis( 'redis://localhost') async def transaction(): tr = redis.multi_exec() future1 = tr.set('foo', '123') future2 = tr.set('bar', '321') result = await tr.execute() assert result == await asyncio.gather(future1, future2) return result await transaction() redis.close() await redis.wait_closed() if __name__ == '__main__': loop = asyncio.get_event_loop() loop.run_until_complete(main()) aioredis-1.0.0/examples/pool_pubsub.py0000644000175000017500000000407013203624357020636 0ustar alexeyalexey00000000000000import asyncio import aioredis STOPWORD = 'STOP' async def pubsub(): pool = await aioredis.create_pool( 'redis://localhost', minsize=5, maxsize=10) async def reader(channel): while (await 
channel.wait_message()): msg = await channel.get(encoding='utf-8') # ... process message ... print("message in {}: {}".format(channel.name, msg)) if msg == STOPWORD: return with await pool as conn: await conn.execute_pubsub('subscribe', 'channel:1') channel = conn.pubsub_channels['channel:1'] await reader(channel) # wait for reader to complete await conn.execute_pubsub('unsubscribe', 'channel:1') # Explicit connection usage conn = await pool.acquire() try: await conn.execute_pubsub('subscribe', 'channel:1') channel = conn.pubsub_channels['channel:1'] await reader(channel) # wait for reader to complete await conn.execute_pubsub('unsubscribe', 'channel:1') finally: pool.release(conn) pool.close() await pool.wait_closed() # closing all open connections def main(): loop = asyncio.get_event_loop() tsk = asyncio.async(pubsub(), loop=loop) async def publish(): pub = await aioredis.create_redis( 'redis://localhost') while not tsk.done(): # wait for clients to subscribe while True: subs = await pub.pubsub_numsub('channel:1') if subs[b'channel:1'] == 1: break await asyncio.sleep(0, loop=loop) # publish some messages for msg in ['one', 'two', 'three']: await pub.publish('channel:1', msg) # send stop word await pub.publish('channel:1', STOPWORD) pub.close() await pub.wait_closed() loop.run_until_complete(asyncio.gather(publish(), tsk, loop=loop)) if __name__ == '__main__': import os if 'redis_version:2.6' not in os.environ.get('REDIS_VERSION', ''): main() aioredis-1.0.0/examples/pool.py0000644000175000017500000000077413203624357017265 0ustar alexeyalexey00000000000000import asyncio import aioredis async def main(): pool = await aioredis.create_pool( 'redis://localhost', minsize=5, maxsize=10) with await pool as conn: # low-level redis connection await conn.execute('set', 'my-key', 'value') val = await conn.execute('get', 'my-key') print('raw value:', val) pool.close() await pool.wait_closed() # closing all open connections if __name__ == '__main__': 
asyncio.get_event_loop().run_until_complete(main()) aioredis-1.0.0/examples/pipeline.py0000644000175000017500000000242013203624357020107 0ustar alexeyalexey00000000000000import asyncio import aioredis async def main(): redis = await aioredis.create_redis( 'redis://localhost') # No pipelining; async def wait_each_command(): val = await redis.get('foo') # wait until `val` is available cnt = await redis.incr('bar') # wait until `cnt` is available return val, cnt # Sending multiple commands and then gathering results async def pipelined(): fut1 = redis.get('foo') # issue command and return future fut2 = redis.incr('bar') # issue command and return future # block until results are available val, cnt = await asyncio.gather(fut1, fut2) return val, cnt # Explicit pipeline async def explicit_pipeline(): pipe = redis.pipeline() fut1 = pipe.get('foo') fut2 = pipe.incr('bar') result = await pipe.execute() val, cnt = await asyncio.gather(fut1, fut2) assert result == [val, cnt] return val, cnt res = await wait_each_command() print(res) res = await pipelined() print(res) res = await explicit_pipeline() print(res) redis.close() await redis.wait_closed() if __name__ == '__main__': loop = asyncio.get_event_loop() loop.run_until_complete(main()) aioredis-1.0.0/examples/scan.py0000644000175000017500000000112013203624357017222 0ustar alexeyalexey00000000000000import asyncio import aioredis async def main(): """Scan command example.""" redis = await aioredis.create_redis( 'redis://localhost') await redis.mset('key:1', 'value1', 'key:2', 'value2') cur = b'0' # set initial cursor to 0 while cur: cur, keys = await redis.scan(cur, match='key:*') print("Iteration results:", keys) redis.close() await redis.wait_closed() if __name__ == '__main__': import os if 'redis_version:2.6' not in os.environ.get('REDIS_VERSION', ''): asyncio.get_event_loop().run_until_complete(main()) aioredis-1.0.0/examples/connection.py0000644000175000017500000000122513203624357020443 0ustar 
alexeyalexey00000000000000import asyncio import aioredis async def main(): conn = await aioredis.create_connection( 'redis://localhost', encoding='utf-8') ok = await conn.execute('set', 'my-key', 'some value') assert ok == 'OK', ok str_value = await conn.execute('get', 'my-key') raw_value = await conn.execute('get', 'my-key', encoding=None) assert str_value == 'some value' assert raw_value == b'some value' print('str value:', str_value) print('raw value:', raw_value) # optionally close connection conn.close() await conn.wait_closed() if __name__ == '__main__': asyncio.get_event_loop().run_until_complete(main()) aioredis-1.0.0/examples/commands.py0000644000175000017500000000156013203624357020107 0ustar alexeyalexey00000000000000import asyncio import aioredis async def main(): # Redis client bound to single connection (no auto reconnection). redis = await aioredis.create_redis( 'redis://localhost') await redis.set('my-key', 'value') val = await redis.get('my-key') print(val) # gracefully closing underlying connection redis.close() await redis.wait_closed() async def redis_pool(): # Redis client bound to pool of connections (auto-reconnecting). 
redis = await aioredis.create_redis_pool( 'redis://localhost') await redis.set('my-key', 'value') val = await redis.get('my-key') print(val) # gracefully closing underlying connection redis.close() await redis.wait_closed() if __name__ == '__main__': asyncio.get_event_loop().run_until_complete(main()) asyncio.get_event_loop().run_until_complete(redis_pool()) aioredis-1.0.0/examples/transaction.py0000644000175000017500000000074313203624357020635 0ustar alexeyalexey00000000000000import asyncio import aioredis async def main(): redis = await aioredis.create_redis( 'redis://localhost') await redis.delete('foo', 'bar') tr = redis.multi_exec() fut1 = tr.incr('foo') fut2 = tr.incr('bar') res = await tr.execute() res2 = await asyncio.gather(fut1, fut2) print(res) assert res == res2 redis.close() await redis.wait_closed() if __name__ == '__main__': asyncio.get_event_loop().run_until_complete(main()) aioredis-1.0.0/examples/pool2.py0000644000175000017500000000142013203624357017334 0ustar alexeyalexey00000000000000import asyncio import aioredis async def main(): pool = await aioredis.create_pool( 'redis://localhost') # async with pool.get() as conn: await pool.execute('set', 'my-key', 'value') await async_with(pool) await with_await(pool) pool.close() await pool.wait_closed() async def async_with(pool): async with pool.get() as conn: value = await conn.execute('get', 'my-key') print('raw value:', value) async def with_await(pool): # This is exactly the same as: # with (yield from pool) as conn: with (await pool) as conn: value = await conn.execute('get', 'my-key') print('raw value:', value) if __name__ == '__main__': loop = asyncio.get_event_loop() loop.run_until_complete(main()) aioredis-1.0.0/examples/pubsub.py0000644000175000017500000000124013203624357017601 0ustar alexeyalexey00000000000000import asyncio import aioredis async def reader(ch): while (await ch.wait_message()): msg = await ch.get_json() print("Got Message:", msg) async def main(): pub = await 
aioredis.create_redis( 'redis://localhost') sub = await aioredis.create_redis( 'redis://localhost') res = await sub.subscribe('chan:1') ch1 = res[0] tsk = asyncio.ensure_future(reader(ch1)) res = await pub.publish_json('chan:1', ["Hello", "world"]) assert res == 1 await sub.unsubscribe('chan:1') await tsk sub.close() pub.close() if __name__ == '__main__': asyncio.get_event_loop().run_until_complete(main()) aioredis-1.0.0/setup.py0000644000175000017500000000367413203624357015640 0ustar alexeyalexey00000000000000import re import os.path import sys import platform from setuptools import setup, find_packages install_requires = ['async-timeout'] if platform.python_implementation() == 'CPython': install_requires.append('hiredis') PY_VER = sys.version_info if PY_VER < (3, 5): raise RuntimeError("aioredis doesn't support Python version prior 3.5") def read(*parts): with open(os.path.join(*parts), 'rt') as f: return f.read().strip() def read_version(): regexp = re.compile(r"^__version__\W*=\W*'([\d.abrc]+)'") init_py = os.path.join(os.path.dirname(__file__), 'aioredis', '__init__.py') with open(init_py) as f: for line in f: match = regexp.match(line) if match is not None: return match.group(1) else: raise RuntimeError('Cannot find version in aioredis/__init__.py') classifiers = [ 'License :: OSI Approved :: MIT License', 'Development Status :: 4 - Beta', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3 :: Only', 'Operating System :: POSIX', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'Topic :: Software Development', 'Topic :: Software Development :: Libraries', 'Framework :: AsyncIO', ] setup(name='aioredis', version=read_version(), description=("asyncio (PEP 3156) Redis support"), long_description="\n\n".join((read('README.rst'), read('CHANGES.txt'))), classifiers=classifiers, platforms=["POSIX"], 
author="Alexey Popravka", author_email="alexey.popravka@horsedevel.com", url="https://github.com/aio-libs/aioredis", license="MIT", packages=find_packages(exclude=["tests"]), install_requires=install_requires, include_package_data=True, ) aioredis-1.0.0/PKG-INFO0000644000175000017500000003245213203634127015213 0ustar alexeyalexey00000000000000Metadata-Version: 1.1 Name: aioredis Version: 1.0.0 Summary: asyncio (PEP 3156) Redis support Home-page: https://github.com/aio-libs/aioredis Author: Alexey Popravka Author-email: alexey.popravka@horsedevel.com License: MIT Description-Content-Type: UNKNOWN Description: aioredis ======== asyncio (PEP 3156) Redis client library. .. image:: https://travis-ci.org/aio-libs/aioredis.svg?branch=master :target: https://travis-ci.org/aio-libs/aioredis .. image:: https://codecov.io/gh/aio-libs/aioredis/branch/master/graph/badge.svg :target: https://codecov.io/gh/aio-libs/aioredis .. image:: https://ci.appveyor.com/api/projects/status/wngyx6s98o6hsxmt/branch/master?svg=true :target: https://ci.appveyor.com/project/popravich/aioredis Features -------- ================================ ============================== hiredis_ parser Yes Pure-python parser Yes Low-level & High-level APIs Yes Connections Pool Yes Pipelining support Yes Pub/Sub support Yes SSL/TLS support Yes Sentinel support Yes [1]_ Redis Cluster support WIP Trollius (python 2.7) No Tested CPython versions `3.5, 3.6 `_ [2]_ Tested PyPy3 versions `5.9.0 `_ Tested for Redis server `2.6, 2.8, 3.0, 3.2, 4.0 `_ Support for dev Redis server through low-level API ================================ ============================== .. [1] Sentinel support is available in master branch. This feature is not yet stable and may have some issues. .. [2] For Python 3.3, 3.4 support use aioredis v0.3. Documentation ------------- http://aioredis.readthedocs.io/ Usage examples -------------- Simple low-level interface: .. 
code:: python import asyncio import aioredis loop = asyncio.get_event_loop() async def go(): conn = await aioredis.create_connection( 'redis://localhost', loop=loop) await conn.execute('set', 'my-key', 'value') val = await conn.execute('get', 'my-key') print(val) conn.close() await conn.wait_closed() loop.run_until_complete(go()) # will print 'value' Simple high-level interface: .. code:: python import asyncio import aioredis loop = asyncio.get_event_loop() async def go(): redis = await aioredis.create_redis( 'redis://localhost', loop=loop) await redis.set('my-key', 'value') val = await redis.get('my-key') print(val) redis.close() await redis.wait_closed() loop.run_until_complete(go()) # will print 'value' Connections pool: .. code:: python import asyncio import aioredis loop = asyncio.get_event_loop() async def go(): pool = await aioredis.create_pool( 'redis://localhost', minsize=5, maxsize=10, loop=loop) await pool.execute('set', 'my-key', 'value') print(await pool.execute('get', 'my-key')) # graceful shutdown pool.close() await pool.wait_closed() loop.run_until_complete(go()) Requirements ------------ * Python_ 3.5.3+ * hiredis_ .. note:: hiredis is preferred requirement. Pure-python protocol parser is implemented as well and can be used through ``parser`` parameter. Benchmarks ---------- Benchmarks can be found here: https://github.com/popravich/python-redis-benchmark Discussion list --------------- *aio-libs* google group: https://groups.google.com/forum/#!forum/aio-libs Or gitter room: https://gitter.im/aio-libs/Lobby License ------- The aioredis is offered under MIT license. .. _Python: https://www.python.org .. _hiredis: https://pypi.python.org/pypi/hiredis .. 
_travis: https://travis-ci.org/aio-libs/aioredis Changes ------- 1.0.0 (2017-11-17) ^^^^^^^^^^^^^^^^^^ **NEW**: * **Important!** Drop Python 3.3, 3.4 support; (see `#321 `_, `#323 `_ and `#326 `_); * **Important!** Connections pool has been refactored; now ``create_redis`` function will yield ``Redis`` instance instead of ``RedisPool`` (see `#129 `_); * **Important!** Change sorted set commands reply format: return list of tuples instead of plain list for commands accepting ``withscores`` argument (see `#334 `_); * **Important!** Change ``hscan`` command reply format: return list of tuples instead of mixed key-value list (see `#335 `_); * Implement Redis URI support as supported ``address`` argument value (see `#322 `_); * Dropped ``create_reconnecting_redis``, ``create_redis_pool`` should be used instead; * Implement custom ``StreamReader`` (see `#273 `_); * Implement Sentinel support (see `#181 `_); * Implement pure-python parser (see `#212 `_); * Add ``migrate_keys`` command (see `#187 `_); * Add ``zrevrangebylex`` command (see `#201 `_); * Add ``command``, ``command_count``, ``command_getkeys`` and ``command_info`` commands (see `#229 `_); * Add ``ping`` support in pubsub connection (see `#264 `_); * Add ``exist`` parameter to ``zadd`` command (see `#288 `_); * Add ``MaxClientsError`` and implement ``ReplyError`` specialization (see `#325 `_); * Add ``encoding`` parameter to sorted set commands (see `#289 `_); **FIX**: * Fix ``CancelledError`` in ``conn._reader_task`` (see `#301 `_); * Fix pending commands cancellation with ``CancelledError``, use explicit exception instead of calling ``cancel()`` method (see `#316 `_); * Correct error message on Sentinel discovery of master/slave with password (see `#327 `_); * Fix ``bytearray`` support as command argument (see `#329 `_); * Fix critical bug in patched asyncio.Lock (see `#256 `_); * Fix Multi/Exec transaction canceled error (see `#225 `_); * Add missing arguments to ``create_redis`` and ``create_redis_pool``; * 
Fix deprecation warning (see `#191 `_); * Make correct ``__aiter__()`` (see `#192 `_); * Backward compatibility fix for ``with (yield from pool) as conn:`` (see `#205 `_); * Fixed pubsub receiver stop() (see `#211 `_); **MISC**: * Multiple test fixes; * Add PyPy3 to build matrix; * Update dependencies versions; * Add missing Python 3.6 classifier; 0.3.5 (2017-11-08) ^^^^^^^^^^^^^^^^^^ **FIX**: * Fix for indistinguishable futures cancellation with ``asyncio.CancelledError`` (see `#316 `_), cherry-picked from master; 0.3.4 (2017-10-25) ^^^^^^^^^^^^^^^^^^ **FIX**: * Fix time command result decoding when using connection-wide encoding setting (see `#266 `_); 0.3.3 (2017-06-30) ^^^^^^^^^^^^^^^^^^ **FIX**: * Critical bug fixed in patched asyncio.Lock (see `#256 `_); 0.3.2 (2017-06-21) ^^^^^^^^^^^^^^^^^^ **NEW**: * Added ``zrevrangebylex`` command (see `#201 `_), cherry-picked from master; * Add connection timeout (see `#221 `_), cherry-picked from master; **FIX**: * Fixed pool close warning (see `#239 `_ and `#236 `_), cherry-picked from master; * Fixed asyncio Lock deadlock issue (see `#231 `_ and `#241 `_); 0.3.1 (2017-05-09) ^^^^^^^^^^^^^^^^^^ **FIX**: * Fix pubsub Receiver missing iter() method (see `#203 `_); 0.3.0 (2017-01-11) ^^^^^^^^^^^^^^^^^^ **NEW**: * Pub/Sub connection commands accept ``Channel`` instances (see `#168 `_); * Implement new Pub/Sub MPSC (multi-producers, single-consumer) Queue -- ``aioredis.pubsub.Receiver`` (see `#176 `_); * Add ``aioredis.abc`` module providing abstract base classes defining interface for basic lib components; (see `#176 `_); * Implement Geo commands support (see `#177 `_ and `#179 `_); **FIX**: * Minor tests fixes; **MISC**: * Update examples and docs to use ``async``/``await`` syntax also keeping ``yield from`` examples for history (see `#173 `_); * Reflow Travis CI configuration; add Python 3.6 section (see `#170 `_); * Add AppVeyor integration to run tests on Windows (see `#180 `_); * Update multiple development 
requirements; Platform: POSIX Classifier: License :: OSI Approved :: MIT License Classifier: Development Status :: 4 - Beta Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3 :: Only Classifier: Operating System :: POSIX Classifier: Environment :: Web Environment Classifier: Intended Audience :: Developers Classifier: Topic :: Software Development Classifier: Topic :: Software Development :: Libraries Classifier: Framework :: AsyncIO