Beaker-1.10.0/beaker/__init__.py
__version__ = '1.10.0'

Beaker-1.10.0/beaker/_compat.py
from __future__ import absolute_import
import sys

# True if we are running on Python 2.
PY2 = sys.version_info[0] == 2
PYVER = sys.version_info[:2]
JYTHON = sys.platform.startswith('java')

if PY2 and not JYTHON:  # pragma: no cover
    import cPickle as pickle
else:  # pragma: no cover
    import pickle


if not PY2:  # pragma: no cover
    xrange_ = range
    NoneType = type(None)

    string_type = str
    unicode_text = str
    byte_string = bytes

    from urllib.parse import urlencode as url_encode
    from urllib.parse import quote as url_quote
    from urllib.parse import unquote as url_unquote
    from urllib.parse import urlparse as url_parse
    from urllib.request import url2pathname
    import http.cookies as http_cookies
    from base64 import b64decode as _b64decode, b64encode as _b64encode

    try:
        import dbm as anydbm
    except ImportError:
        # On Python 3 the pure-Python fallback lives at dbm.dumb;
        # there is no top-level "dumbdbm" module as on Python 2.
        import dbm.dumb as anydbm

    def b64decode(b):
        return _b64decode(b.encode('ascii'))

    def b64encode(s):
        return _b64encode(s).decode('ascii')

    def u_(s):
        return str(s)

    def bytes_(s):
        if isinstance(s, byte_string):
            return s
        return str(s).encode('ascii', 'strict')

    def dictkeyslist(d):
        return list(d.keys())

else:
    xrange_ = xrange
    from types import NoneType

    string_type = basestring
    unicode_text = unicode
    byte_string = str

    from urllib import urlencode as url_encode
    from urllib import quote as url_quote
    from urllib import unquote as url_unquote
    from urlparse import urlparse as url_parse
    from urllib import url2pathname
    import Cookie as http_cookies
    from base64 import b64decode, b64encode
    import anydbm

    def u_(s):
        if isinstance(s, unicode_text):
            return s

        if not isinstance(s, byte_string):
            s = str(s)
        return unicode(s, 'utf-8')

    def bytes_(s):
        if isinstance(s, byte_string):
            return s
        return str(s)

    def dictkeyslist(d):
        return d.keys()


def im_func(f):
    if not PY2:  # pragma: no cover
        return getattr(f, '__func__', None)
    else:
        return getattr(f, 'im_func', None)


def default_im_func(f):
    if not PY2:  # pragma: no cover
        return getattr(f, '__func__', f)
    else:
        return getattr(f, 'im_func', f)


def im_self(f):
    if not PY2:  # pragma: no cover
        return getattr(f, '__self__', None)
    else:
        return getattr(f, 'im_self', None)


def im_class(f):
    if not PY2:  # pragma: no cover
        self = im_self(f)
        if self is not None:
            return self.__class__
        else:
            return None
    else:
        return getattr(f, 'im_class', None)


def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass."""
    def wrapper(cls):
        orig_vars = cls.__dict__.copy()
        slots = orig_vars.get('__slots__')
        if slots is not None:
            if isinstance(slots, str):
                slots = [slots]
            for slots_var in slots:
                orig_vars.pop(slots_var)
        orig_vars.pop('__dict__', None)
        orig_vars.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, orig_vars)
    return wrapper


if not PY2:  # pragma: no cover
    import builtins
    exec_ = getattr(builtins, "exec")

    def reraise(tp, value, tb=None):
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
else:  # pragma: no cover
    def exec_(code, globs=None, locs=None):
        """Execute code in a namespace."""
        if globs is None:
            frame = sys._getframe(1)
            globs = frame.f_globals
            if locs is None:
                locs = frame.f_locals
            del frame
        elif locs is None:
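            # Mirrors the Py2 statement form ``exec code in globs``: when only
            # globs is supplied, it doubles as the local namespace, which is
            # what the assignment below arranges.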
locs = globs exec("""exec code in globs, locs""") exec_("""def reraise(tp, value, tb=None): raise tp, value, tb """) try: from inspect import signature as func_signature except ImportError: from funcsigs import signature as func_signature def bindfuncargs(arginfo, args, kwargs): boundargs = arginfo.bind(*args, **kwargs) return boundargs.args, boundargs.kwargs Beaker-1.10.0/beaker/cache.py0000644000076500000240000005263513262740345015724 0ustar amolstaff00000000000000"""This package contains the "front end" classes and functions for Beaker caching. Included are the :class:`.Cache` and :class:`.CacheManager` classes, as well as the function decorators :func:`.region_decorate`, :func:`.region_invalidate`. """ import warnings from itertools import chain from beaker._compat import u_, unicode_text, func_signature, bindfuncargs import beaker.container as container import beaker.util as util from beaker.crypto.util import sha1 from beaker.exceptions import BeakerException, InvalidCacheBackendError from beaker.synchronization import _threading import beaker.ext.memcached as memcached import beaker.ext.database as database import beaker.ext.sqla as sqla import beaker.ext.google as google import beaker.ext.mongodb as mongodb import beaker.ext.redisnm as redisnm from functools import wraps # Initialize the cache region dict cache_regions = {} """Dictionary of 'region' arguments. A "region" is a string name that refers to a series of cache configuration arguments. An application may have multiple "regions" - one which stores things in a memory cache, one which writes data to files, etc. The dictionary stores string key names mapped to dictionaries of configuration arguments. Example:: from beaker.cache import cache_regions cache_regions.update({ 'short_term':{ 'expire':60, 'type':'memory' }, 'long_term':{ 'expire':1800, 'type':'dbm', 'data_dir':'/tmp', } }) """ cache_managers = {} class _backends(object): initialized = False def __init__(self, clsmap): self._clsmap = clsmap self._mutex = _threading.Lock() def __getitem__(self, key): try: return self._clsmap[key] except KeyError as e: if not self.initialized: self._mutex.acquire() try: if not self.initialized: self._init() self.initialized = True return self._clsmap[key] finally: self._mutex.release() raise e def _init(self): try: import pkg_resources # Load up the additional entry point defined backends for entry_point in pkg_resources.iter_entry_points('beaker.backends'): try: namespace_manager = entry_point.load() name = entry_point.name if name in self._clsmap: raise BeakerException("NamespaceManager name conflict,'%s' " "already loaded" % name) self._clsmap[name] = namespace_manager except (InvalidCacheBackendError, SyntaxError): # Ignore invalid backends pass except: import sys from pkg_resources import DistributionNotFound # Warn when there's a problem loading a NamespaceManager if not isinstance(sys.exc_info()[1], DistributionNotFound): import traceback try: from StringIO import StringIO # Python2 except ImportError: from io import StringIO # Python3 tb = StringIO() traceback.print_exc(file=tb) warnings.warn( "Unable to load NamespaceManager " "entry point: '%s': %s" % ( entry_point, tb.getvalue()), RuntimeWarning, 2) except ImportError: pass # Initialize the basic available backends clsmap = _backends({ 'memory': container.MemoryNamespaceManager, 'dbm': container.DBMNamespaceManager, 'file': container.FileNamespaceManager, 'ext:memcached': memcached.MemcachedNamespaceManager, 'ext:database': database.DatabaseNamespaceManager, 'ext:sqla': 
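    # NB (descriptive note, not in the original source): registering these
    # ext backends here imports only the lightweight beaker.ext wrapper
    # modules; each wrapper defers importing its client library (sqlalchemy,
    # memcache, redis, ...) until its _init_dependencies() runs on first use.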
sqla.SqlaNamespaceManager, 'ext:google': google.GoogleNamespaceManager, 'ext:mongodb': mongodb.MongoNamespaceManager, 'ext:redis': redisnm.RedisNamespaceManager }) def cache_region(region, *args): """Decorate a function such that its return result is cached, using a "region" to indicate the cache arguments. Example:: from beaker.cache import cache_regions, cache_region # configure regions cache_regions.update({ 'short_term':{ 'expire':60, 'type':'memory' } }) @cache_region('short_term', 'load_things') def load(search_term, limit, offset): '''Load from a database given a search term, limit, offset.''' return database.query(search_term)[offset:offset + limit] The decorator can also be used with object methods. The ``self`` argument is not part of the cache key. This is based on the actual string name ``self`` being in the first argument position (new in 1.6):: class MyThing(object): @cache_region('short_term', 'load_things') def load(self, search_term, limit, offset): '''Load from a database given a search term, limit, offset.''' return database.query(search_term)[offset:offset + limit] Classmethods work as well - use ``cls`` as the name of the class argument, and place the decorator around the function underneath ``@classmethod`` (new in 1.6):: class MyThing(object): @classmethod @cache_region('short_term', 'load_things') def load(cls, search_term, limit, offset): '''Load from a database given a search term, limit, offset.''' return database.query(search_term)[offset:offset + limit] :param region: String name of the region corresponding to the desired caching arguments, established in :attr:`.cache_regions`. :param \*args: Optional ``str()``-compatible arguments which will uniquely identify the key used by this decorated function, in addition to the positional arguments passed to the function itself at call time. This is recommended as it is needed to distinguish between any two functions or methods that have the same name (regardless of parent class or not). .. note:: The function being decorated must only be called with positional arguments, and the arguments must support being stringified with ``str()``. The concatenation of the ``str()`` version of each argument, combined with that of the ``*args`` sent to the decorator, forms the unique cache key. .. note:: When a method on a class is decorated, the ``self`` or ``cls`` argument in the first position is not included in the "key" used for caching. New in 1.6. """ return _cache_decorate(args, None, None, region) def region_invalidate(namespace, region, *args): """Invalidate a cache region corresponding to a function decorated with :func:`.cache_region`. :param namespace: The namespace of the cache to invalidate. This is typically a reference to the original function (as returned by the :func:`.cache_region` decorator), where the :func:`.cache_region` decorator applies a "memo" to the function in order to locate the string name of the namespace. :param region: String name of the region used with the decorator. This can be ``None`` in the usual case that the decorated function itself is passed, not the string name of the namespace. :param args: Stringifyable arguments that are used to locate the correct key. This consists of the ``*args`` sent to the :func:`.cache_region` decorator itself, plus the ``*args`` sent to the function itself at runtime. 
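
    (When the decorated function itself is passed as ``namespace``, the
    region and namespace strings are recovered from the ``_arg_region`` and
    ``_arg_namespace`` attributes that :func:`.cache_region` stamped onto
    it, so ``region`` may then be ``None``.)
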
Example:: from beaker.cache import cache_regions, cache_region, region_invalidate # configure regions cache_regions.update({ 'short_term':{ 'expire':60, 'type':'memory' } }) @cache_region('short_term', 'load_data') def load(search_term, limit, offset): '''Load from a database given a search term, limit, offset.''' return database.query(search_term)[offset:offset + limit] def invalidate_search(search_term, limit, offset): '''Invalidate the cached storage for a given search term, limit, offset.''' region_invalidate(load, 'short_term', 'load_data', search_term, limit, offset) Note that when a method on a class is decorated, the first argument ``cls`` or ``self`` is not included in the cache key. This means you don't send it to :func:`.region_invalidate`:: class MyThing(object): @cache_region('short_term', 'some_data') def load(self, search_term, limit, offset): '''Load from a database given a search term, limit, offset.''' return database.query(search_term)[offset:offset + limit] def invalidate_search(self, search_term, limit, offset): '''Invalidate the cached storage for a given search term, limit, offset.''' region_invalidate(self.load, 'short_term', 'some_data', search_term, limit, offset) """ if callable(namespace): if not region: region = namespace._arg_region namespace = namespace._arg_namespace if not region: raise BeakerException("Region or callable function " "namespace is required") else: region = cache_regions[region] cache = Cache._get_cache(namespace, region) _cache_decorator_invalidate(cache, region.get('key_length', util.DEFAULT_CACHE_KEY_LENGTH), args) class Cache(object): """Front-end to the containment API implementing a data cache. :param namespace: the namespace of this Cache :param type: type of cache to use :param expire: seconds to keep cached data :param expiretime: seconds to keep cached data (legacy support) :param starttime: time when cache was cache was """ def __init__(self, namespace, type='memory', expiretime=None, starttime=None, expire=None, **nsargs): try: cls = clsmap[type] if isinstance(cls, InvalidCacheBackendError): raise cls except KeyError: raise TypeError("Unknown cache implementation %r" % type) if expire is not None: expire = int(expire) self.namespace_name = namespace self.namespace = cls(namespace, **nsargs) self.expiretime = expiretime or expire self.starttime = starttime self.nsargs = nsargs @classmethod def _get_cache(cls, namespace, kw): key = namespace + str(kw) try: return cache_managers[key] except KeyError: cache_managers[key] = cache = cls(namespace, **kw) return cache def put(self, key, value, **kw): self._get_value(key, **kw).set_value(value) set_value = put def get(self, key, **kw): """Retrieve a cached value from the container""" return self._get_value(key, **kw).get_value() get_value = get def remove_value(self, key, **kw): mycontainer = self._get_value(key, **kw) mycontainer.clear_value() remove = remove_value def _get_value(self, key, **kw): if isinstance(key, unicode_text): key = key.encode('ascii', 'backslashreplace') if 'type' in kw: return self._legacy_get_value(key, **kw) kw.setdefault('expiretime', self.expiretime) kw.setdefault('starttime', self.starttime) return container.Value(key, self.namespace, **kw) @util.deprecated("Specifying a " "'type' and other namespace configuration with cache.get()/put()/etc. " "is deprecated. 
Specify 'type' and other namespace configuration to " "cache_manager.get_cache() and/or the Cache constructor instead.") def _legacy_get_value(self, key, type, **kw): expiretime = kw.pop('expiretime', self.expiretime) starttime = kw.pop('starttime', None) createfunc = kw.pop('createfunc', None) kwargs = self.nsargs.copy() kwargs.update(kw) c = Cache(self.namespace.namespace, type=type, **kwargs) return c._get_value(key, expiretime=expiretime, createfunc=createfunc, starttime=starttime) def clear(self): """Clear all the values from the namespace""" self.namespace.remove() # dict interface def __getitem__(self, key): return self.get(key) def __contains__(self, key): return self._get_value(key).has_current_value() def has_key(self, key): return key in self def __delitem__(self, key): self.remove_value(key) def __setitem__(self, key, value): self.put(key, value) class CacheManager(object): def __init__(self, **kwargs): """Initialize a CacheManager object with a set of options Options should be parsed with the :func:`~beaker.util.parse_cache_config_options` function to ensure only valid options are used. """ self.kwargs = kwargs self.regions = kwargs.pop('cache_regions', {}) # Add these regions to the module global cache_regions.update(self.regions) def get_cache(self, name, **kwargs): kw = self.kwargs.copy() kw.update(kwargs) return Cache._get_cache(name, kw) def get_cache_region(self, name, region): if region not in self.regions: raise BeakerException('Cache region not configured: %s' % region) kw = self.regions[region] return Cache._get_cache(name, kw) def region(self, region, *args): """Decorate a function to cache itself using a cache region The region decorator requires arguments if there are more than two of the same named function, in the same module. This is because the namespace used for the functions cache is based on the functions name and the module. Example:: # Assuming a cache object is available like: cache = CacheManager(dict_of_config_options) def populate_things(): @cache.region('short_term', 'some_data') def load(search_term, limit, offset): return load_the_data(search_term, limit, offset) return load('rabbits', 20, 0) .. note:: The function being decorated must only be called with positional arguments. """ return cache_region(region, *args) def region_invalidate(self, namespace, region, *args): """Invalidate a cache region namespace or decorated function This function only invalidates cache spaces created with the cache_region decorator. :param namespace: Either the namespace of the result to invalidate, or the cached function :param region: The region the function was cached to. If the function was cached to a single region then this argument can be None :param args: Arguments that were used to differentiate the cached function as well as the arguments passed to the decorated function Example:: # Assuming a cache object is available like: cache = CacheManager(dict_of_config_options) def populate_things(invalidate=False): @cache.region('short_term', 'some_data') def load(search_term, limit, offset): return load_the_data(search_term, limit, offset) # If the results should be invalidated first if invalidate: cache.region_invalidate(load, None, 'some_data', 'rabbits', 20, 0) return load('rabbits', 20, 0) """ return region_invalidate(namespace, region, *args) def cache(self, *args, **kwargs): """Decorate a function to cache itself with supplied parameters :param args: Used to make the key unique for this function, as in region() above. 
:param kwargs: Parameters to be passed to get_cache(), will override defaults Example:: # Assuming a cache object is available like: cache = CacheManager(dict_of_config_options) def populate_things(): @cache.cache('mycache', expire=15) def load(search_term, limit, offset): return load_the_data(search_term, limit, offset) return load('rabbits', 20, 0) .. note:: The function being decorated must only be called with positional arguments. """ return _cache_decorate(args, self, kwargs, None) def invalidate(self, func, *args, **kwargs): """Invalidate a cache decorated function This function only invalidates cache spaces created with the cache decorator. :param func: Decorated function to invalidate :param args: Used to make the key unique for this function, as in region() above. :param kwargs: Parameters that were passed for use by get_cache(), note that this is only required if a ``type`` was specified for the function Example:: # Assuming a cache object is available like: cache = CacheManager(dict_of_config_options) def populate_things(invalidate=False): @cache.cache('mycache', type="file", expire=15) def load(search_term, limit, offset): return load_the_data(search_term, limit, offset) # If the results should be invalidated first if invalidate: cache.invalidate(load, 'mycache', 'rabbits', 20, 0, type="file") return load('rabbits', 20, 0) """ namespace = func._arg_namespace cache = self.get_cache(namespace, **kwargs) if hasattr(func, '_arg_region'): cachereg = cache_regions[func._arg_region] key_length = cachereg.get('key_length', util.DEFAULT_CACHE_KEY_LENGTH) else: key_length = kwargs.pop('key_length', util.DEFAULT_CACHE_KEY_LENGTH) _cache_decorator_invalidate(cache, key_length, args) def _cache_decorate(deco_args, manager, options, region): """Return a caching function decorator.""" cache = [None] def decorate(func): namespace = util.func_namespace(func) skip_self = util.has_self_arg(func) signature = func_signature(func) @wraps(func) def cached(*args, **kwargs): if not cache[0]: if region is not None: if region not in cache_regions: raise BeakerException( 'Cache region not configured: %s' % region) reg = cache_regions[region] if not reg.get('enabled', True): return func(*args, **kwargs) cache[0] = Cache._get_cache(namespace, reg) elif manager: cache[0] = manager.get_cache(namespace, **options) else: raise Exception("'manager + kwargs' or 'region' " "argument is required") cache_key_kwargs = [] if kwargs: # kwargs provided, merge them in positional args # to avoid having different cache keys. args, kwargs = bindfuncargs(signature, args, kwargs) cache_key_kwargs = [u_(':').join((u_(key), u_(value))) for key, value in kwargs.items()] cache_key_args = args if skip_self: cache_key_args = args[1:] cache_key = u_(" ").join(map(u_, chain(deco_args, cache_key_args, cache_key_kwargs))) if region: cachereg = cache_regions[region] key_length = cachereg.get('key_length', util.DEFAULT_CACHE_KEY_LENGTH) else: key_length = options.pop('key_length', util.DEFAULT_CACHE_KEY_LENGTH) # TODO: This is probably a bug as length is checked before converting to UTF8 # which will cause cache_key to grow in size. 
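            # Illustrative note (hedged, not from the original source): with
            # e.g. key_length=250, a short key such as
            # "load_things rabbits 20 0" is stored verbatim, while any
            # key/namespace combination exceeding the limit is collapsed to
            # sha1(cache_key).hexdigest(), a fixed 40-character hex string.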
if len(cache_key) + len(namespace) > int(key_length): cache_key = sha1(cache_key.encode('utf-8')).hexdigest() def go(): return func(*args, **kwargs) # save org function name go.__name__ = '_cached_%s' % (func.__name__,) return cache[0].get_value(cache_key, createfunc=go) cached._arg_namespace = namespace if region is not None: cached._arg_region = region return cached return decorate def _cache_decorator_invalidate(cache, key_length, args): """Invalidate a cache key based on function arguments.""" cache_key = u_(" ").join(map(u_, args)) if len(cache_key) + len(cache.namespace_name) > key_length: cache_key = sha1(cache_key.encode('utf-8')).hexdigest() cache.remove_value(cache_key) Beaker-1.10.0/beaker/container.py0000644000076500000240000005701713262740345016642 0ustar amolstaff00000000000000"""Container and Namespace classes""" import errno from ._compat import pickle, anydbm, add_metaclass, PYVER, unicode_text import beaker.util as util import logging import os import time from beaker.exceptions import CreationAbortedError, MissingCacheParameter from beaker.synchronization import _threading, file_synchronizer, \ mutex_synchronizer, NameLock, null_synchronizer __all__ = ['Value', 'Container', 'ContainerContext', 'MemoryContainer', 'DBMContainer', 'NamespaceManager', 'MemoryNamespaceManager', 'DBMNamespaceManager', 'FileContainer', 'OpenResourceNamespaceManager', 'FileNamespaceManager', 'CreationAbortedError'] logger = logging.getLogger('beaker.container') if logger.isEnabledFor(logging.DEBUG): debug = logger.debug else: def debug(message, *args): pass class NamespaceManager(object): """Handles dictionary operations and locking for a namespace of values. :class:`.NamespaceManager` provides a dictionary-like interface, implementing ``__getitem__()``, ``__setitem__()``, and ``__contains__()``, as well as functions related to lock acquisition. The implementation for setting and retrieving the namespace data is handled by subclasses. NamespaceManager may be used alone, or may be accessed by one or more :class:`.Value` objects. :class:`.Value` objects provide per-key services like expiration times and automatic recreation of values. Multiple NamespaceManagers created with a particular name will all share access to the same underlying datasource and will attempt to synchronize against a common mutex object. The scope of this sharing may be within a single process or across multiple processes, depending on the type of NamespaceManager used. The NamespaceManager itself is generally threadsafe, except in the case of the DBMNamespaceManager in conjunction with the gdbm dbm implementation. """ @classmethod def _init_dependencies(cls): """Initialize module-level dependent libraries required by this :class:`.NamespaceManager`.""" def __init__(self, namespace): self._init_dependencies() self.namespace = namespace def get_creation_lock(self, key): """Return a locking object that is used to synchronize multiple threads or processes which wish to generate a new cache value. This function is typically an instance of :class:`.FileSynchronizer`, :class:`.ConditionSynchronizer`, or :class:`.null_synchronizer`. The creation lock is only used when a requested value does not exist, or has been expired, and is only used by the :class:`.Value` key-management object in conjunction with a "createfunc" value-creation function. """ raise NotImplementedError() def do_remove(self): """Implement removal of the entire contents of this :class:`.NamespaceManager`. e.g. 
for a file-based namespace, this would remove all the files. The front-end to this method is the :meth:`.NamespaceManager.remove` method. """ raise NotImplementedError() def acquire_read_lock(self): """Establish a read lock. This operation is called before a key is read. By default the function does nothing. """ def release_read_lock(self): """Release a read lock. This operation is called after a key is read. By default the function does nothing. """ def acquire_write_lock(self, wait=True, replace=False): """Establish a write lock. This operation is called before a key is written. A return value of ``True`` indicates the lock has been acquired. By default the function returns ``True`` unconditionally. 'replace' is a hint indicating the full contents of the namespace may be safely discarded. Some backends may implement this (i.e. file backend won't unpickle the current contents). """ return True def release_write_lock(self): """Release a write lock. This operation is called after a new value is written. By default this function does nothing. """ def has_key(self, key): """Return ``True`` if the given key is present in this :class:`.Namespace`. """ return self.__contains__(key) def __getitem__(self, key): raise NotImplementedError() def __setitem__(self, key, value): raise NotImplementedError() def set_value(self, key, value, expiretime=None): """Sets a value in this :class:`.NamespaceManager`. This is the same as ``__setitem__()``, but also allows an expiration time to be passed at the same time. """ self[key] = value def __contains__(self, key): raise NotImplementedError() def __delitem__(self, key): raise NotImplementedError() def keys(self): """Return the list of all keys. This method may not be supported by all :class:`.NamespaceManager` implementations. """ raise NotImplementedError() def remove(self): """Remove the entire contents of this :class:`.NamespaceManager`. e.g. for a file-based namespace, this would remove all the files. """ self.do_remove() class OpenResourceNamespaceManager(NamespaceManager): """A NamespaceManager where read/write operations require opening/ closing of a resource which is possibly mutexed. 
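
    Subclasses supply ``do_open()`` / ``do_close()``; this base class
    reference-counts concurrent openers so that nested read/write
    acquisitions share one underlying open resource (see ``open()`` /
    ``close()`` below).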
""" def __init__(self, namespace): NamespaceManager.__init__(self, namespace) self.access_lock = self.get_access_lock() self.openers = 0 self.mutex = _threading.Lock() def get_access_lock(self): raise NotImplementedError() def do_open(self, flags, replace): raise NotImplementedError() def do_close(self): raise NotImplementedError() def acquire_read_lock(self): self.access_lock.acquire_read_lock() try: self.open('r', checkcount=True) except: self.access_lock.release_read_lock() raise def release_read_lock(self): try: self.close(checkcount=True) finally: self.access_lock.release_read_lock() def acquire_write_lock(self, wait=True, replace=False): r = self.access_lock.acquire_write_lock(wait) try: if (wait or r): self.open('c', checkcount=True, replace=replace) return r except: self.access_lock.release_write_lock() raise def release_write_lock(self): try: self.close(checkcount=True) finally: self.access_lock.release_write_lock() def open(self, flags, checkcount=False, replace=False): self.mutex.acquire() try: if checkcount: if self.openers == 0: self.do_open(flags, replace) self.openers += 1 else: self.do_open(flags, replace) self.openers = 1 finally: self.mutex.release() def close(self, checkcount=False): self.mutex.acquire() try: if checkcount: self.openers -= 1 if self.openers == 0: self.do_close() else: if self.openers > 0: self.do_close() self.openers = 0 finally: self.mutex.release() def remove(self): self.access_lock.acquire_write_lock() try: self.close(checkcount=False) self.do_remove() finally: self.access_lock.release_write_lock() class Value(object): """Implements synchronization, expiration, and value-creation logic for a single value stored in a :class:`.NamespaceManager`. """ __slots__ = 'key', 'createfunc', 'expiretime', 'expire_argument', 'starttime', 'storedtime',\ 'namespace' def __init__(self, key, namespace, createfunc=None, expiretime=None, starttime=None): self.key = key self.createfunc = createfunc self.expire_argument = expiretime self.starttime = starttime self.storedtime = -1 self.namespace = namespace def has_value(self): """return true if the container has a value stored. This is regardless of it being expired or not. 
""" self.namespace.acquire_read_lock() try: return self.key in self.namespace finally: self.namespace.release_read_lock() def can_have_value(self): return self.has_current_value() or self.createfunc is not None def has_current_value(self): self.namespace.acquire_read_lock() try: has_value = self.key in self.namespace if has_value: try: stored, expired, value = self._get_value() return not self._is_expired(stored, expired) except KeyError: pass return False finally: self.namespace.release_read_lock() def _is_expired(self, storedtime, expiretime): """Return true if this container's value is expired.""" return ( ( self.starttime is not None and storedtime < self.starttime ) or ( expiretime is not None and time.time() >= expiretime + storedtime ) ) def get_value(self): self.namespace.acquire_read_lock() try: has_value = self.has_value() if has_value: try: stored, expired, value = self._get_value() if not self._is_expired(stored, expired): return value except KeyError: # guard against un-mutexed backends raising KeyError has_value = False if not self.createfunc: raise KeyError(self.key) finally: self.namespace.release_read_lock() has_createlock = False creation_lock = self.namespace.get_creation_lock(self.key) if has_value: if not creation_lock.acquire(wait=False): debug("get_value returning old value while new one is created") return value else: debug("lock_creatfunc (didnt wait)") has_createlock = True if not has_createlock: debug("lock_createfunc (waiting)") creation_lock.acquire() debug("lock_createfunc (waited)") try: # see if someone created the value already self.namespace.acquire_read_lock() try: if self.has_value(): try: stored, expired, value = self._get_value() if not self._is_expired(stored, expired): return value except KeyError: # guard against un-mutexed backends raising KeyError pass finally: self.namespace.release_read_lock() debug("get_value creating new value") v = self.createfunc() self.set_value(v) return v finally: creation_lock.release() debug("released create lock") def _get_value(self): value = self.namespace[self.key] try: stored, expired, value = value except ValueError: if not len(value) == 2: raise # Old format: upgrade stored, value = value expired = self.expire_argument debug("get_value upgrading time %r expire time %r", stored, self.expire_argument) self.namespace.release_read_lock() self.set_value(value, stored) self.namespace.acquire_read_lock() except TypeError: # occurs when the value is None. memcached # may yank the rug from under us in which case # that's the result raise KeyError(self.key) return stored, expired, value def set_value(self, value, storedtime=None): self.namespace.acquire_write_lock() try: if storedtime is None: storedtime = time.time() debug("set_value stored time %r expire time %r", storedtime, self.expire_argument) self.namespace.set_value(self.key, (storedtime, self.expire_argument, value), expiretime=self.expire_argument) finally: self.namespace.release_write_lock() def clear_value(self): self.namespace.acquire_write_lock() try: debug("clear_value") if self.key in self.namespace: try: del self.namespace[self.key] except KeyError: # guard against un-mutexed backends raising KeyError pass self.storedtime = -1 finally: self.namespace.release_write_lock() class AbstractDictionaryNSManager(NamespaceManager): """A subclassable NamespaceManager that places data in a dictionary. Subclasses should provide a "dictionary" attribute or descriptor which returns a dict-like object. 
The dictionary will store keys that are local to the "namespace" attribute of this manager, so ensure that the dictionary will not be used by any other namespace. e.g.:: import collections cached_data = collections.defaultdict(dict) class MyDictionaryManager(AbstractDictionaryNSManager): def __init__(self, namespace): AbstractDictionaryNSManager.__init__(self, namespace) self.dictionary = cached_data[self.namespace] The above stores data in a global dictionary called "cached_data", which is structured as a dictionary of dictionaries, keyed first on namespace name to a sub-dictionary, then on actual cache key to value. """ def get_creation_lock(self, key): return NameLock( identifier="memorynamespace/funclock/%s/%s" % (self.namespace, key), reentrant=True ) def __getitem__(self, key): return self.dictionary[key] def __contains__(self, key): return self.dictionary.__contains__(key) def has_key(self, key): return self.dictionary.__contains__(key) def __setitem__(self, key, value): self.dictionary[key] = value def __delitem__(self, key): del self.dictionary[key] def do_remove(self): self.dictionary.clear() def keys(self): return self.dictionary.keys() class MemoryNamespaceManager(AbstractDictionaryNSManager): """:class:`.NamespaceManager` that uses a Python dictionary for storage.""" namespaces = util.SyncDict() def __init__(self, namespace, **kwargs): AbstractDictionaryNSManager.__init__(self, namespace) self.dictionary = MemoryNamespaceManager.\ namespaces.get(self.namespace, dict) class DBMNamespaceManager(OpenResourceNamespaceManager): """:class:`.NamespaceManager` that uses ``dbm`` files for storage.""" def __init__(self, namespace, dbmmodule=None, data_dir=None, dbm_dir=None, lock_dir=None, digest_filenames=True, **kwargs): self.digest_filenames = digest_filenames if not dbm_dir and not data_dir: raise MissingCacheParameter("data_dir or dbm_dir is required") elif dbm_dir: self.dbm_dir = dbm_dir else: self.dbm_dir = data_dir + "/container_dbm" util.verify_directory(self.dbm_dir) if not lock_dir and not data_dir: raise MissingCacheParameter("data_dir or lock_dir is required") elif lock_dir: self.lock_dir = lock_dir else: self.lock_dir = data_dir + "/container_dbm_lock" util.verify_directory(self.lock_dir) self.dbmmodule = dbmmodule or anydbm self.dbm = None OpenResourceNamespaceManager.__init__(self, namespace) self.file = util.encoded_path(root=self.dbm_dir, identifiers=[self.namespace], extension='.dbm', digest_filenames=self.digest_filenames) debug("data file %s", self.file) self._checkfile() def get_access_lock(self): return file_synchronizer(identifier=self.namespace, lock_dir=self.lock_dir) def get_creation_lock(self, key): return file_synchronizer( identifier="dbmcontainer/funclock/%s/%s" % ( self.namespace, key ), lock_dir=self.lock_dir ) def file_exists(self, file): if os.access(file, os.F_OK): return True else: for ext in ('db', 'dat', 'pag', 'dir'): if os.access(file + os.extsep + ext, os.F_OK): return True return False def _ensuredir(self, filename): dirname = os.path.dirname(filename) if not os.path.exists(dirname): util.verify_directory(dirname) def _checkfile(self): if not self.file_exists(self.file): self._ensuredir(self.file) g = self.dbmmodule.open(self.file, 'c') g.close() def get_filenames(self): list = [] if os.access(self.file, os.F_OK): list.append(self.file) for ext in ('pag', 'dir', 'db', 'dat'): if os.access(self.file + os.extsep + ext, os.F_OK): list.append(self.file + os.extsep + ext) return list def do_open(self, flags, replace): debug("opening dbm file %s", 
self.file) try: self.dbm = self.dbmmodule.open(self.file, flags) except: self._checkfile() self.dbm = self.dbmmodule.open(self.file, flags) def do_close(self): if self.dbm is not None: debug("closing dbm file %s", self.file) self.dbm.close() def do_remove(self): for f in self.get_filenames(): os.remove(f) def __getitem__(self, key): return pickle.loads(self.dbm[key]) def __contains__(self, key): if PYVER == (3, 2): # Looks like this is a bug that got solved in PY3.3 and PY3.4 # http://bugs.python.org/issue19288 if isinstance(key, unicode_text): key = key.encode('UTF-8') return key in self.dbm def __setitem__(self, key, value): self.dbm[key] = pickle.dumps(value) def __delitem__(self, key): del self.dbm[key] def keys(self): return self.dbm.keys() class FileNamespaceManager(OpenResourceNamespaceManager): """:class:`.NamespaceManager` that uses binary files for storage. Each namespace is implemented as a single file storing a dictionary of key/value pairs, serialized using the Python ``pickle`` module. """ def __init__(self, namespace, data_dir=None, file_dir=None, lock_dir=None, digest_filenames=True, **kwargs): self.digest_filenames = digest_filenames if not file_dir and not data_dir: raise MissingCacheParameter("data_dir or file_dir is required") elif file_dir: self.file_dir = file_dir else: self.file_dir = data_dir + "/container_file" util.verify_directory(self.file_dir) if not lock_dir and not data_dir: raise MissingCacheParameter("data_dir or lock_dir is required") elif lock_dir: self.lock_dir = lock_dir else: self.lock_dir = data_dir + "/container_file_lock" util.verify_directory(self.lock_dir) OpenResourceNamespaceManager.__init__(self, namespace) self.file = util.encoded_path(root=self.file_dir, identifiers=[self.namespace], extension='.cache', digest_filenames=self.digest_filenames) self.hash = {} debug("data file %s", self.file) def get_access_lock(self): return file_synchronizer(identifier=self.namespace, lock_dir=self.lock_dir) def get_creation_lock(self, key): return file_synchronizer( identifier="dbmcontainer/funclock/%s/%s" % ( self.namespace, key ), lock_dir=self.lock_dir ) def file_exists(self, file): return os.access(file, os.F_OK) def do_open(self, flags, replace): if not replace and self.file_exists(self.file): try: with open(self.file, 'rb') as fh: self.hash = pickle.load(fh) except IOError as e: # Ignore EACCES and ENOENT as it just means we are no longer # able to access the file or that it no longer exists if e.errno not in [errno.EACCES, errno.ENOENT]: raise self.flags = flags def do_close(self): if self.flags == 'c' or self.flags == 'w': pickled = pickle.dumps(self.hash) util.safe_write(self.file, pickled) self.hash = {} self.flags = None def do_remove(self): try: os.remove(self.file) except OSError: # for instance, because we haven't yet used this cache, # but client code has asked for a clear() operation... 
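            # (typically errno.ENOENT; the namespace is then simply treated
            # as already empty)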
pass self.hash = {} def __getitem__(self, key): return self.hash[key] def __contains__(self, key): return key in self.hash def __setitem__(self, key, value): self.hash[key] = value def __delitem__(self, key): del self.hash[key] def keys(self): return self.hash.keys() #### legacy stuff to support the old "Container" class interface namespace_classes = {} ContainerContext = dict class ContainerMeta(type): def __init__(cls, classname, bases, dict_): namespace_classes[cls] = cls.namespace_class return type.__init__(cls, classname, bases, dict_) def __call__(self, key, context, namespace, createfunc=None, expiretime=None, starttime=None, **kwargs): if namespace in context: ns = context[namespace] else: nscls = namespace_classes[self] context[namespace] = ns = nscls(namespace, **kwargs) return Value(key, ns, createfunc=createfunc, expiretime=expiretime, starttime=starttime) @add_metaclass(ContainerMeta) class Container(object): """Implements synchronization and value-creation logic for a 'value' stored in a :class:`.NamespaceManager`. :class:`.Container` and its subclasses are deprecated. The :class:`.Value` class is now used for this purpose. """ namespace_class = NamespaceManager class FileContainer(Container): namespace_class = FileNamespaceManager class MemoryContainer(Container): namespace_class = MemoryNamespaceManager class DBMContainer(Container): namespace_class = DBMNamespaceManager DbmContainer = DBMContainer Beaker-1.10.0/beaker/converters.py0000644000076500000240000000160212473366102017035 0ustar amolstaff00000000000000from beaker._compat import string_type # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org) # Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php def asbool(obj): if isinstance(obj, string_type): obj = obj.strip().lower() if obj in ['true', 'yes', 'on', 'y', 't', '1']: return True elif obj in ['false', 'no', 'off', 'n', 'f', '0']: return False else: raise ValueError( "String is not true/false: %r" % obj) return bool(obj) def aslist(obj, sep=None, strip=True): if isinstance(obj, string_type): lst = obj.split(sep) if strip: lst = [v.strip() for v in lst] return lst elif isinstance(obj, (list, tuple)): return obj elif obj is None: return [] else: return [obj] Beaker-1.10.0/beaker/cookie.py0000644000076500000240000000616513305311623016116 0ustar amolstaff00000000000000import sys from ._compat import http_cookies # Some versions of Python 2.7 and later won't need this encoding bug fix: _cookie_encodes_correctly = http_cookies.SimpleCookie().value_encode(';') == (';', '"\\073"') # Cookie pickling bug is fixed in Python 2.7.9 and Python 3.4.3+ # http://bugs.python.org/issue22775 cookie_pickles_properly = ( (sys.version_info[:2] == (2, 7) and sys.version_info >= (2, 7, 9)) or sys.version_info >= (3, 4, 3) ) # Add support for the SameSite attribute (obsolete when PY37 is unsupported). http_cookies.Morsel._reserved.setdefault('samesite', 'SameSite') # Adapted from Django.http.cookies and always enabled the bad_cookies # behaviour to cope with any invalid cookie key while keeping around # the session. class SimpleCookie(http_cookies.SimpleCookie): if not cookie_pickles_properly: def __setitem__(self, key, value): # Apply the fix from http://bugs.python.org/issue22775 where # it's not fixed in Python itself if isinstance(value, http_cookies.Morsel): # allow assignment of constructed Morsels (e.g. 
for pickling) dict.__setitem__(self, key, value) else: super(SimpleCookie, self).__setitem__(key, value) if not _cookie_encodes_correctly: def value_encode(self, val): # Some browsers do not support quoted-string from RFC 2109, # including some versions of Safari and Internet Explorer. # These browsers split on ';', and some versions of Safari # are known to split on ', '. Therefore, we encode ';' and ',' # SimpleCookie already does the hard work of encoding and decoding. # It uses octal sequences like '\\012' for newline etc. # and non-ASCII chars. We just make use of this mechanism, to # avoid introducing two encoding schemes which would be confusing # and especially awkward for javascript. # NB, contrary to Python docs, value_encode returns a tuple containing # (real val, encoded_val) val, encoded = super(SimpleCookie, self).value_encode(val) encoded = encoded.replace(";", "\\073").replace(",", "\\054") # If encoded now contains any quoted chars, we need double quotes # around the whole string. if "\\" in encoded and not encoded.startswith('"'): encoded = '"' + encoded + '"' return val, encoded def load(self, rawdata): self.bad_cookies = set() super(SimpleCookie, self).load(rawdata) for key in self.bad_cookies: del self[key] # override private __set() method: # (needed for using our Morsel, and for laxness with CookieError def _BaseCookie__set(self, key, real_value, coded_value): try: super(SimpleCookie, self)._BaseCookie__set(key, real_value, coded_value) except http_cookies.CookieError: if not hasattr(self, 'bad_cookies'): self.bad_cookies = set() self.bad_cookies.add(key) dict.__setitem__(self, key, http_cookies.Morsel()) Beaker-1.10.0/beaker/crypto/0000755000076500000240000000000013305324602015604 5ustar amolstaff00000000000000Beaker-1.10.0/beaker/crypto/__init__.py0000644000076500000240000000460213060057514017722 0ustar amolstaff00000000000000from .._compat import JYTHON from beaker.crypto.pbkdf2 import pbkdf2 from beaker.crypto.util import hmac, sha1, hmac_sha1, md5 from beaker import util from beaker.exceptions import InvalidCryptoBackendError keyLength = None DEFAULT_NONCE_BITS = 128 CRYPTO_MODULES = {} def load_default_module(): """ Load the default crypto module """ if JYTHON: try: from beaker.crypto import jcecrypto return jcecrypto except ImportError: pass else: try: from beaker.crypto import nsscrypto return nsscrypto except ImportError: try: from beaker.crypto import pycrypto return pycrypto except ImportError: pass from beaker.crypto import noencryption return noencryption def register_crypto_module(name, mod): """ Register the given module under the name given. """ CRYPTO_MODULES[name] = mod def get_crypto_module(name): """ Get the active crypto module for this name """ if name not in CRYPTO_MODULES: if name == 'default': register_crypto_module('default', load_default_module()) elif name == 'nss': from beaker.crypto import nsscrypto register_crypto_module(name, nsscrypto) elif name == 'pycrypto': from beaker.crypto import pycrypto register_crypto_module(name, pycrypto) elif name == 'cryptography': from beaker.crypto import pyca_cryptography register_crypto_module(name, pyca_cryptography) else: raise InvalidCryptoBackendError( "No crypto backend with name '%s' is registered." % name) return CRYPTO_MODULES[name] def generateCryptoKeys(master_key, salt, iterations, keylen): # NB: We XOR parts of the keystream into the randomly-generated parts, just # in case os.urandom() isn't as random as it should be. 
    # Note that if os.urandom() returns truly random data, this will have no
    # effect on the overall security.
    return pbkdf2(master_key, salt, iterations=iterations, dklen=keylen)


def get_nonce_size(number_of_bits):
    if number_of_bits % 8:
        raise ValueError('Nonce complexity currently supports multiples of 8')

    bytes = number_of_bits // 8
    b64bytes = ((4 * bytes // 3) + 3) & ~3
    return bytes, b64bytes

Beaker-1.10.0/beaker/crypto/jcecrypto.py
"""
Encryption module that uses the Java Cryptography Extensions (JCE).

Note that in default installations of the Java Runtime Environment, the
maximum key length is limited to 128 bits due to US export
restrictions.  This makes the generated keys incompatible with the ones
generated by pycryptopp, which has no such restrictions.  To fix this,
download the "Unlimited Strength Jurisdiction Policy Files" from Sun,
which will allow encryption using 256 bit AES keys.
"""
from warnings import warn

from javax.crypto import Cipher
from javax.crypto.spec import SecretKeySpec, IvParameterSpec

import jarray

# Initialization vector filled with zeros
_iv = IvParameterSpec(jarray.zeros(16, 'b'))


def aesEncrypt(data, key):
    cipher = Cipher.getInstance('AES/CTR/NoPadding')
    skeySpec = SecretKeySpec(key, 'AES')
    cipher.init(Cipher.ENCRYPT_MODE, skeySpec, _iv)
    return cipher.doFinal(data).tostring()

# magic.
aesDecrypt = aesEncrypt

has_aes = True


def getKeyLength():
    maxlen = Cipher.getMaxAllowedKeyLength('AES/CTR/NoPadding')
    return min(maxlen, 256) / 8


if getKeyLength() < 32:
    warn('Crypto implementation only supports key lengths up to %d bits. '
         'Generated session cookies may be incompatible with other '
         'environments' % (getKeyLength() * 8))

Beaker-1.10.0/beaker/crypto/noencryption.py
"""Encryption module that does nothing"""


def aesEncrypt(data, key):
    return data


def aesDecrypt(data, key):
    return data

has_aes = False


def getKeyLength():
    return 32

Beaker-1.10.0/beaker/crypto/nsscrypto.py
"""Encryption module that uses nsscrypto"""
import nss.nss

nss.nss.nss_init_nodb()

# Apparently the rest of beaker doesn't care about the particular cipher,
# mode and padding used.
# NOTE: A constant IV!!! This is only secure if the KEY is never reused!!!
_mech = nss.nss.CKM_AES_CBC_PAD
_iv = '\0' * nss.nss.get_iv_length(_mech)


def aesEncrypt(data, key):
    slot = nss.nss.get_best_slot(_mech)

    key_obj = nss.nss.import_sym_key(slot, _mech,
                                     nss.nss.PK11_OriginGenerated,
                                     nss.nss.CKA_ENCRYPT,
                                     nss.nss.SecItem(key))

    param = nss.nss.param_from_iv(_mech, nss.nss.SecItem(_iv))
    ctx = nss.nss.create_context_by_sym_key(_mech, nss.nss.CKA_ENCRYPT,
                                            key_obj, param)

    l1 = ctx.cipher_op(data)
    # Yes, DIGEST.  This needs fixing in NSS, but apparently nobody (including
    # me :( ) cares enough.
    l2 = ctx.digest_final()

    return l1 + l2


def aesDecrypt(data, key):
    slot = nss.nss.get_best_slot(_mech)

    key_obj = nss.nss.import_sym_key(slot, _mech,
                                     nss.nss.PK11_OriginGenerated,
                                     nss.nss.CKA_DECRYPT,
                                     nss.nss.SecItem(key))

    param = nss.nss.param_from_iv(_mech, nss.nss.SecItem(_iv))
    ctx = nss.nss.create_context_by_sym_key(_mech, nss.nss.CKA_DECRYPT,
                                            key_obj, param)

    l1 = ctx.cipher_op(data)
    # Yes, DIGEST.  This needs fixing in NSS, but apparently nobody (including
    # me :( ) cares enough.
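    # digest_final() flushes the last cipher block; for CKM_AES_CBC_PAD this
    # is also where the PKCS#7 padding is verified and stripped on decrypt.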
    l2 = ctx.digest_final()

    return l1 + l2

has_aes = True


def getKeyLength():
    return 32

Beaker-1.10.0/beaker/crypto/pbkdf2.py
"""
PBKDF2 Implementation adapted from django.utils.crypto.

This is used to generate the encryption key for enciphered sessions.
"""
from beaker._compat import bytes_, xrange_

import hmac
import struct
import hashlib
import binascii


def _bin_to_long(x):
    """Convert a binary string into a long integer"""
    return int(binascii.hexlify(x), 16)


def _long_to_bin(x, hex_format_string):
    """
    Convert a long integer into a binary string.
    hex_format_string is like "%020x" for padding 10 characters.
    """
    return binascii.unhexlify((hex_format_string % x).encode('ascii'))


if hasattr(hashlib, "pbkdf2_hmac"):
    def pbkdf2(password, salt, iterations, dklen=0, digest=None):
        """
        Implements PBKDF2 using the stdlib. This is used in Python 2.7.8+ and 3.4+.

        HMAC+SHA1 is used as the default pseudo random function.

        As of 2014, 100,000 iterations was the recommended default which took
        100ms on a 2.7Ghz Intel i7 with an optimized implementation. This is
        probably the bare minimum for security given 1000 iterations was
        recommended in 2001.
        """
        if digest is None:
            digest = hashlib.sha1
        if not dklen:
            dklen = None
        password = bytes_(password)
        salt = bytes_(salt)
        return hashlib.pbkdf2_hmac(
            digest().name, password, salt, iterations, dklen)
else:
    def pbkdf2(password, salt, iterations, dklen=0, digest=None):
        """
        Implements PBKDF2 as defined in RFC 2898, section 5.2

        HMAC+SHA1 is used as the default pseudo random function.

        As of 2014, 100,000 iterations was the recommended default which took
        100ms on a 2.7Ghz Intel i7 with an optimized implementation. This is
        probably the bare minimum for security given 1000 iterations was
        recommended in 2001. This code is very well optimized for CPython and
        is about five times slower than OpenSSL's implementation.
        """
        assert iterations > 0
        if not digest:
            digest = hashlib.sha1
        password = bytes_(password)
        salt = bytes_(salt)
        hlen = digest().digest_size
        if not dklen:
            dklen = hlen
        if dklen > (2 ** 32 - 1) * hlen:
            raise OverflowError('dklen too big')
        l = -(-dklen // hlen)
        r = dklen - (l - 1) * hlen

        hex_format_string = "%%0%ix" % (hlen * 2)

        inner, outer = digest(), digest()
        if len(password) > inner.block_size:
            password = digest(password).digest()
        password += b'\x00' * (inner.block_size - len(password))
        inner.update(password.translate(hmac.trans_36))
        outer.update(password.translate(hmac.trans_5C))

        def F(i):
            u = salt + struct.pack(b'>I', i)
            result = 0
            for j in xrange_(int(iterations)):
                dig1, dig2 = inner.copy(), outer.copy()
                dig1.update(u)
                dig2.update(dig1.digest())
                u = dig2.digest()
                result ^= _bin_to_long(u)
            return _long_to_bin(result, hex_format_string)

        T = [F(x) for x in xrange_(1, l)]

        return b''.join(T) + F(l)[:r]

Beaker-1.10.0/beaker/crypto/pyca_cryptography.py
"""Encryption module that uses pyca/cryptography"""

import os
import json

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import (
    Cipher, algorithms, modes
)


def aesEncrypt(data, key):
    # Generate a random 96-bit IV.
    iv = os.urandom(12)

    # Construct an AES-GCM Cipher object with the given key and a
    # randomly generated IV.
    encryptor = Cipher(
        algorithms.AES(key),
        modes.GCM(iv),
        backend=default_backend()
    ).encryptor()

    # Encrypt the plaintext and get the associated ciphertext.
    # GCM does not require padding.
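    # Output layout consumed by aesDecrypt() below: 12-byte IV, then the
    # 16-byte GCM tag, then the ciphertext (same length as the plaintext),
    # e.g. a 2-byte plaintext yields 12 + 16 + 2 = 30 bytes.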
    ciphertext = encryptor.update(data) + encryptor.finalize()

    return iv + encryptor.tag + ciphertext


def aesDecrypt(data, key):
    iv = data[:12]
    tag = data[12:28]
    ciphertext = data[28:]

    # Construct a Cipher object, with the key, iv, and additionally the
    # GCM tag used for authenticating the message.
    decryptor = Cipher(
        algorithms.AES(key),
        modes.GCM(iv, tag),
        backend=default_backend()
    ).decryptor()

    # Decryption gets us the authenticated plaintext.
    # If the tag does not match an InvalidTag exception will be raised.
    return decryptor.update(ciphertext) + decryptor.finalize()

has_aes = True


def getKeyLength():
    return 32

Beaker-1.10.0/beaker/crypto/pycrypto.py
"""Encryption module that uses pycryptopp or pycrypto"""
try:
    # Pycryptopp is preferred over Crypto because Crypto has had
    # various periods of not being maintained, and pycryptopp uses
    # the Crypto++ library which is generally considered the 'gold standard'
    # of crypto implementations
    from pycryptopp.cipher import aes

    def aesEncrypt(data, key):
        cipher = aes.AES(key)
        return cipher.process(data)

    # magic.
    aesDecrypt = aesEncrypt

except ImportError:
    from Crypto.Cipher import AES
    from Crypto.Util import Counter

    def aesEncrypt(data, key):
        cipher = AES.new(key, AES.MODE_CTR,
                         counter=Counter.new(128, initial_value=0))

        return cipher.encrypt(data)

    def aesDecrypt(data, key):
        cipher = AES.new(key, AES.MODE_CTR,
                         counter=Counter.new(128, initial_value=0))
        return cipher.decrypt(data)

has_aes = True


def getKeyLength():
    return 32

Beaker-1.10.0/beaker/crypto/util.py
from hashlib import md5

try:
    # Use PyCrypto (if available)
    from Crypto.Hash import HMAC as hmac, SHA as hmac_sha1
    sha1 = hmac_sha1.new

except ImportError:
    # PyCrypto not available.  Use the Python standard library.
    import hmac

    # NOTE: We have to use the callable with hashlib (hashlib.sha1),
    # otherwise hmac only accepts the sha module object itself
    from hashlib import sha1
    hmac_sha1 = sha1

Beaker-1.10.0/beaker/exceptions.py
"""Beaker exception classes"""


class BeakerException(Exception):
    pass


class BeakerWarning(RuntimeWarning):
    """Issued at runtime."""


class CreationAbortedError(Exception):
    """Deprecated."""


class InvalidCacheBackendError(BeakerException, ImportError):
    pass


class MissingCacheParameter(BeakerException):
    pass


class LockError(BeakerException):
    pass


class InvalidCryptoBackendError(BeakerException):
    pass

Beaker-1.10.0/beaker/ext/__init__.py

Beaker-1.10.0/beaker/ext/database.py
from beaker._compat import pickle

import logging
from datetime import datetime

from beaker.container import OpenResourceNamespaceManager, Container
from beaker.exceptions import InvalidCacheBackendError, MissingCacheParameter
from beaker.synchronization import file_synchronizer, null_synchronizer
from beaker.util import verify_directory, SyncDict

log = logging.getLogger(__name__)

sa = None
pool = None
types = None


class DatabaseNamespaceManager(OpenResourceNamespaceManager):
    metadatas = SyncDict()
    tables = SyncDict()

    @classmethod
    def _init_dependencies(cls):
        global sa, pool, types
        if sa is not None:
            return
        try:
            import sqlalchemy as sa
            import sqlalchemy.pool as pool
            from sqlalchemy import types
        except ImportError:
            raise InvalidCacheBackendError("Database cache backend requires "
                                           "the 'sqlalchemy' library")

    def __init__(self, namespace, url=None, sa_opts=None, optimistic=False,
                 table_name='beaker_cache', data_dir=None, lock_dir=None,
                 schema_name=None, **params):
        """Creates a database namespace manager

        ``url``
            SQLAlchemy compliant db url
        ``sa_opts``
            A dictionary of SQLAlchemy keyword options to initialize the engine
            with.
        ``optimistic``
            Use optimistic session locking, note that this will result in an
            additional select when updating a cache value to compare version
            numbers.
        ``table_name``
            The table name to use in the database for the cache.
        ``schema_name``
            The schema name to use in the database for the cache.
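        ``data_dir``
            Directory from which a default ``lock_dir`` is derived
            (``data_dir + "/container_db_lock"``) when ``lock_dir`` is not
            given explicitly.
        ``lock_dir``
            Directory holding the file-based creation locks.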
""" OpenResourceNamespaceManager.__init__(self, namespace) if sa_opts is None: sa_opts = {} self.lock_dir = None if lock_dir: self.lock_dir = lock_dir elif data_dir: self.lock_dir = data_dir + "/container_db_lock" if self.lock_dir: verify_directory(self.lock_dir) # Check to see if the table's been created before url = url or sa_opts['sa.url'] table_key = url + table_name def make_cache(): # Check to see if we have a connection pool open already meta_key = url + table_name def make_meta(): # SQLAlchemy pops the url, this ensures it sticks around # later sa_opts['sa.url'] = url engine = sa.engine_from_config(sa_opts, 'sa.') meta = sa.MetaData() meta.bind = engine return meta meta = DatabaseNamespaceManager.metadatas.get(meta_key, make_meta) # Create the table object and cache it now cache = sa.Table(table_name, meta, sa.Column('id', types.Integer, primary_key=True), sa.Column('namespace', types.String(255), nullable=False), sa.Column('accessed', types.DateTime, nullable=False), sa.Column('created', types.DateTime, nullable=False), sa.Column('data', types.PickleType, nullable=False), sa.UniqueConstraint('namespace'), schema=schema_name if schema_name else meta.schema ) cache.create(checkfirst=True) return cache self.hash = {} self._is_new = False self.loaded = False self.cache = DatabaseNamespaceManager.tables.get(table_key, make_cache) def get_access_lock(self): return null_synchronizer() def get_creation_lock(self, key): return file_synchronizer( identifier="databasecontainer/funclock/%s/%s" % ( self.namespace, key ), lock_dir=self.lock_dir) def do_open(self, flags, replace): # If we already loaded the data, don't bother loading it again if self.loaded: self.flags = flags return cache = self.cache result_proxy = sa.select([cache.c.data], cache.c.namespace == self.namespace ).execute() result = result_proxy.fetchone() result_proxy.close() if not result: self._is_new = True self.hash = {} else: self._is_new = False try: self.hash = result['data'] except (IOError, OSError, EOFError, pickle.PickleError, pickle.PickleError): log.debug("Couln't load pickle data, creating new storage") self.hash = {} self._is_new = True self.flags = flags self.loaded = True def do_close(self): if self.flags is not None and (self.flags == 'c' or self.flags == 'w'): cache = self.cache if self._is_new: cache.insert().execute(namespace=self.namespace, data=self.hash, accessed=datetime.now(), created=datetime.now()) self._is_new = False else: cache.update(cache.c.namespace == self.namespace).execute( data=self.hash, accessed=datetime.now()) self.flags = None def do_remove(self): cache = self.cache cache.delete(cache.c.namespace == self.namespace).execute() self.hash = {} # We can retain the fact that we did a load attempt, but since the # file is gone this will be a new namespace should it be saved. 
self._is_new = True def __getitem__(self, key): return self.hash[key] def __contains__(self, key): return key in self.hash def __setitem__(self, key, value): self.hash[key] = value def __delitem__(self, key): del self.hash[key] def keys(self): return self.hash.keys() class DatabaseContainer(Container): namespace_manager = DatabaseNamespaceManager Beaker-1.10.0/beaker/ext/google.py0000644000076500000240000000757112473366102016732 0ustar amolstaff00000000000000from beaker._compat import pickle import logging from datetime import datetime from beaker.container import OpenResourceNamespaceManager, Container from beaker.exceptions import InvalidCacheBackendError from beaker.synchronization import null_synchronizer log = logging.getLogger(__name__) db = None class GoogleNamespaceManager(OpenResourceNamespaceManager): tables = {} @classmethod def _init_dependencies(cls): global db if db is not None: return try: db = __import__('google.appengine.ext.db').appengine.ext.db except ImportError: raise InvalidCacheBackendError("Datastore cache backend requires the " "'google.appengine.ext' library") def __init__(self, namespace, table_name='beaker_cache', **params): """Creates a datastore namespace manager""" OpenResourceNamespaceManager.__init__(self, namespace) def make_cache(): table_dict = dict(created=db.DateTimeProperty(), accessed=db.DateTimeProperty(), data=db.BlobProperty()) table = type(table_name, (db.Model,), table_dict) return table self.table_name = table_name self.cache = GoogleNamespaceManager.tables.setdefault(table_name, make_cache()) self.hash = {} self._is_new = False self.loaded = False self.log_debug = logging.DEBUG >= log.getEffectiveLevel() # Google wants namespaces to start with letters, change the namespace # to start with a letter self.namespace = 'p%s' % self.namespace def get_access_lock(self): return null_synchronizer() def get_creation_lock(self, key): # this is weird, should probably be present return null_synchronizer() def do_open(self, flags, replace): # If we already loaded the data, don't bother loading it again if self.loaded: self.flags = flags return item = self.cache.get_by_key_name(self.namespace) if not item: self._is_new = True self.hash = {} else: self._is_new = False try: self.hash = pickle.loads(str(item.data)) except (IOError, OSError, EOFError, pickle.PickleError): if self.log_debug: log.debug("Couln't load pickle data, creating new storage") self.hash = {} self._is_new = True self.flags = flags self.loaded = True def do_close(self): if self.flags is not None and (self.flags == 'c' or self.flags == 'w'): if self._is_new: item = self.cache(key_name=self.namespace) item.data = pickle.dumps(self.hash) item.created = datetime.now() item.accessed = datetime.now() item.put() self._is_new = False else: item = self.cache.get_by_key_name(self.namespace) item.data = pickle.dumps(self.hash) item.accessed = datetime.now() item.put() self.flags = None def do_remove(self): item = self.cache.get_by_key_name(self.namespace) item.delete() self.hash = {} # We can retain the fact that we did a load attempt, but since the # file is gone this will be a new namespace should it be saved. 
self._is_new = True def __getitem__(self, key): return self.hash[key] def __contains__(self, key): return key in self.hash def __setitem__(self, key, value): self.hash[key] = value def __delitem__(self, key): del self.hash[key] def keys(self): return self.hash.keys() class GoogleContainer(Container): namespace_class = GoogleNamespaceManager Beaker-1.10.0/beaker/ext/memcached.py0000644000076500000240000001550413271430450017352 0ustar amolstaff00000000000000from .._compat import PY2 from beaker.container import NamespaceManager, Container from beaker.crypto.util import sha1 from beaker.exceptions import InvalidCacheBackendError, MissingCacheParameter from beaker.synchronization import file_synchronizer from beaker.util import verify_directory, SyncDict, parse_memcached_behaviors import warnings MAX_KEY_LENGTH = 250 _client_libs = {} def _load_client(name='auto'): if name in _client_libs: return _client_libs[name] def _pylibmc(): global pylibmc import pylibmc return pylibmc def _cmemcache(): global cmemcache import cmemcache warnings.warn("cmemcache is known to have serious " "concurrency issues; consider using 'memcache' " "or 'pylibmc'") return cmemcache def _memcache(): global memcache import memcache return memcache def _bmemcached(): global bmemcached import bmemcached return bmemcached def _auto(): for _client in (_pylibmc, _cmemcache, _memcache, _bmemcached): try: return _client() except ImportError: pass else: raise InvalidCacheBackendError( "Memcached cache backend requires one " "of: 'pylibmc' or 'memcache' to be installed.") clients = { 'pylibmc': _pylibmc, 'cmemcache': _cmemcache, 'memcache': _memcache, 'bmemcached': _bmemcached, 'auto': _auto } _client_libs[name] = clib = clients[name]() return clib def _is_configured_for_pylibmc(memcache_module_config, memcache_client): return memcache_module_config == 'pylibmc' or \ memcache_client.__name__.startswith('pylibmc') class MemcachedNamespaceManager(NamespaceManager): """Provides the :class:`.NamespaceManager` API over a memcache client library.""" clients = SyncDict() def __new__(cls, *args, **kw): memcache_module = kw.pop('memcache_module', 'auto') memcache_client = _load_client(memcache_module) if _is_configured_for_pylibmc(memcache_module, memcache_client): return object.__new__(PyLibMCNamespaceManager) else: return object.__new__(MemcachedNamespaceManager) def __init__(self, namespace, url, memcache_module='auto', data_dir=None, lock_dir=None, **kw): NamespaceManager.__init__(self, namespace) _memcache_module = _client_libs[memcache_module] if not url: raise MissingCacheParameter("url is required") self.lock_dir = None if lock_dir: self.lock_dir = lock_dir elif data_dir: self.lock_dir = data_dir + "/container_mcd_lock" if self.lock_dir: verify_directory(self.lock_dir) # Check for pylibmc namespace manager, in which case client will be # instantiated by subclass __init__, to handle behavior passing to the # pylibmc client if not _is_configured_for_pylibmc(memcache_module, _memcache_module): self.mc = MemcachedNamespaceManager.clients.get( (memcache_module, url), _memcache_module.Client, url.split(';')) def get_creation_lock(self, key): return file_synchronizer( identifier="memcachedcontainer/funclock/%s/%s" % (self.namespace, key), lock_dir=self.lock_dir) def _format_key(self, key): if not isinstance(key, str): key = key.decode('ascii') formated_key = (self.namespace + '_' + key).replace(' ', '\302\267') if len(formated_key) > MAX_KEY_LENGTH: if not PY2: formated_key = formated_key.encode('utf-8') formated_key = 
sha1(formated_key).hexdigest() return formated_key def __getitem__(self, key): return self.mc.get(self._format_key(key)) def __contains__(self, key): value = self.mc.get(self._format_key(key)) return value is not None def has_key(self, key): return key in self def set_value(self, key, value, expiretime=None): if expiretime: self.mc.set(self._format_key(key), value, time=expiretime) else: self.mc.set(self._format_key(key), value) def __setitem__(self, key, value): self.set_value(key, value) def __delitem__(self, key): self.mc.delete(self._format_key(key)) def do_remove(self): self.mc.flush_all() def keys(self): raise NotImplementedError( "Memcache caching does not " "support iteration of all cache keys") class PyLibMCNamespaceManager(MemcachedNamespaceManager): """Provide thread-local support for pylibmc.""" pools = SyncDict() def __init__(self, *arg, **kw): super(PyLibMCNamespaceManager, self).__init__(*arg, **kw) memcache_module = kw.get('memcache_module', 'auto') _memcache_module = _client_libs[memcache_module] protocol = kw.get('protocol', 'text') username = kw.get('username', None) password = kw.get('password', None) url = kw.get('url') behaviors = parse_memcached_behaviors(kw) self.mc = MemcachedNamespaceManager.clients.get( (memcache_module, url), _memcache_module.Client, servers=url.split(';'), behaviors=behaviors, binary=(protocol == 'binary'), username=username, password=password) self.pool = PyLibMCNamespaceManager.pools.get( (memcache_module, url), pylibmc.ThreadMappedPool, self.mc) def __getitem__(self, key): with self.pool.reserve() as mc: return mc.get(self._format_key(key)) def __contains__(self, key): with self.pool.reserve() as mc: value = mc.get(self._format_key(key)) return value is not None def has_key(self, key): return key in self def set_value(self, key, value, expiretime=None): with self.pool.reserve() as mc: if expiretime: mc.set(self._format_key(key), value, time=expiretime) else: mc.set(self._format_key(key), value) def __setitem__(self, key, value): self.set_value(key, value) def __delitem__(self, key): with self.pool.reserve() as mc: mc.delete(self._format_key(key)) def do_remove(self): with self.pool.reserve() as mc: mc.flush_all() class MemcachedContainer(Container): """Container class which invokes :class:`.MemcacheNamespaceManager`.""" namespace_class = MemcachedNamespaceManager Beaker-1.10.0/beaker/ext/mongodb.py0000644000076500000240000001507713121673026017100 0ustar amolstaff00000000000000import datetime import os import threading import time import pickle try: import pymongo import pymongo.errors import bson except ImportError: pymongo = None bson = None from beaker.container import NamespaceManager from beaker.synchronization import SynchronizerImpl from beaker.util import SyncDict, machine_identifier from beaker.crypto.util import sha1 from beaker._compat import string_type, PY2 class MongoNamespaceManager(NamespaceManager): """Provides the :class:`.NamespaceManager` API over MongoDB. Provided ``url`` can be both a mongodb connection string or an already existing MongoClient instance. The data will be stored into ``beaker_cache`` collection of the *default database*, so make sure your connection string or MongoClient point to a default database. """ MAX_KEY_LENGTH = 1024 clients = SyncDict() def __init__(self, namespace, url, **kw): super(MongoNamespaceManager, self).__init__(namespace) self.lock_dir = None # MongoDB uses mongo itself for locking. 
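
        # ``url`` may be either a mongodb connection string such as
        # "mongodb://localhost:27017/dbname" (the database name selects the
        # default database) or a ready-made pymongo.MongoClient instance;
        # both forms are handled below.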
        if pymongo is None:
            raise RuntimeError('pymongo3 is not available')

        if isinstance(url, string_type):
            self.client = MongoNamespaceManager.clients.get(url, pymongo.MongoClient, url)
        else:
            self.client = url
        self.db = self.client.get_default_database()

    def _format_key(self, key):
        if not isinstance(key, str):
            key = key.decode('ascii')
        if len(key) > (self.MAX_KEY_LENGTH - len(self.namespace) - 1):
            if not PY2:
                key = key.encode('utf-8')
            key = sha1(key).hexdigest()
        return '%s:%s' % (self.namespace, key)

    def get_creation_lock(self, key):
        return MongoSynchronizer(self._format_key(key), self.client)

    def __getitem__(self, key):
        self._clear_expired()
        entry = self.db.backer_cache.find_one({'_id': self._format_key(key)})
        if entry is None:
            raise KeyError(key)
        return pickle.loads(entry['value'])

    def __contains__(self, key):
        self._clear_expired()
        entry = self.db.backer_cache.find_one({'_id': self._format_key(key)})
        return entry is not None

    def has_key(self, key):
        return key in self

    def set_value(self, key, value, expiretime=None):
        self._clear_expired()

        expiration = None
        if expiretime is not None:
            expiration = time.time() + expiretime

        value = pickle.dumps(value)
        self.db.backer_cache.update_one({'_id': self._format_key(key)},
                                        {'$set': {'value': bson.Binary(value),
                                                  'expiration': expiration}},
                                        upsert=True)

    def __setitem__(self, key, value):
        self.set_value(key, value)

    def __delitem__(self, key):
        self._clear_expired()
        self.db.backer_cache.delete_many({'_id': self._format_key(key)})

    def do_remove(self):
        self.db.backer_cache.delete_many({'_id': {'$regex': '^%s' % self.namespace}})

    def keys(self):
        # Entries are stored with the formatted '<namespace>:<key>' string as
        # their ``_id``, so strip the namespace prefix back off here.
        return [e['_id'].split(':', 1)[-1] for e in self.db.backer_cache.find(
            {'_id': {'$regex': '^%s' % self.namespace}}
        )]

    def _clear_expired(self):
        now = time.time()
        self.db.backer_cache.delete_many({'_id': {'$regex': '^%s' % self.namespace},
                                          'expiration': {'$ne': None, '$lte': now}})


class MongoSynchronizer(SynchronizerImpl):
    """Provides a Writer/Reader lock based on MongoDB.

    Provided ``url`` can be both a mongodb connection string or
    an already existing MongoClient instance.

    The data will be stored into ``beaker_locks`` collection of the
    *default database*, so make sure your connection string or MongoClient
    point to a default database.

    Locks are identified by local machine, PID and threadid, so they are
    suitable for use in both local and distributed environments.
    """
    # Generating a cache entry can take a while, but 15 minutes is more
    # than a reasonable upper bound for holding the lock.
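    # LOCK_EXPIRATION is expressed in seconds; _clear_expired_locks() reaps
    # locks older than this before each acquire attempt.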
LOCK_EXPIRATION = 900 MACHINE_ID = machine_identifier() def __init__(self, identifier, url): super(MongoSynchronizer, self).__init__() self.identifier = identifier if isinstance(url, string_type): self.client = MongoNamespaceManager.clients.get(url, pymongo.MongoClient, url) else: self.client = url self.db = self.client.get_default_database() def _clear_expired_locks(self): now = datetime.datetime.utcnow() expired = now - datetime.timedelta(seconds=self.LOCK_EXPIRATION) self.db.beaker_locks.delete_many({'_id': self.identifier, 'timestamp': {'$lte': expired}}) return now def _get_owner_id(self): return '%s-%s-%s' % (self.MACHINE_ID, os.getpid(), threading.current_thread().ident) def do_release_read_lock(self): owner_id = self._get_owner_id() self.db.beaker_locks.update_one({'_id': self.identifier, 'readers': owner_id}, {'$pull': {'readers': owner_id}}) def do_acquire_read_lock(self, wait): now = self._clear_expired_locks() owner_id = self._get_owner_id() while True: try: self.db.beaker_locks.update_one({'_id': self.identifier, 'owner': None}, {'$set': {'timestamp': now}, '$push': {'readers': owner_id}}, upsert=True) return True except pymongo.errors.DuplicateKeyError: if not wait: return False time.sleep(0.2) def do_release_write_lock(self): self.db.beaker_locks.delete_one({'_id': self.identifier, 'owner': self._get_owner_id()}) def do_acquire_write_lock(self, wait): now = self._clear_expired_locks() owner_id = self._get_owner_id() while True: try: self.db.beaker_locks.update_one({'_id': self.identifier, 'owner': None, 'readers': []}, {'$set': {'owner': owner_id, 'timestamp': now}}, upsert=True) return True except pymongo.errors.DuplicateKeyError: if not wait: return False time.sleep(0.2) Beaker-1.10.0/beaker/ext/redisnm.py0000644000076500000240000001105713271426536017116 0ustar amolstaff00000000000000import os import threading import time import pickle try: import redis except ImportError: redis = None from beaker.container import NamespaceManager from beaker.synchronization import SynchronizerImpl from beaker.util import SyncDict, machine_identifier from beaker.crypto.util import sha1 from beaker._compat import string_type, PY2 class RedisNamespaceManager(NamespaceManager): """Provides the :class:`.NamespaceManager` API over Redis. Provided ``url`` can be both a redis connection string or an already existing StrictRedis instance. The data will be stored into redis keys, with their name starting with ``beaker_cache:``. So make sure you provide a specific database number if you don't want to mix them with your own data. """ MAX_KEY_LENGTH = 1024 clients = SyncDict() def __init__(self, namespace, url, timeout=None, **kw): super(RedisNamespaceManager, self).__init__(namespace) self.lock_dir = None # Redis uses redis itself for locking. 
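
        # ``url`` may be either a redis connection string such as
        # "redis://localhost:6379/0" or a ready-made StrictRedis instance;
        # ``timeout`` (in seconds), when given, becomes the default
        # expiration applied to stored values.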
self.timeout = timeout if redis is None: raise RuntimeError('redis is not available') if isinstance(url, string_type): self.client = RedisNamespaceManager.clients.get(url, redis.StrictRedis.from_url, url) else: self.client = url def _format_key(self, key): if not isinstance(key, str): key = key.decode('ascii') if len(key) > (self.MAX_KEY_LENGTH - len(self.namespace) - len('beaker_cache:') - 1): if not PY2: key = key.encode('utf-8') key = sha1(key).hexdigest() return 'beaker_cache:%s:%s' % (self.namespace, key) def get_creation_lock(self, key): return RedisSynchronizer(self._format_key(key), self.client) def __getitem__(self, key): entry = self.client.get(self._format_key(key)) if entry is None: raise KeyError(key) return pickle.loads(entry) def __contains__(self, key): return self.client.exists(self._format_key(key)) def has_key(self, key): return key in self def set_value(self, key, value, expiretime=None): value = pickle.dumps(value) if expiretime is None and self.timeout is not None: expiretime = self.timeout if expiretime is not None: self.client.setex(self._format_key(key), int(expiretime), value) else: self.client.set(self._format_key(key), value) def __setitem__(self, key, value): self.set_value(key, value) def __delitem__(self, key): self.client.delete(self._format_key(key)) def do_remove(self): for k in self.keys(): self.client.delete(k) def keys(self): return self.client.keys('beaker_cache:%s:*' % self.namespace) class RedisSynchronizer(SynchronizerImpl): """Synchronizer based on redis. Provided ``url`` can be both a redis connection string or an already existing StrictRedis instance. This Synchronizer only supports 1 reader or 1 writer at time, not concurrent readers. """ # If a cache entry generation function can take a lot, # but 15 minutes is more than a reasonable time. 
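    # LOCK_EXPIRATION is expressed in seconds; do_acquire_write_lock()
    # converts it to milliseconds when passing it to redis pexpire().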
LOCK_EXPIRATION = 900 MACHINE_ID = machine_identifier() def __init__(self, identifier, url): super(RedisSynchronizer, self).__init__() self.identifier = 'beaker_lock:%s' % identifier if isinstance(url, string_type): self.client = RedisNamespaceManager.clients.get(url, redis.StrictRedis.from_url, url) else: self.client = url def _get_owner_id(self): return ( '%s-%s-%s' % (self.MACHINE_ID, os.getpid(), threading.current_thread().ident) ).encode('ascii') def do_release_read_lock(self): self.do_release_write_lock() def do_acquire_read_lock(self, wait): self.do_acquire_write_lock(wait) def do_release_write_lock(self): identifier = self.identifier owner_id = self._get_owner_id() def execute_release(pipe): lock_value = pipe.get(identifier) if lock_value == owner_id: pipe.delete(identifier) self.client.transaction(execute_release, identifier) def do_acquire_write_lock(self, wait): owner_id = self._get_owner_id() while True: if self.client.setnx(self.identifier, owner_id): self.client.pexpire(self.identifier, self.LOCK_EXPIRATION * 1000) return True if not wait: return False time.sleep(0.2) Beaker-1.10.0/beaker/ext/sqla.py0000644000076500000240000001116112473366102016404 0ustar amolstaff00000000000000from beaker._compat import pickle import logging import pickle from datetime import datetime from beaker.container import OpenResourceNamespaceManager, Container from beaker.exceptions import InvalidCacheBackendError, MissingCacheParameter from beaker.synchronization import file_synchronizer, null_synchronizer from beaker.util import verify_directory, SyncDict log = logging.getLogger(__name__) sa = None class SqlaNamespaceManager(OpenResourceNamespaceManager): binds = SyncDict() tables = SyncDict() @classmethod def _init_dependencies(cls): global sa if sa is not None: return try: import sqlalchemy as sa except ImportError: raise InvalidCacheBackendError("SQLAlchemy, which is required by " "this backend, is not installed") def __init__(self, namespace, bind, table, data_dir=None, lock_dir=None, **kwargs): """Create a namespace manager for use with a database table via SQLAlchemy. ``bind`` SQLAlchemy ``Engine`` or ``Connection`` object ``table`` SQLAlchemy ``Table`` object in which to store namespace data. This should usually be something created by ``make_cache_table``. 
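
        A minimal usage sketch (the SQLite URL and namespace name are
        illustrative assumptions; creating the table is left to the caller,
        as ``make_cache_table`` does not create it)::

            import sqlalchemy as sa
            from beaker.ext.sqla import SqlaNamespaceManager, make_cache_table

            engine = sa.create_engine('sqlite:///beaker.db')
            metadata = sa.MetaData()
            table = make_cache_table(metadata)
            metadata.create_all(engine)

            manager = SqlaNamespaceManager('my_namespace', engine, table)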
""" OpenResourceNamespaceManager.__init__(self, namespace) if lock_dir: self.lock_dir = lock_dir elif data_dir: self.lock_dir = data_dir + "/container_db_lock" if self.lock_dir: verify_directory(self.lock_dir) self.bind = self.__class__.binds.get(str(bind.url), lambda: bind) self.table = self.__class__.tables.get('%s:%s' % (bind.url, table.name), lambda: table) self.hash = {} self._is_new = False self.loaded = False def get_access_lock(self): return null_synchronizer() def get_creation_lock(self, key): return file_synchronizer( identifier="databasecontainer/funclock/%s" % self.namespace, lock_dir=self.lock_dir) def do_open(self, flags, replace): if self.loaded: self.flags = flags return select = sa.select([self.table.c.data], (self.table.c.namespace == self.namespace)) result = self.bind.execute(select).fetchone() if not result: self._is_new = True self.hash = {} else: self._is_new = False try: self.hash = result['data'] except (IOError, OSError, EOFError, pickle.PickleError, pickle.PickleError): log.debug("Couln't load pickle data, creating new storage") self.hash = {} self._is_new = True self.flags = flags self.loaded = True def do_close(self): if self.flags is not None and (self.flags == 'c' or self.flags == 'w'): if self._is_new: insert = self.table.insert() self.bind.execute(insert, namespace=self.namespace, data=self.hash, accessed=datetime.now(), created=datetime.now()) self._is_new = False else: update = self.table.update(self.table.c.namespace == self.namespace) self.bind.execute(update, data=self.hash, accessed=datetime.now()) self.flags = None def do_remove(self): delete = self.table.delete(self.table.c.namespace == self.namespace) self.bind.execute(delete) self.hash = {} self._is_new = True def __getitem__(self, key): return self.hash[key] def __contains__(self, key): return key in self.hash def __setitem__(self, key, value): self.hash[key] = value def __delitem__(self, key): del self.hash[key] def keys(self): return self.hash.keys() class SqlaContainer(Container): namespace_manager = SqlaNamespaceManager def make_cache_table(metadata, table_name='beaker_cache', schema_name=None): """Return a ``Table`` object suitable for storing cached values for the namespace manager. Do not create the table.""" return sa.Table(table_name, metadata, sa.Column('namespace', sa.String(255), primary_key=True), sa.Column('accessed', sa.DateTime, nullable=False), sa.Column('created', sa.DateTime, nullable=False), sa.Column('data', sa.PickleType, nullable=False), schema=schema_name if schema_name else metadata.schema) Beaker-1.10.0/beaker/middleware.py0000644000076500000240000001452213121217746016765 0ustar amolstaff00000000000000import warnings try: from paste.registry import StackedObjectProxy beaker_session = StackedObjectProxy(name="Beaker Session") beaker_cache = StackedObjectProxy(name="Cache Manager") except: beaker_cache = None beaker_session = None from beaker.cache import CacheManager from beaker.session import Session, SessionObject from beaker.util import coerce_cache_params, coerce_session_params, \ parse_cache_config_options class CacheMiddleware(object): cache = beaker_cache def __init__(self, app, config=None, environ_key='beaker.cache', **kwargs): """Initialize the Cache Middleware The Cache middleware will make a CacheManager instance available every request under the ``environ['beaker.cache']`` key by default. The location in environ can be changed by setting ``environ_key``. ``config`` dict All settings should be prefixed by 'cache.'. 
            This method of passing variables is intended for Paste and other
            setups that accumulate multiple component settings in a single
            dictionary. If config contains *no cache. prefixed args*, then
            *all* of the config options will be used to initialize the
            Cache objects.

        ``environ_key``
            Location where the Cache instance will be keyed in the WSGI
            environ

        ``**kwargs``
            All keyword arguments are assumed to be cache settings and will
            override any settings found in ``config``

        """
        self.app = app
        config = config or {}

        self.options = {}

        # Update the options with the parsed config
        self.options.update(parse_cache_config_options(config))

        # Add any options from kwargs, but leave out the defaults this
        # time
        self.options.update(
            parse_cache_config_options(kwargs, include_defaults=False))

        # Assume all keys are intended for cache if none are prefixed with
        # 'cache.'
        if not self.options and config:
            self.options = config

        self.options.update(kwargs)
        self.cache_manager = CacheManager(**self.options)
        self.environ_key = environ_key

    def __call__(self, environ, start_response):
        if environ.get('paste.registry'):
            if environ['paste.registry'].reglist:
                environ['paste.registry'].register(self.cache,
                                                   self.cache_manager)
        environ[self.environ_key] = self.cache_manager
        return self.app(environ, start_response)


class SessionMiddleware(object):
    session = beaker_session

    def __init__(self, wrap_app, config=None, environ_key='beaker.session',
                 **kwargs):
        """Initialize the Session Middleware

        The Session middleware will make a lazy session instance
        available every request under the ``environ['beaker.session']``
        key by default. The location in environ can be changed by
        setting ``environ_key``.

        ``config``
            dict  All settings should be prefixed by 'session.'. This
            method of passing variables is intended for Paste and other
            setups that accumulate multiple component settings in a
            single dictionary. If config contains *no session. prefixed
            args*, then *all* of the config options will be used to
            initialize the Session objects.

        ``environ_key``
            Location where the Session instance will be keyed in the WSGI
            environ

        ``**kwargs``
            All keyword arguments are assumed to be session settings and
            will override any settings found in ``config``

        """
        config = config or {}

        # Load up the default params
        self.options = dict(invalidate_corrupt=True, type=None,
                            data_dir=None, key='beaker.session.id',
                            timeout=None, save_accessed_time=True,
                            secret=None, log_file=None)

        # Pull out any config args meant for beaker session, if there are any
        for dct in [config, kwargs]:
            for key, val in dct.items():
                if key.startswith('beaker.session.'):
                    self.options[key[15:]] = val
                if key.startswith('session.'):
                    self.options[key[8:]] = val
                if key.startswith('session_'):
                    warnings.warn('Session options should start with session. '
                                  'instead of session_.', DeprecationWarning, 2)
                    self.options[key[8:]] = val

        # Coerce and validate session params
        coerce_session_params(self.options)

        # Assume all keys are intended for session if none are prefixed with
        # 'session.'
if not self.options and config: self.options = config self.options.update(kwargs) self.wrap_app = self.app = wrap_app self.environ_key = environ_key def __call__(self, environ, start_response): session = SessionObject(environ, **self.options) if environ.get('paste.registry'): if environ['paste.registry'].reglist: environ['paste.registry'].register(self.session, session) environ[self.environ_key] = session environ['beaker.get_session'] = self._get_session if 'paste.testing_variables' in environ and 'webtest_varname' in self.options: environ['paste.testing_variables'][self.options['webtest_varname']] = session def session_start_response(status, headers, exc_info=None): if session.accessed(): session.persist() if session.__dict__['_headers']['set_cookie']: cookie = session.__dict__['_headers']['cookie_out'] if cookie: headers.append(('Set-cookie', cookie)) return start_response(status, headers, exc_info) return self.wrap_app(environ, session_start_response) def _get_session(self): return Session({}, use_cookies=False, **self.options) def session_filter_factory(global_conf, **kwargs): def filter(app): return SessionMiddleware(app, global_conf, **kwargs) return filter def session_filter_app_factory(app, global_conf, **kwargs): return SessionMiddleware(app, global_conf, **kwargs) Beaker-1.10.0/beaker/session.py0000644000076500000240000007561513305311623016336 0ustar amolstaff00000000000000from ._compat import PY2, pickle, http_cookies, unicode_text, b64encode, b64decode, string_type import os import time from datetime import datetime, timedelta from beaker.crypto import hmac as HMAC, hmac_sha1 as SHA1, sha1, get_nonce_size, DEFAULT_NONCE_BITS, get_crypto_module from beaker import crypto, util from beaker.cache import clsmap from beaker.exceptions import BeakerException, InvalidCryptoBackendError from beaker.cookie import SimpleCookie __all__ = ['SignedCookie', 'Session', 'InvalidSignature'] class _InvalidSignatureType(object): """Returned from SignedCookie when the value's signature was invalid.""" def __nonzero__(self): return False def __bool__(self): return False InvalidSignature = _InvalidSignatureType() try: import uuid def _session_id(): return uuid.uuid4().hex except ImportError: import random if hasattr(os, 'getpid'): getpid = os.getpid else: def getpid(): return '' def _session_id(): id_str = "%f%s%f%s" % ( time.time(), id({}), random.random(), getpid() ) # NB: nothing against second parameter to b64encode, but it seems # to be slower than simple chained replacement if not PY2: raw_id = b64encode(sha1(id_str.encode('ascii')).digest()) return str(raw_id.replace(b'+', b'-').replace(b'/', b'_').rstrip(b'=')) else: raw_id = b64encode(sha1(id_str).digest()) return raw_id.replace('+', '-').replace('/', '_').rstrip('=') class SignedCookie(SimpleCookie): """Extends python cookie to give digital signature support""" def __init__(self, secret, input=None): self.secret = secret.encode('UTF-8') http_cookies.BaseCookie.__init__(self, input) def value_decode(self, val): val = val.strip('"') if not val: return None, val sig = HMAC.new(self.secret, val[40:].encode('utf-8'), SHA1).hexdigest() # Avoid timing attacks invalid_bits = 0 input_sig = val[:40] if len(sig) != len(input_sig): return InvalidSignature, val for a, b in zip(sig, input_sig): invalid_bits += a != b if invalid_bits: return InvalidSignature, val else: return val[40:], val def value_encode(self, val): sig = HMAC.new(self.secret, val.encode('utf-8'), SHA1).hexdigest() return str(val), ("%s%s" % (sig, val)) class Session(dict): """Session 
object that uses container package for storage.

    :param invalidate_corrupt: How to handle corrupt data when loading. When
                               set to True, then corrupt data will be silently
                               invalidated and a new session created,
                               otherwise invalid data will cause an exception.
    :type invalidate_corrupt: bool
    :param use_cookies: Whether or not cookies should be created. When set to
                        False, it is assumed the user will handle storing the
                        session on their own.
    :type use_cookies: bool
    :param type: What data backend type should be used to store the underlying
                 session data
    :param key: The name the cookie should be set to.
    :param timeout: How long session data is considered valid. This is used
                    regardless of the cookie being present or not to determine
                    whether session data is still valid. Can be set to None to
                    disable session time out.
    :type timeout: int or None
    :param save_accessed_time: Whether beaker should save the session's access
                               time (True) or only modification time (False).
                               Defaults to True.
    :param cookie_expires: Expiration date for cookie
    :param cookie_domain: Domain to use for the cookie.
    :param cookie_path: Path to use for the cookie.
    :param data_serializer: If ``"json"`` or ``"pickle"`` should be used
                            to serialize data. Can also be an object with
                            ``loads`` and ``dumps`` methods. By default
                            ``"pickle"`` is used.
    :param secure: Whether or not the cookie should only be sent over SSL.
    :param httponly: Whether or not the cookie should only be accessible by
                     the browser, not by JavaScript.
    :param encrypt_key: The key to use for the local session encryption, if
                        not provided the session will not be encrypted.
    :param validate_key: The key used to sign the local encrypted session
    :param encrypt_nonce_bits: Number of bits used to generate the nonce for
                               the encryption key salt. For security reasons
                               this is 128 bits by default. If you want to
                               keep backward compatibility with sessions
                               generated before 1.8.0 set this to 48.
    :param crypto_type: encryption module to use
    :param samesite: SameSite value for the cookie -- should be either 'Lax',
                     'Strict', or None.
    """
    def __init__(self, request, id=None, invalidate_corrupt=False,
                 use_cookies=True, type=None, data_dir=None,
                 key='beaker.session.id',
                 timeout=None, save_accessed_time=True, cookie_expires=True,
                 cookie_domain=None, cookie_path='/', data_serializer='pickle',
                 secret=None, secure=False, namespace_class=None,
                 httponly=False, encrypt_key=None, validate_key=None,
                 encrypt_nonce_bits=DEFAULT_NONCE_BITS, crypto_type='default',
                 samesite='Lax',
                 **namespace_args):
        if not type:
            if data_dir:
                self.type = 'file'
            else:
                self.type = 'memory'
        else:
            self.type = type
        self.namespace_class = namespace_class or clsmap[self.type]

        self.namespace_args = namespace_args

        self.request = request
        self.data_dir = data_dir
        self.key = key

        if timeout and not save_accessed_time:
            raise BeakerException("timeout requires save_accessed_time")
        self.timeout = timeout

        # If a timeout was provided, forward it to the backend too, so the
        # backend can automatically expire entries if it's supported.
        if self.timeout is not None:
            # The backend expiration should always be a bit longer than the
            # session expiration itself to prevent the case where the backend
            # data expires while the session is being read (PR#153).
            # 2 minutes seems a reasonable time.
self.namespace_args['timeout'] = self.timeout + 60 * 2 self.save_atime = save_accessed_time self.use_cookies = use_cookies self.cookie_expires = cookie_expires self._set_serializer(data_serializer) # Default cookie domain/path self._domain = cookie_domain self._path = cookie_path self.was_invalidated = False self.secret = secret self.secure = secure self.httponly = httponly self.samesite = samesite self.encrypt_key = encrypt_key self.validate_key = validate_key self.encrypt_nonce_size = get_nonce_size(encrypt_nonce_bits) self.crypto_module = get_crypto_module(crypto_type) self.id = id self.accessed_dict = {} self.invalidate_corrupt = invalidate_corrupt if self.use_cookies: cookieheader = request.get('cookie', '') if secret: try: self.cookie = SignedCookie( secret, input=cookieheader, ) except http_cookies.CookieError: self.cookie = SignedCookie( secret, input=None, ) else: self.cookie = SimpleCookie(input=cookieheader) if not self.id and self.key in self.cookie: cookie_data = self.cookie[self.key].value # Should we check invalidate_corrupt here? if cookie_data is InvalidSignature: cookie_data = None self.id = cookie_data self.is_new = self.id is None if self.is_new: self._create_id() self['_accessed_time'] = self['_creation_time'] = time.time() else: try: self.load() except Exception as e: if self.invalidate_corrupt: util.warn( "Invalidating corrupt session %s; " "error was: %s. Set invalidate_corrupt=False " "to propagate this exception." % (self.id, e)) self.invalidate() else: raise def _set_serializer(self, data_serializer): self.data_serializer = data_serializer if self.data_serializer == 'json': self.serializer = util.JsonSerializer() elif self.data_serializer == 'pickle': self.serializer = util.PickleSerializer() elif isinstance(self.data_serializer, string_type): raise BeakerException('Invalid value for data_serializer: %s' % data_serializer) else: self.serializer = data_serializer def has_key(self, name): return name in self def _set_cookie_values(self, expires=None): self.cookie[self.key] = self.id if self._domain: self.cookie[self.key]['domain'] = self._domain if self.secure: self.cookie[self.key]['secure'] = True if self.samesite: self.cookie[self.key]['samesite'] = self.samesite self._set_cookie_http_only() self.cookie[self.key]['path'] = self._path self._set_cookie_expires(expires) def _set_cookie_expires(self, expires): if expires is None: expires = self.cookie_expires if expires is False: expires_date = datetime.fromtimestamp(0x7FFFFFFF) elif isinstance(expires, timedelta): expires_date = datetime.utcnow() + expires elif isinstance(expires, datetime): expires_date = expires elif expires is not True: raise ValueError("Invalid argument for cookie_expires: %s" % repr(self.cookie_expires)) self.cookie_expires = expires if not self.cookie or self.key not in self.cookie: self.cookie[self.key] = self.id if expires is True: self.cookie[self.key]['expires'] = '' return True self.cookie[self.key]['expires'] = \ expires_date.strftime("%a, %d-%b-%Y %H:%M:%S GMT") return expires_date def _update_cookie_out(self, set_cookie=True): self._set_cookie_values() self.request['cookie_out'] = self.cookie[self.key].output(header='') self.request['set_cookie'] = set_cookie def _set_cookie_http_only(self): try: if self.httponly: self.cookie[self.key]['httponly'] = True except http_cookies.CookieError as e: if 'Invalid Attribute httponly' not in str(e): raise util.warn('Python 2.6+ is required to use httponly') def _create_id(self, set_new=True): self.id = _session_id() if set_new: self.is_new = True 
self.last_accessed = None if self.use_cookies: sc = set_new is False self._update_cookie_out(set_cookie=sc) @property def created(self): return self['_creation_time'] def _set_domain(self, domain): self['_domain'] = self._domain = domain self._update_cookie_out() def _get_domain(self): return self._domain domain = property(_get_domain, _set_domain) def _set_path(self, path): self['_path'] = self._path = path self._update_cookie_out() def _get_path(self): return self._path path = property(_get_path, _set_path) def _encrypt_data(self, session_data=None): """Serialize, encipher, and base64 the session dict""" session_data = session_data or self.copy() if self.encrypt_key: nonce_len, nonce_b64len = self.encrypt_nonce_size nonce = b64encode(os.urandom(nonce_len))[:nonce_b64len] encrypt_key = crypto.generateCryptoKeys(self.encrypt_key, self.validate_key + nonce, 1, self.crypto_module.getKeyLength()) data = self.serializer.dumps(session_data) return nonce + b64encode(self.crypto_module.aesEncrypt(data, encrypt_key)) else: data = self.serializer.dumps(session_data) return b64encode(data) def _decrypt_data(self, session_data): """Base64, decipher, then un-serialize the data for the session dict""" if self.encrypt_key: __, nonce_b64len = self.encrypt_nonce_size nonce = session_data[:nonce_b64len] encrypt_key = crypto.generateCryptoKeys(self.encrypt_key, self.validate_key + nonce, 1, self.crypto_module.getKeyLength()) payload = b64decode(session_data[nonce_b64len:]) data = self.crypto_module.aesDecrypt(payload, encrypt_key) else: data = b64decode(session_data) return self.serializer.loads(data) def _delete_cookie(self): self.request['set_cookie'] = True expires = datetime.utcnow() - timedelta(365) self._set_cookie_values(expires) self._update_cookie_out() def delete(self): """Deletes the session from the persistent storage, and sends an expired cookie out""" if self.use_cookies: self._delete_cookie() self.clear() def invalidate(self): """Invalidates this session, creates a new session id, returns to the is_new state""" self.clear() self.was_invalidated = True self._create_id() self.load() def load(self): "Loads the data from this session from persistent storage" self.namespace = self.namespace_class(self.id, data_dir=self.data_dir, digest_filenames=False, **self.namespace_args) now = time.time() if self.use_cookies: self.request['set_cookie'] = True self.namespace.acquire_read_lock() timed_out = False try: self.clear() try: session_data = self.namespace['session'] if (session_data is not None and self.encrypt_key): session_data = self._decrypt_data(session_data) # Memcached always returns a key, its None when its not # present if session_data is None: session_data = { '_creation_time': now, '_accessed_time': now } self.is_new = True except (KeyError, TypeError): session_data = { '_creation_time': now, '_accessed_time': now } self.is_new = True if session_data is None or len(session_data) == 0: session_data = { '_creation_time': now, '_accessed_time': now } self.is_new = True if self.timeout is not None and \ now - session_data['_accessed_time'] > self.timeout: timed_out = True else: # Properly set the last_accessed time, which is different # than the *currently* _accessed_time if self.is_new or '_accessed_time' not in session_data: self.last_accessed = None else: self.last_accessed = session_data['_accessed_time'] # Update the current _accessed_time session_data['_accessed_time'] = now # Set the path if applicable if '_path' in session_data: self._path = session_data['_path'] 
self.update(session_data) self.accessed_dict = session_data.copy() finally: self.namespace.release_read_lock() if timed_out: self.invalidate() def save(self, accessed_only=False): """Saves the data for this session to persistent storage If accessed_only is True, then only the original data loaded at the beginning of the request will be saved, with the updated last accessed time. """ # Look to see if its a new session that was only accessed # Don't save it under that case if accessed_only and (self.is_new or not self.save_atime): return None # this session might not have a namespace yet or the session id # might have been regenerated if not hasattr(self, 'namespace') or self.namespace.namespace != self.id: self.namespace = self.namespace_class( self.id, data_dir=self.data_dir, digest_filenames=False, **self.namespace_args) self.namespace.acquire_write_lock(replace=True) try: if accessed_only: data = dict(self.accessed_dict.items()) else: data = dict(self.items()) if self.encrypt_key: data = self._encrypt_data(data) # Save the data if not data and 'session' in self.namespace: del self.namespace['session'] else: self.namespace['session'] = data finally: self.namespace.release_write_lock() if self.use_cookies and self.is_new: self.request['set_cookie'] = True def revert(self): """Revert the session to its original state from its first access in the request""" self.clear() self.update(self.accessed_dict) def regenerate_id(self): """ creates a new session id, retains all session data Its a good security practice to regnerate the id after a client elevates privileges. """ self._create_id(set_new=False) # TODO: I think both these methods should be removed. They're from # the original mod_python code i was ripping off but they really # have no use here. def lock(self): """Locks this session against other processes/threads. This is automatic when load/save is called. ***use with caution*** and always with a corresponding 'unlock' inside a "finally:" block, as a stray lock typically cannot be unlocked without shutting down the whole application. """ self.namespace.acquire_write_lock() def unlock(self): """Unlocks this session against other processes/threads. This is automatic when load/save is called. ***use with caution*** and always within a "finally:" block, as a stray lock typically cannot be unlocked without shutting down the whole application. """ self.namespace.release_write_lock() class CookieSession(Session): """Pure cookie-based session Options recognized when using cookie-based sessions are slightly more restricted than general sessions. :param key: The name the cookie should be set to. :param timeout: How long session data is considered valid. This is used regardless of the cookie being present or not to determine whether session data is still valid. :type timeout: int :param save_accessed_time: Whether beaker should save the session's access time (True) or only modification time (False). Defaults to True. :param cookie_expires: Expiration date for cookie :param cookie_domain: Domain to use for the cookie. :param cookie_path: Path to use for the cookie. :param data_serializer: If ``"json"`` or ``"pickle"`` should be used to serialize data. Can also be an object with ``loads` and ``dumps`` methods. By default ``"pickle"`` is used. :param secure: Whether or not the cookie should only be sent over SSL. :param httponly: Whether or not the cookie should only be accessible by the browser not by JavaScript. 
:param encrypt_key: The key to use for the local session encryption, if not provided the session will not be encrypted. :param validate_key: The key used to sign the local encrypted session :param invalidate_corrupt: How to handle corrupt data when loading. When set to True, then corrupt data will be silently invalidated and a new session created, otherwise invalid data will cause an exception. :type invalidate_corrupt: bool :param crypto_type: The crypto module to use. :param samesite: SameSite value for the cookie -- should be either 'Lax', 'Strict', or None. """ def __init__(self, request, key='beaker.session.id', timeout=None, save_accessed_time=True, cookie_expires=True, cookie_domain=None, cookie_path='/', encrypt_key=None, validate_key=None, secure=False, httponly=False, data_serializer='pickle', encrypt_nonce_bits=DEFAULT_NONCE_BITS, invalidate_corrupt=False, crypto_type='default', samesite='Lax', **kwargs): self.crypto_module = get_crypto_module(crypto_type) if encrypt_key and not self.crypto_module.has_aes: raise InvalidCryptoBackendError("No AES library is installed, can't generate " "encrypted cookie-only Session.") self.request = request self.key = key self.timeout = timeout self.save_atime = save_accessed_time self.cookie_expires = cookie_expires self.encrypt_key = encrypt_key self.validate_key = validate_key self.encrypt_nonce_size = get_nonce_size(encrypt_nonce_bits) self.request['set_cookie'] = False self.secure = secure self.httponly = httponly self.samesite = samesite self._domain = cookie_domain self._path = cookie_path self.invalidate_corrupt = invalidate_corrupt self._set_serializer(data_serializer) try: cookieheader = request['cookie'] except KeyError: cookieheader = '' if validate_key is None: raise BeakerException("No validate_key specified for Cookie only " "Session.") if timeout and not save_accessed_time: raise BeakerException("timeout requires save_accessed_time") try: self.cookie = SignedCookie( validate_key, input=cookieheader, ) except http_cookies.CookieError: self.cookie = SignedCookie( validate_key, input=None, ) self['_id'] = _session_id() self.is_new = True # If we have a cookie, load it if self.key in self.cookie and self.cookie[self.key].value is not None: self.is_new = False try: cookie_data = self.cookie[self.key].value if cookie_data is InvalidSignature: raise BeakerException("Invalid signature") self.update(self._decrypt_data(cookie_data)) self._path = self.get('_path', '/') except Exception as e: if self.invalidate_corrupt: util.warn( "Invalidating corrupt session %s; " "error was: %s. Set invalidate_corrupt=False " "to propagate this exception." 
% (self.id, e)) self.invalidate() else: raise if self.timeout is not None: now = time.time() last_accessed_time = self.get('_accessed_time', now) if now - last_accessed_time > self.timeout: self.clear() self.accessed_dict = self.copy() self._create_cookie() def created(self): return self['_creation_time'] created = property(created) def id(self): return self['_id'] id = property(id) def _set_domain(self, domain): self['_domain'] = domain self._domain = domain def _get_domain(self): return self._domain domain = property(_get_domain, _set_domain) def _set_path(self, path): self['_path'] = self._path = path def _get_path(self): return self._path path = property(_get_path, _set_path) def save(self, accessed_only=False): """Saves the data for this session to persistent storage""" if accessed_only and (self.is_new or not self.save_atime): return if accessed_only: self.clear() self.update(self.accessed_dict) self._create_cookie() def expire(self): """Delete the 'expires' attribute on this Session, if any.""" self.pop('_expires', None) def _create_cookie(self): if '_creation_time' not in self: self['_creation_time'] = time.time() if '_id' not in self: self['_id'] = _session_id() self['_accessed_time'] = time.time() val = self._encrypt_data() if len(val) > 4064: raise BeakerException("Cookie value is too long to store") self.cookie[self.key] = val if '_expires' in self: expires = self['_expires'] else: expires = None expires = self._set_cookie_expires(expires) if expires is not None: self['_expires'] = expires if '_domain' in self: self.cookie[self.key]['domain'] = self['_domain'] elif self._domain: self.cookie[self.key]['domain'] = self._domain if self.secure: self.cookie[self.key]['secure'] = True self._set_cookie_http_only() self.cookie[self.key]['path'] = self.get('_path', '/') self.request['cookie_out'] = self.cookie[self.key].output(header='') self.request['set_cookie'] = True def delete(self): """Delete the cookie, and clear the session""" # Send a delete cookie request self._delete_cookie() self.clear() def invalidate(self): """Clear the contents and start a new session""" self.clear() self['_id'] = _session_id() class SessionObject(object): """Session proxy/lazy creator This object proxies access to the actual session object, so that in the case that the session hasn't been used before, it will be setup. This avoid creating and loading the session from persistent storage unless its actually used during the request. 
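
    A minimal WSGI sketch (the application body and the ``session.type``
    value are illustrative assumptions, not requirements)::

        from beaker.middleware import SessionMiddleware

        def app(environ, start_response):
            session = environ['beaker.session']
            session['visits'] = session.get('visits', 0) + 1
            session.save()
            start_response('200 OK', [('Content-Type', 'text/plain')])
            return [str(session['visits']).encode('utf-8')]

        wsgi_app = SessionMiddleware(app, {'session.type': 'memory'})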
""" def __init__(self, environ, **params): self.__dict__['_params'] = params self.__dict__['_environ'] = environ self.__dict__['_sess'] = None self.__dict__['_headers'] = {} def _session(self): """Lazy initial creation of session object""" if self.__dict__['_sess'] is None: params = self.__dict__['_params'] environ = self.__dict__['_environ'] self.__dict__['_headers'] = req = {'cookie_out': None} req['cookie'] = environ.get('HTTP_COOKIE') session_cls = params.get('session_class', None) if session_cls is None: if params.get('type') == 'cookie': session_cls = CookieSession else: session_cls = Session else: assert issubclass(session_cls, Session),\ "Not a Session: " + session_cls self.__dict__['_sess'] = session_cls(req, **params) return self.__dict__['_sess'] def __getattr__(self, attr): return getattr(self._session(), attr) def __setattr__(self, attr, value): setattr(self._session(), attr, value) def __delattr__(self, name): self._session().__delattr__(name) def __getitem__(self, key): return self._session()[key] def __setitem__(self, key, value): self._session()[key] = value def __delitem__(self, key): self._session().__delitem__(key) def __repr__(self): return self._session().__repr__() def __iter__(self): """Only works for proxying to a dict""" return iter(self._session().keys()) def __contains__(self, key): return key in self._session() def has_key(self, key): return key in self._session() def get_by_id(self, id): """Loads a session given a session ID""" params = self.__dict__['_params'] session = Session({}, use_cookies=False, id=id, **params) if session.is_new: return None return session def save(self): self.__dict__['_dirty'] = True def delete(self): self.__dict__['_dirty'] = True self._session().delete() def persist(self): """Persist the session to the storage Always saves the whole session if save() or delete() have been called. If they haven't: - If autosave is set to true, saves the the entire session regardless. - If save_accessed_time is set to true or unset, only saves the updated access time. - If save_accessed_time is set to false, doesn't save anything. """ if self.__dict__['_params'].get('auto'): self._session().save() elif self.__dict__['_params'].get('save_accessed_time', True): if self.dirty(): self._session().save() else: self._session().save(accessed_only=True) else: # save_accessed_time is false if self.dirty(): self._session().save() def dirty(self): """Returns True if save() or delete() have been called""" return self.__dict__.get('_dirty', False) def accessed(self): """Returns whether or not the session has been accessed""" return self.__dict__['_sess'] is not None Beaker-1.10.0/beaker/synchronization.py0000644000076500000240000002640513262740345020116 0ustar amolstaff00000000000000"""Synchronization functions. File- and mutex-based mutual exclusion synchronizers are provided, as well as a name-based mutex which locks within an application based on a string name. """ import errno import os import sys import tempfile try: import threading as _threading except ImportError: import dummy_threading as _threading # check for fcntl module try: sys.getwindowsversion() has_flock = False except: try: import fcntl has_flock = True except ImportError: has_flock = False from beaker import util from beaker.exceptions import LockError __all__ = ["file_synchronizer", "mutex_synchronizer", "null_synchronizer", "NameLock", "_threading"] class NameLock(object): """a proxy for an RLock object that is stored in a name based registry. 
Multiple threads can get a reference to the same RLock based on the name alone, and synchronize operations related to that name. """ locks = util.WeakValuedRegistry() class NLContainer(object): def __init__(self, reentrant): if reentrant: self.lock = _threading.RLock() else: self.lock = _threading.Lock() def __call__(self): return self.lock def __init__(self, identifier=None, reentrant=False): if identifier is None: self._lock = NameLock.NLContainer(reentrant) else: self._lock = NameLock.locks.get(identifier, NameLock.NLContainer, reentrant) def acquire(self, wait=True): return self._lock().acquire(wait) def release(self): self._lock().release() _synchronizers = util.WeakValuedRegistry() def _synchronizer(identifier, cls, **kwargs): return _synchronizers.sync_get((identifier, cls), cls, identifier, **kwargs) def file_synchronizer(identifier, **kwargs): if not has_flock or 'lock_dir' not in kwargs: return mutex_synchronizer(identifier) else: return _synchronizer(identifier, FileSynchronizer, **kwargs) def mutex_synchronizer(identifier, **kwargs): return _synchronizer(identifier, ConditionSynchronizer, **kwargs) class null_synchronizer(object): """A 'null' synchronizer, which provides the :class:`.SynchronizerImpl` interface without any locking. """ def acquire_write_lock(self, wait=True): return True def acquire_read_lock(self): pass def release_write_lock(self): pass def release_read_lock(self): pass acquire = acquire_write_lock release = release_write_lock class SynchronizerImpl(object): """Base class for a synchronization object that allows multiple readers, single writers. """ def __init__(self): self._state = util.ThreadLocal() class SyncState(object): __slots__ = 'reentrantcount', 'writing', 'reading' def __init__(self): self.reentrantcount = 0 self.writing = False self.reading = False def state(self): if not self._state.has(): state = SynchronizerImpl.SyncState() self._state.put(state) return state else: return self._state.get() state = property(state) def release_read_lock(self): state = self.state if state.writing: raise LockError("lock is in writing state") if not state.reading: raise LockError("lock is not in reading state") if state.reentrantcount == 1: self.do_release_read_lock() state.reading = False state.reentrantcount -= 1 def acquire_read_lock(self, wait=True): state = self.state if state.writing: raise LockError("lock is in writing state") if state.reentrantcount == 0: x = self.do_acquire_read_lock(wait) if (wait or x): state.reentrantcount += 1 state.reading = True return x elif state.reading: state.reentrantcount += 1 return True def release_write_lock(self): state = self.state if state.reading: raise LockError("lock is in reading state") if not state.writing: raise LockError("lock is not in writing state") if state.reentrantcount == 1: self.do_release_write_lock() state.writing = False state.reentrantcount -= 1 release = release_write_lock def acquire_write_lock(self, wait=True): state = self.state if state.reading: raise LockError("lock is in reading state") if state.reentrantcount == 0: x = self.do_acquire_write_lock(wait) if (wait or x): state.reentrantcount += 1 state.writing = True return x elif state.writing: state.reentrantcount += 1 return True acquire = acquire_write_lock def do_release_read_lock(self): raise NotImplementedError() def do_acquire_read_lock(self, wait): raise NotImplementedError() def do_release_write_lock(self): raise NotImplementedError() def do_acquire_write_lock(self, wait): raise NotImplementedError() class 
FileSynchronizer(SynchronizerImpl): """A synchronizer which locks using flock(). """ def __init__(self, identifier, lock_dir): super(FileSynchronizer, self).__init__() self._filedescriptor = util.ThreadLocal() if lock_dir is None: lock_dir = tempfile.gettempdir() else: lock_dir = lock_dir self.filename = util.encoded_path( lock_dir, [identifier], extension='.lock' ) self.lock_dir = os.path.dirname(self.filename) def _filedesc(self): return self._filedescriptor.get() _filedesc = property(_filedesc) def _ensuredir(self): if not os.path.exists(self.lock_dir): util.verify_directory(self.lock_dir) def _open(self, mode): filedescriptor = self._filedesc if filedescriptor is None: self._ensuredir() filedescriptor = os.open(self.filename, mode) self._filedescriptor.put(filedescriptor) return filedescriptor def do_acquire_read_lock(self, wait): filedescriptor = self._open(os.O_CREAT | os.O_RDONLY) if not wait: try: fcntl.flock(filedescriptor, fcntl.LOCK_SH | fcntl.LOCK_NB) return True except IOError: os.close(filedescriptor) self._filedescriptor.remove() return False else: fcntl.flock(filedescriptor, fcntl.LOCK_SH) return True def do_acquire_write_lock(self, wait): filedescriptor = self._open(os.O_CREAT | os.O_WRONLY) if not wait: try: fcntl.flock(filedescriptor, fcntl.LOCK_EX | fcntl.LOCK_NB) return True except IOError: os.close(filedescriptor) self._filedescriptor.remove() return False else: fcntl.flock(filedescriptor, fcntl.LOCK_EX) return True def do_release_read_lock(self): self._release_all_locks() def do_release_write_lock(self): self._release_all_locks() def _release_all_locks(self): filedescriptor = self._filedesc if filedescriptor is not None: fcntl.flock(filedescriptor, fcntl.LOCK_UN) os.close(filedescriptor) self._filedescriptor.remove() class ConditionSynchronizer(SynchronizerImpl): """a synchronizer using a Condition.""" def __init__(self, identifier): super(ConditionSynchronizer, self).__init__() # counts how many asynchronous methods are executing self.asynch = 0 # pointer to thread that is the current sync operation self.current_sync_operation = None # condition object to lock on self.condition = _threading.Condition(_threading.Lock()) def do_acquire_read_lock(self, wait=True): self.condition.acquire() try: # see if a synchronous operation is waiting to start # or is already running, in which case we wait (or just # give up and return) if wait: while self.current_sync_operation is not None: self.condition.wait() else: if self.current_sync_operation is not None: return False self.asynch += 1 finally: self.condition.release() if not wait: return True def do_release_read_lock(self): self.condition.acquire() try: self.asynch -= 1 # check if we are the last asynchronous reader thread # out the door. if self.asynch == 0: # yes. so if a sync operation is waiting, notifyAll to wake # it up if self.current_sync_operation is not None: self.condition.notifyAll() elif self.asynch < 0: raise LockError("Synchronizer error - too many " "release_read_locks called") finally: self.condition.release() def do_acquire_write_lock(self, wait=True): self.condition.acquire() try: # here, we are not a synchronous reader, and after returning, # assuming waiting or immediate availability, we will be. 
if wait: # if another sync is working, wait while self.current_sync_operation is not None: self.condition.wait() else: # if another sync is working, # we dont want to wait, so forget it if self.current_sync_operation is not None: return False # establish ourselves as the current sync # this indicates to other read/write operations # that they should wait until this is None again self.current_sync_operation = _threading.currentThread() # now wait again for asyncs to finish if self.asynch > 0: if wait: # wait self.condition.wait() else: # we dont want to wait, so forget it self.current_sync_operation = None return False finally: self.condition.release() if not wait: return True def do_release_write_lock(self): self.condition.acquire() try: if self.current_sync_operation is not _threading.currentThread(): raise LockError("Synchronizer error - current thread doesnt " "have the write lock") # reset the current sync operation so # another can get it self.current_sync_operation = None # tell everyone to get ready self.condition.notifyAll() finally: # everyone go !! self.condition.release() Beaker-1.10.0/beaker/util.py0000644000076500000240000004133613240315651015624 0ustar amolstaff00000000000000"""Beaker utilities""" import hashlib import socket import binascii from ._compat import PY2, string_type, unicode_text, NoneType, dictkeyslist, im_class, im_func, pickle, func_signature, \ default_im_func try: import threading as _threading except ImportError: import dummy_threading as _threading from datetime import datetime, timedelta import os import re import string import types import weakref import warnings import sys import inspect import json import zlib from beaker.converters import asbool from beaker import exceptions from threading import local as _tlocal DEFAULT_CACHE_KEY_LENGTH = 250 __all__ = ["ThreadLocal", "WeakValuedRegistry", "SyncDict", "encoded_path", "verify_directory", "serialize", "deserialize"] def function_named(fn, name): """Return a function with a given __name__. Will assign to __name__ and return the original function if possible on the Python implementation, otherwise a new function will be constructed. """ fn.__name__ = name return fn def skip_if(predicate, reason=None): """Skip a test if predicate is true.""" reason = reason or predicate.__name__ from nose import SkipTest def decorate(fn): fn_name = fn.__name__ def maybe(*args, **kw): if predicate(): msg = "'%s' skipped: %s" % ( fn_name, reason) raise SkipTest(msg) else: return fn(*args, **kw) return function_named(maybe, fn_name) return decorate def assert_raises(except_cls, callable_, *args, **kw): """Assert the given exception is raised by the given function + arguments.""" try: callable_(*args, **kw) success = False except except_cls: success = True # assert outside the block so it works for AssertionError too ! assert success, "Callable did not raise an exception" def verify_directory(dir): """verifies and creates a directory. 
tries to ignore collisions with other threads and processes.""" tries = 0 while not os.access(dir, os.F_OK): try: tries += 1 os.makedirs(dir) except: if tries > 5: raise def has_self_arg(func): """Return True if the given function has a 'self' argument.""" args = list(func_signature(func).parameters) if args and args[0] in ('self', 'cls'): return True else: return False def warn(msg, stacklevel=3): """Issue a warning.""" if isinstance(msg, string_type): warnings.warn(msg, exceptions.BeakerWarning, stacklevel=stacklevel) else: warnings.warn(msg, stacklevel=stacklevel) def deprecated(message): def wrapper(fn): def deprecated_method(*args, **kargs): warnings.warn(message, DeprecationWarning, 2) return fn(*args, **kargs) # TODO: use functools.wraps or the decorator package? deprecated_method.__name__ = fn.__name__ deprecated_method.__doc__ = "%s\n\n%s" % (message, fn.__doc__) return deprecated_method return wrapper class ThreadLocal(object): """Stores a value on a per-thread basis.""" __slots__ = '_tlocal' def __init__(self): self._tlocal = _tlocal() def put(self, value): self._tlocal.value = value def has(self): return hasattr(self._tlocal, 'value') def get(self, default=None): return getattr(self._tlocal, 'value', default) def remove(self): del self._tlocal.value class SyncDict(object): """An efficient, threadsafe singleton map: "get a value based on this key, and create it if not found or not valid" - that is, exists && isvalid ? get : create. Designed to work with weakref dictionaries, whose items may asynchronously disappear from the dictionary. Requires Python 2.3.3 or later; earlier interpreters have a garbage collection/weakref bug (fixed in Nov. 2003) that affects this pattern. """ def __init__(self): self.mutex = _threading.Lock() self.dict = {} def get(self, key, createfunc, *args, **kwargs): try: if key in self.dict: return self.dict[key] else: return self.sync_get(key, createfunc, *args, **kwargs) except KeyError: return self.sync_get(key, createfunc, *args, **kwargs) def sync_get(self, key, createfunc, *args, **kwargs): self.mutex.acquire() try: try: if key in self.dict: return self.dict[key] else: return self._create(key, createfunc, *args, **kwargs) except KeyError: return self._create(key, createfunc, *args, **kwargs) finally: self.mutex.release() def _create(self, key, createfunc, *args, **kwargs): self[key] = obj = createfunc(*args, **kwargs) return obj def has_key(self, key): return key in self.dict def __contains__(self, key): return self.dict.__contains__(key) def __getitem__(self, key): return self.dict.__getitem__(key) def __setitem__(self, key, value): self.dict.__setitem__(key, value) def __delitem__(self, key): return self.dict.__delitem__(key) def clear(self): self.dict.clear() class WeakValuedRegistry(SyncDict): def __init__(self): self.mutex = _threading.RLock() self.dict = weakref.WeakValueDictionary() sha1 = None def encoded_path(root, identifiers, extension=".enc", depth=3, digest_filenames=True): """Generate a unique file-accessible path from the given list of identifiers starting at the given root directory.""" ident = "_".join(identifiers) global sha1 if sha1 is None: from beaker.crypto import sha1 if digest_filenames: if isinstance(ident, unicode_text): ident = sha1(ident.encode('utf-8')).hexdigest() else: ident = sha1(ident).hexdigest() ident = os.path.basename(ident) tokens = [] for d in range(1, depth): tokens.append(ident[0:d]) dir = os.path.join(root, *tokens) verify_directory(dir) return os.path.join(dir, ident + extension)
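# Usage sketch (illustrative only, not part of beaker.util): SyncDict
# returns the value stored for a key, creating it exactly once under the
# mutex on a miss; extra arguments are forwarded to createfunc.
if __name__ == '__main__':
    registry = SyncDict()
    conn = registry.get('db', dict, host='localhost')  # created on first use
    assert conn is registry.get('db', dict)            # cached thereafter

def asint(obj): if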
isinstance(obj, int): return obj elif isinstance(obj, string_type) and re.match(r'^\d+$', obj): return int(obj) else: raise Exception("Not a valid integer: %r" % obj) def verify_options(opt, types, error): if not isinstance(opt, types): if not isinstance(types, tuple): types = (types,) coerced = False for typ in types: try: if typ in (list, tuple): opt = [x.strip() for x in opt.split(',')] else: if typ == bool: typ = asbool elif typ == int: typ = asint elif typ in (timedelta, datetime): if not isinstance(opt, typ): raise Exception("%s requires a timedelta type" % typ) opt = typ(opt) coerced = True except: pass if coerced: break if not coerced: raise Exception(error) elif isinstance(opt, str) and not opt.strip(): raise Exception("Empty strings are invalid for: %s" % error) return opt def verify_rules(params, ruleset): for key, types, message in ruleset: if key in params: params[key] = verify_options(params[key], types, message) return params def coerce_session_params(params): rules = [ ('data_dir', (str, NoneType), "data_dir must be a string referring to a directory."), ('lock_dir', (str, NoneType), "lock_dir must be a string referring to a directory."), ('type', (str, NoneType), "Session type must be a string."), ('cookie_expires', (bool, datetime, timedelta, int), "Cookie expires was not a boolean, datetime, int, or timedelta instance."), ('cookie_domain', (str, NoneType), "Cookie domain must be a string."), ('cookie_path', (str, NoneType), "Cookie path must be a string."), ('id', (str,), "Session id must be a string."), ('key', (str,), "Session key must be a string."), ('secret', (str, NoneType), "Session secret must be a string."), ('validate_key', (str, NoneType), "Session validate_key must be a string."), ('encrypt_key', (str, NoneType), "Session encrypt_key must be a string."), ('encrypt_nonce_bits', (int, NoneType), "Session encrypt_nonce_bits must be an integer."), ('secure', (bool, NoneType), "Session secure must be a boolean."), ('httponly', (bool, NoneType), "Session httponly must be a boolean."), ('timeout', (int, NoneType), "Session timeout must be an integer."), ('save_accessed_time', (bool, NoneType), "Session save_accessed_time must be a boolean (defaults to true)."), ('auto', (bool, NoneType), "Session auto must be a boolean (the session is created if accessed)."), ('webtest_varname', (str, NoneType), "Session varname must be a string."), ('data_serializer', (str,), "data_serializer must be a string.") ] opts = verify_rules(params, rules) cookie_expires = opts.get('cookie_expires') if cookie_expires and isinstance(cookie_expires, int) and \ not isinstance(cookie_expires, bool): opts['cookie_expires'] = timedelta(seconds=cookie_expires) if opts.get('timeout') is not None and not opts.get('save_accessed_time', True): raise Exception("save_accessed_time must be true to use timeout") return opts def coerce_cache_params(params): rules = [ ('data_dir', (str, NoneType), "data_dir must be a string referring to a directory."), ('lock_dir', (str, NoneType), "lock_dir must be a string referring to a directory."), ('type', (str,), "Cache type must be a string."), ('enabled', (bool, NoneType), "enabled must be true/false if present."), ('expire', (int, NoneType), "expire must be an integer representing how many seconds the cache is valid for"), ('regions', (list, tuple, NoneType), "Regions must be a comma separated list of valid regions"), ('key_length', (int, NoneType), "key_length must be an integer which indicates the longest a key can be before hashing"), ] return verify_rules(params, rules)
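# Usage sketch (illustrative only): an integer cookie_expires is coerced to
# a timedelta, while the rules above type-check each option that is present.
if __name__ == '__main__':
    opts = coerce_session_params({'type': 'memory', 'cookie_expires': 300})
    assert opts['cookie_expires'] == timedelta(seconds=300)

def 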
coerce_memcached_behaviors(behaviors): rules = [ ('cas', (bool, int), 'cas must be a boolean or an integer'), ('no_block', (bool, int), 'no_block must be a boolean or an integer'), ('receive_timeout', (int,), 'receive_timeout must be an integer'), ('send_timeout', (int,), 'send_timeout must be an integer'), ('ketama_hash', (str,), 'ketama_hash must be a string designating a valid hashing strategy option'), ('_poll_timeout', (int,), '_poll_timeout must be an integer'), ('auto_eject', (bool, int), 'auto_eject must be a boolean or an integer'), ('retry_timeout', (int,), 'retry_timeout must be an integer'), ('_sort_hosts', (bool, int), '_sort_hosts must be a boolean or an integer'), ('_io_msg_watermark', (int,), '_io_msg_watermark must be an integer'), ('ketama', (bool, int), 'ketama must be a boolean or an integer'), ('ketama_weighted', (bool, int), 'ketama_weighted must be a boolean or an integer'), ('_io_key_prefetch', (int, bool), '_io_key_prefetch must be a boolean or an integer'), ('_hash_with_prefix_key', (bool, int), '_hash_with_prefix_key must be a boolean or an integer'), ('tcp_nodelay', (bool, int), 'tcp_nodelay must be a boolean or an integer'), ('failure_limit', (int,), 'failure_limit must be an integer'), ('buffer_requests', (bool, int), 'buffer_requests must be a boolean or an integer'), ('_socket_send_size', (int,), '_socket_send_size must be an integer'), ('num_replicas', (int,), 'num_replicas must be an integer'), ('remove_failed', (int,), 'remove_failed must be an integer'), ('_noreply', (bool, int), '_noreply must be a boolean or an integer'), ('_io_bytes_watermark', (int,), '_io_bytes_watermark must be an integer'), ('_socket_recv_size', (int,), '_socket_recv_size must be an integer'), ('distribution', (str,), 'distribution must be a string designating a valid distribution option'), ('connect_timeout', (int,), 'connect_timeout must be an integer'), ('hash', (str,), 'hash must be a string designating a valid hashing option'), ('verify_keys', (bool, int), 'verify_keys must be a boolean or an integer'), ('dead_timeout', (int,), 'dead_timeout must be an integer') ] return verify_rules(behaviors, rules) def parse_cache_config_options(config, include_defaults=True): """Parse configuration options and validate for use with the CacheManager""" # Load default cache options if include_defaults: options = dict(type='memory', data_dir=None, expire=None, log_file=None) else: options = {} for key, val in config.items(): if key.startswith('beaker.cache.'): options[key[13:]] = val if key.startswith('cache.'): options[key[6:]] = val coerce_cache_params(options) # Set cache to enabled if not turned off if 'enabled' not in options and include_defaults: options['enabled'] = True # Configure region dict if regions are available regions = options.pop('regions', None) if regions: region_configs = {} for region in regions: if not region: # ensure region name is valid continue # Setup the default cache options region_options = dict(data_dir=options.get('data_dir'), lock_dir=options.get('lock_dir'), type=options.get('type'), enabled=options['enabled'], expire=options.get('expire'), key_length=options.get('key_length', DEFAULT_CACHE_KEY_LENGTH)) region_prefix = '%s.'
% region region_len = len(region_prefix) for key in dictkeyslist(options): if key.startswith(region_prefix): region_options[key[region_len:]] = options.pop(key) coerce_cache_params(region_options) region_configs[region] = region_options options['cache_regions'] = region_configs return options def parse_memcached_behaviors(config): """Parse behavior options and validate for use with pylibmc client/PylibMCNamespaceManager, or potentially other memcached NamespaceManagers that support behaviors""" behaviors = {} for key, val in config.items(): if key.startswith('behavior.'): behaviors[key[9:]] = val coerce_memcached_behaviors(behaviors) return behaviors def func_namespace(func): """Generates a unique namespace for a function""" kls = None if hasattr(func, 'im_func') or hasattr(func, '__func__'): kls = im_class(func) func = im_func(func) if kls: return '%s.%s' % (kls.__module__, kls.__name__) else: return '%s|%s' % (inspect.getsourcefile(func), func.__name__) class PickleSerializer(object): def loads(self, data_string): return pickle.loads(data_string) def dumps(self, data): return pickle.dumps(data, 2) class JsonSerializer(object): def loads(self, data_string): return json.loads(zlib.decompress(data_string).decode('utf-8')) def dumps(self, data): return zlib.compress(json.dumps(data).encode('utf-8')) def serialize(data, method): if method == 'json': serializer = JsonSerializer() else: serializer = PickleSerializer() return serializer.dumps(data) def deserialize(data_string, method): if method == 'json': serializer = JsonSerializer() else: serializer = PickleSerializer() return serializer.loads(data_string) def machine_identifier(): machine_hash = hashlib.md5() if not PY2: machine_hash.update(socket.gethostname().encode()) else: machine_hash.update(socket.gethostname()) return binascii.hexlify(machine_hash.digest()[0:3]).decode('ascii') def safe_write (filepath, contents): if os.name == 'posix': tempname = '%s.temp' % (filepath) fh = open(tempname, 'wb') fh.write(contents) fh.close() os.rename(tempname, filepath) else: fh = open(filepath, 'wb') fh.write(contents) fh.close() Beaker-1.10.0/Beaker.egg-info/0000755000076500000240000000000013305324602015716 5ustar amolstaff00000000000000Beaker-1.10.0/Beaker.egg-info/dependency_links.txt0000644000076500000240000000000113305324602021764 0ustar amolstaff00000000000000 Beaker-1.10.0/Beaker.egg-info/entry_points.txt0000644000076500000240000000076013305324602021217 0ustar amolstaff00000000000000 [paste.filter_factory] beaker_session = beaker.middleware:session_filter_factory [paste.filter_app_factory] beaker_session = beaker.middleware:session_filter_app_factory [beaker.backends] database = beaker.ext.database:DatabaseNamespaceManager memcached = beaker.ext.memcached:MemcachedNamespaceManager google = beaker.ext.google:GoogleNamespaceManager sqla = beaker.ext.sqla:SqlaNamespaceManager Beaker-1.10.0/Beaker.egg-info/not-zip-safe0000644000076500000240000000000112473360657020163 0ustar amolstaff00000000000000 Beaker-1.10.0/Beaker.egg-info/pbr.json0000644000076500000240000000005712563711745017413 0ustar amolstaff00000000000000{"is_release": false, "git_version": "5c407db"}Beaker-1.10.0/Beaker.egg-info/PKG-INFO0000644000076500000240000000740613305324602017022 0ustar amolstaff00000000000000Metadata-Version: 1.1 Name: Beaker Version: 1.10.0 Summary: A Session and Caching library with WSGI Middleware Home-page: https://beaker.readthedocs.io/ Author: Ben Bangert, Mike Bayer, Philip Jenvey, Alessandro Molina Author-email: ben@groovie.org, pjenvey@groovie.org, 
amol@turbogears.org License: BSD Description-Content-Type: UNKNOWN Description: ========================= Cache and Session Library ========================= About ===== Beaker is a web session and general caching library that includes WSGI middleware for use in web applications. As a general caching library, Beaker can handle storing for various times any Python object that can be pickled with optional back-ends on a fine-grained basis. Beaker was built largely on the code from MyghtyUtils, then refactored and extended with database support. Beaker includes Cache and Session WSGI middleware to ease integration with WSGI capable frameworks, and is automatically used by `Pylons `_ and `TurboGears `_. Features ======== * Fast, robust performance * Multiple reader/single writer lock system to avoid duplicate simultaneous cache creation * Cache back-ends include dbm, file, memory, memcached, Redis, MongoDB, and database (Using SQLAlchemy for multiple-db vendor support) * Signed cookies to prevent session hijacking/spoofing * Cookie-only sessions to remove the need for a db or file backend (ideal for clustered systems) * Extensible Container object to support new back-ends * Caches can be divided into namespaces (to represent templates, objects, etc.) then keyed for different copies * Create functions for automatic call-backs to create new cache copies after expiration * Fine-grained toggling of back-ends, keys, and expiration per Cache object Documentation ============= Documentation can be found on the `Official Beaker Docs site `_. Source ====== The latest developer version is available in a `GitHub repository `_. Contributing ============ Bugs can be filed on GitHub, **should be accompanied by a test case** to retain current code coverage, and should be in a pull request when ready to be accepted into the beaker code-base. 
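A short usage sketch of the caching API described above (a minimal
sketch; ``parse_cache_config_options`` lives in ``beaker.util``)::

    from beaker.cache import CacheManager
    from beaker.util import parse_cache_config_options

    cache = CacheManager(**parse_cache_config_options({
        'cache.type': 'memory',
        'cache.expire': 60,
    }))
    tmpl_cache = cache.get_cache('mytemplate')
    value = tmpl_cache.get(key='answer', createfunc=lambda: 42)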
Keywords: wsgi myghty session web cache middleware Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Environment :: Web Environment Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: BSD License Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.2 Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content Classifier: Topic :: Internet :: WWW/HTTP :: WSGI Classifier: Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware Beaker-1.10.0/Beaker.egg-info/requires.txt0000644000076500000240000000035113305324602020315 0ustar amolstaff00000000000000funcsigs [crypto] pycryptopp>=0.5.12 [cryptography] cryptography [pycrypto] pycrypto [pycryptodome] pycryptodome [testsuite] nose Mock pycryptodome webtest coverage cryptography sqlalchemy pymongo redis pylibmc python-memcached Beaker-1.10.0/Beaker.egg-info/SOURCES.txt0000644000076500000240000000152013305324602017600 0ustar amolstaff00000000000000README.rst setup.cfg setup.py Beaker.egg-info/PKG-INFO Beaker.egg-info/SOURCES.txt Beaker.egg-info/dependency_links.txt Beaker.egg-info/entry_points.txt Beaker.egg-info/not-zip-safe Beaker.egg-info/pbr.json Beaker.egg-info/requires.txt Beaker.egg-info/top_level.txt beaker/__init__.py beaker/_compat.py beaker/cache.py beaker/container.py beaker/converters.py beaker/cookie.py beaker/exceptions.py beaker/middleware.py beaker/session.py beaker/synchronization.py beaker/util.py beaker/crypto/__init__.py beaker/crypto/jcecrypto.py beaker/crypto/noencryption.py beaker/crypto/nsscrypto.py beaker/crypto/pbkdf2.py beaker/crypto/pyca_cryptography.py beaker/crypto/pycrypto.py beaker/crypto/util.py beaker/ext/__init__.py beaker/ext/database.py beaker/ext/google.py beaker/ext/memcached.py beaker/ext/mongodb.py beaker/ext/redisnm.py beaker/ext/sqla.pyBeaker-1.10.0/Beaker.egg-info/top_level.txt0000644000076500000240000000000713305324602020445 0ustar amolstaff00000000000000beaker Beaker-1.10.0/PKG-INFO0000644000076500000240000000740613305324602014137 0ustar amolstaff00000000000000Metadata-Version: 1.1 Name: Beaker Version: 1.10.0 Summary: A Session and Caching library with WSGI Middleware Home-page: https://beaker.readthedocs.io/ Author: Ben Bangert, Mike Bayer, Philip Jenvey, Alessandro Molina Author-email: ben@groovie.org, pjenvey@groovie.org, amol@turbogears.org License: BSD Description-Content-Type: UNKNOWN Description: ========================= Cache and Session Library ========================= About ===== Beaker is a web session and general caching library that includes WSGI middleware for use in web applications. As a general caching library, Beaker can handle storing for various times any Python object that can be pickled with optional back-ends on a fine-grained basis. Beaker was built largely on the code from MyghtyUtils, then refactored and extended with database support. 
Beaker includes Cache and Session WSGI middleware to ease integration with WSGI capable frameworks, and is automatically used by `Pylons `_ and `TurboGears `_. Features ======== * Fast, robust performance * Multiple reader/single writer lock system to avoid duplicate simultaneous cache creation * Cache back-ends include dbm, file, memory, memcached, Redis, MongoDB, and database (Using SQLAlchemy for multiple-db vendor support) * Signed cookies to prevent session hijacking/spoofing * Cookie-only sessions to remove the need for a db or file backend (ideal for clustered systems) * Extensible Container object to support new back-ends * Caches can be divided into namespaces (to represent templates, objects, etc.) then keyed for different copies * Create functions for automatic call-backs to create new cache copies after expiration * Fine-grained toggling of back-ends, keys, and expiration per Cache object Documentation ============= Documentation can be found on the `Official Beaker Docs site `_. Source ====== The latest developer version is available in a `GitHub repository `_. Contributing ============ Bugs can be filed on GitHub, **should be accompanied by a test case** to retain current code coverage, and should be in a pull request when ready to be accepted into the beaker code-base. Keywords: wsgi myghty session web cache middleware Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Environment :: Web Environment Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: BSD License Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.2 Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content Classifier: Topic :: Internet :: WWW/HTTP :: WSGI Classifier: Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware Beaker-1.10.0/README.rst0000644000076500000240000000364313211612622014526 0ustar amolstaff00000000000000========================= Cache and Session Library ========================= About ===== Beaker is a web session and general caching library that includes WSGI middleware for use in web applications. As a general caching library, Beaker can handle storing for various times any Python object that can be pickled with optional back-ends on a fine-grained basis. Beaker was built largely on the code from MyghtyUtils, then refactored and extended with database support. Beaker includes Cache and Session WSGI middleware to ease integration with WSGI capable frameworks, and is automatically used by `Pylons `_ and `TurboGears `_. 
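For example, a WSGI application can be wrapped directly (a minimal sketch;
the session options shown assume the file backend)::

    from beaker.middleware import SessionMiddleware

    def simple_app(environ, start_response):
        session = environ['beaker.session']
        session['visited'] = True
        session.save()
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello']

    app = SessionMiddleware(simple_app, {
        'session.type': 'file',
        'session.data_dir': '/tmp/sessions',
        'session.key': 'mysession',
    })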
Features ======== * Fast, robust performance * Multiple reader/single writer lock system to avoid duplicate simultaneous cache creation * Cache back-ends include dbm, file, memory, memcached, Redis, MongoDB, and database (Using SQLAlchemy for multiple-db vendor support) * Signed cookies to prevent session hijacking/spoofing * Cookie-only sessions to remove the need for a db or file backend (ideal for clustered systems) * Extensible Container object to support new back-ends * Caches can be divided into namespaces (to represent templates, objects, etc.) then keyed for different copies * Create functions for automatic call-backs to create new cache copies after expiration * Fine-grained toggling of back-ends, keys, and expiration per Cache object Documentation ============= Documentation can be found on the `Official Beaker Docs site `_. Source ====== The latest developer version is available in a `GitHub repository `_. Contributing ============ Bugs can be filed on GitHub, **should be accompanied by a test case** to retain current code coverage, and should be in a pull request when ready to be accepted into the beaker code-base. Beaker-1.10.0/setup.cfg0000644000076500000240000000031713305324602014655 0ustar amolstaff00000000000000[nosetests] where = tests verbose = True detailed-errors = True with-doctest = True cover-package = beaker cover-inclusive = True ignore-files = annotated_functions.py [egg_info] tag_build = tag_date = 0 Beaker-1.10.0/setup.py0000644000076500000240000000727513262740405014565 0ustar amolstaff00000000000000import os import sys import re import inspect from setuptools import setup, find_packages py_version = sys.version_info[:2] here = os.path.abspath(os.path.dirname(__file__)) v = open(os.path.join(here, 'beaker', '__init__.py')) VERSION = re.compile(r".*__version__ = '(.*?)'", re.S).match(v.read()).group(1) v.close() try: README = open(os.path.join(here, 'README.rst')).read() except IOError: README = '' INSTALL_REQUIRES = [] if not hasattr(inspect, 'signature'): # On Python 2.6, 2.7 and 3.2 we need funcsigs dependency INSTALL_REQUIRES.append('funcsigs') TESTS_REQUIRE = ['nose', 'Mock', 'pycryptodome'] if py_version == (2, 6): TESTS_REQUIRE.append('WebTest<2.0.24') else: TESTS_REQUIRE.append('webtest') if py_version == (3, 2): TESTS_REQUIRE.append('coverage < 4.0') else: TESTS_REQUIRE.append('coverage') if py_version == (3, 3): TESTS_REQUIRE.append('cryptography < 2.1.0') else: TESTS_REQUIRE.append('cryptography') if not sys.platform.startswith('java') and not sys.platform == 'cli': if py_version == (2, 6): TESTS_REQUIRE.append('sqlalchemy < 1.2') else: TESTS_REQUIRE.append('sqlalchemy') TESTS_REQUIRE.extend(['pymongo', 'redis']) try: import sqlite3 except ImportError: TESTS_REQUIRE.append('pysqlite') if py_version[0] == 2: TESTS_REQUIRE.extend(['pylibmc', 'python-memcached']) setup(name='Beaker', version=VERSION, description="A Session and Caching library with WSGI Middleware", long_description=README, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Programming Language :: Python', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 
'Topic :: Internet :: WWW/HTTP :: Dynamic Content', 'Topic :: Internet :: WWW/HTTP :: WSGI', 'Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware', ], keywords='wsgi myghty session web cache middleware', author='Ben Bangert, Mike Bayer, Philip Jenvey, Alessandro Molina', author_email='ben@groovie.org, pjenvey@groovie.org, amol@turbogears.org', url='https://beaker.readthedocs.io/', license='BSD', packages=find_packages(exclude=['ez_setup', 'examples', 'tests', 'tests.*']), zip_safe=False, install_requires=INSTALL_REQUIRES, extras_require={ 'crypto': ['pycryptopp>=0.5.12'], 'pycrypto': ['pycrypto'], 'pycryptodome': ['pycryptodome'], 'cryptography': ['cryptography'], 'testsuite': TESTS_REQUIRE }, test_suite='nose.collector', tests_require=TESTS_REQUIRE, entry_points=""" [paste.filter_factory] beaker_session = beaker.middleware:session_filter_factory [paste.filter_app_factory] beaker_session = beaker.middleware:session_filter_app_factory [beaker.backends] database = beaker.ext.database:DatabaseNamespaceManager memcached = beaker.ext.memcached:MemcachedNamespaceManager google = beaker.ext.google:GoogleNamespaceManager sqla = beaker.ext.sqla:SqlaNamespaceManager """ )
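The [beaker.backends] entry-point group above is the extension mechanism for
cache backends: a third-party distribution can register its own
NamespaceManager and Beaker will discover it by entry-point name. A minimal
sketch of such a package's setup.py (the names mypkg, MyNamespaceManager and
the resulting 'mybackend' cache type are hypothetical)::

    from setuptools import setup

    setup(
        name='mypkg',
        packages=['mypkg'],
        entry_points={
            'beaker.backends': [
                # makes type = 'mybackend' available in cache configuration
                'mybackend = mypkg.backend:MyNamespaceManager',
            ],
        },
    )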