Beaker-1.6.3/.hgignore

# Automatically generated by `hgimportsvn`
syntax:glob
.svn
*.pyc
*.egg-info
beaker/docs/build
container_file
container_dbm
tests/*/container_*
glob:*.komodoproject

Beaker-1.6.3/.hgtags

4cfae69a4e425396c8aafad69e620d669f14a986 v0.5
793218d03763817d98c5b6b3ee1e11e68171d8a7 v0.7.5
19d215f1d4489d1b49ca05f41846e3983317e508 v0.8
30ab3a978bec66c3437c29fceadcbc68ef9e9d0b v0.8.1
d3a283832eea93bde5370f6696e09bb710a72547 v0.9
d3a283832eea93bde5370f6696e09bb710a72547 v0.9
05692a4bedf24524666bc59fb9cce6a13f9536ac v0.9.1
4522f4b074ede11e63f07c7027bea5c9ca4f9bcd v0.9.2
80f0cede0705ef6362e539ce4b5f0c87349eeef4 v0.9.3
59237dd1d96262a83685617b2d305b7e55ae86a2 v0.9.4
7ba1c094218b52c2e5d88c3687cab50bbbe81c81 v0.9.5
0c772cbe08f3f6f02bf930b8f0f5fc0e1b225239 v1.0
a3fa827c70246a57f3893c2496c73edf93ab77fe v1.0.1
c258512c54efa7665b777393fcee5c9bd7e5f57a v1.0.2
2fe6b4dbd73f6ce8e46a6709e9b1ca33d31d2df0 v1.0.3
92506e9abe02c6147a15ba6de622b2eb1d9de029 v1.1
2d2ac26bae8e5f6684da9564ba4e78f772fa8519 v1.1.1
93b9dfcbe1dd9a7b78a88d502fb3e0c1cdb13d2e v1.1.2
9fccf9544e4a9596102958e6eaa9f6724d35b064 v1.1.3
b958d0a51dde75a72754c3befab63879552b2048 v1.2
3c524e8a984ba2b6ecfb14055dcbfa56a9709575 v1.2.1
279caf5065d3364b5f4de073b7728cd42f692b39 v1.2.2
648a4ef26097991c40e29b6e8c17d44efe812363 v1.3
0321f770855c4c272d08bd803bd7f37116b9aa9b v1.3.1
345f9530e54d250a0456257ede314debb6acea77 v1.4
2aca643bd310fdb35452374f7cbd913c8dbb7899 v1.4.1
cc263261eeb77d4d98be3a7063be4e16134ed6b6 s
cc263261eeb77d4d98be3a7063be4e16134ed6b6 s
0000000000000000000000000000000000000000 s
d2c61ed5e5f4907e0fb44d597677b3a1c3aa8856 v1.4.2
c15d2898ba5cfe9f11406e94e8d1e81f5a3d158b 1.5
e0b1c5983fa21c320367e3f2f0bbb3374da7dfa6 1.5.1
3da2e2b3fd25ae1d86193b14a493738ace346773 v1.5.1
c15d2898ba5cfe9f11406e94e8d1e81f5a3d158b 1.5
0000000000000000000000000000000000000000 1.5
e0b1c5983fa21c320367e3f2f0bbb3374da7dfa6 1.5.1
0000000000000000000000000000000000000000 1.5.1
c15d2898ba5cfe9f11406e94e8d1e81f5a3d158b v1.5
554a46f4a946ef5f3cce2469e1a71a1806039b2d v1.5.2
72cef491c002f25ae0c93df7b7633c360aaf29a5 v1.5.3
1389359f2097384d7842394bb5fcb98894b897f7 v1.5.4
123078d900cb10703a76a9616d1c6f874bac72b0 v1.6
bf403d3a8acbc29080fb33ca1d2faf542d290c58 v.1.6.1
0ac0a62ddfd9d2833842c96304761f2526538ed3 v1.6.2

Beaker-1.6.3/beaker/__init__.py

__version__ = '1.6.3'

Beaker-1.6.3/beaker/cache.py

"""This package contains the "front end" classes and functions
for Beaker caching.

Included are the :class:`.Cache` and :class:`.CacheManager` classes,
as well as the function decorators :func:`.cache_region` and
:func:`.region_invalidate`.
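
A minimal usage sketch (``config`` here is a hypothetical dictionary
of Beaker options; see :class:`.CacheManager` and
:func:`~beaker.util.parse_cache_config_options` below)::

    from beaker.cache import CacheManager
    from beaker.util import parse_cache_config_options

    # Options typically come from an ini file; this dict is illustrative.
    config = {'cache.type': 'memory', 'cache.expire': 3600}
    cache = CacheManager(**parse_cache_config_options(config))

    mycache = cache.get_cache('mynamespace')
    value = mycache.get_value('mykey', createfunc=lambda: 'expensive result')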
""" import warnings import beaker.container as container import beaker.util as util from beaker.crypto.util import sha1 from beaker.exceptions import BeakerException, InvalidCacheBackendError from beaker.synchronization import _threading import beaker.ext.memcached as memcached import beaker.ext.database as database import beaker.ext.sqla as sqla import beaker.ext.google as google # Initialize the cache region dict cache_regions = {} """Dictionary of 'region' arguments. A "region" is a string name that refers to a series of cache configuration arguments. An application may have multiple "regions" - one which stores things in a memory cache, one which writes data to files, etc. The dictionary stores string key names mapped to dictionaries of configuration arguments. Example:: from beaker.cache import cache_regions cache_regions.update({ 'short_term':{ 'expire':'60', 'type':'memory' }, 'long_term':{ 'expire':'1800', 'type':'dbm', 'data_dir':'/tmp', } }) """ cache_managers = {} class _backends(object): initialized = False def __init__(self, clsmap): self._clsmap = clsmap self._mutex = _threading.Lock() def __getitem__(self, key): try: return self._clsmap[key] except KeyError, e: if not self.initialized: self._mutex.acquire() try: if not self.initialized: self._init() self.initialized = True return self._clsmap[key] finally: self._mutex.release() raise e def _init(self): try: import pkg_resources # Load up the additional entry point defined backends for entry_point in pkg_resources.iter_entry_points('beaker.backends'): try: namespace_manager = entry_point.load() name = entry_point.name if name in self._clsmap: raise BeakerException("NamespaceManager name conflict,'%s' " "already loaded" % name) self._clsmap[name] = namespace_manager except (InvalidCacheBackendError, SyntaxError): # Ignore invalid backends pass except: import sys from pkg_resources import DistributionNotFound # Warn when there's a problem loading a NamespaceManager if not isinstance(sys.exc_info()[1], DistributionNotFound): import traceback from StringIO import StringIO tb = StringIO() traceback.print_exc(file=tb) warnings.warn( "Unable to load NamespaceManager " "entry point: '%s': %s" % ( entry_point, tb.getvalue()), RuntimeWarning, 2) except ImportError: pass # Initialize the basic available backends clsmap = _backends({ 'memory':container.MemoryNamespaceManager, 'dbm':container.DBMNamespaceManager, 'file':container.FileNamespaceManager, 'ext:memcached':memcached.MemcachedNamespaceManager, 'ext:database':database.DatabaseNamespaceManager, 'ext:sqla': sqla.SqlaNamespaceManager, 'ext:google': google.GoogleNamespaceManager, }) def cache_region(region, *args): """Decorate a function such that its return result is cached, using a "region" to indicate the cache arguments. Example:: from beaker.cache import cache_regions, cache_region # configure regions cache_regions.update({ 'short_term':{ 'expire':'60', 'type':'memory' } }) @cache_region('short_term', 'load_things') def load(search_term, limit, offset): '''Load from a database given a search term, limit, offset.''' return database.query(search_term)[offset:offset + limit] The decorator can also be used with object methods. The ``self`` argument is not part of the cache key. 
This is based on the actual string name ``self`` being in the first argument position (new in 1.6):: class MyThing(object): @cache_region('short_term', 'load_things') def load(self, search_term, limit, offset): '''Load from a database given a search term, limit, offset.''' return database.query(search_term)[offset:offset + limit] Classmethods work as well - use ``cls`` as the name of the class argument, and place the decorator around the function underneath ``@classmethod`` (new in 1.6):: class MyThing(object): @classmethod @cache_region('short_term', 'load_things') def load(cls, search_term, limit, offset): '''Load from a database given a search term, limit, offset.''' return database.query(search_term)[offset:offset + limit] :param region: String name of the region corresponding to the desired caching arguments, established in :attr:`.cache_regions`. :param \*args: Optional ``str()``-compatible arguments which will uniquely identify the key used by this decorated function, in addition to the positional arguments passed to the function itself at call time. This is recommended as it is needed to distinguish between any two functions or methods that have the same name (regardless of parent class or not). .. note:: The function being decorated must only be called with positional arguments, and the arguments must support being stringified with ``str()``. The concatenation of the ``str()`` version of each argument, combined with that of the ``*args`` sent to the decorator, forms the unique cache key. .. note:: When a method on a class is decorated, the ``self`` or ``cls`` argument in the first position is not included in the "key" used for caching. New in 1.6. """ return _cache_decorate(args, None, None, region) def region_invalidate(namespace, region, *args): """Invalidate a cache region corresponding to a function decorated with :func:`.cache_region`. :param namespace: The namespace of the cache to invalidate. This is typically a reference to the original function (as returned by the :func:`.cache_region` decorator), where the :func:`.cache_region` decorator applies a "memo" to the function in order to locate the string name of the namespace. :param region: String name of the region used with the decorator. This can be ``None`` in the usual case that the decorated function itself is passed, not the string name of the namespace. :param args: Stringifyable arguments that are used to locate the correct key. This consists of the ``*args`` sent to the :func:`.cache_region` decorator itself, plus the ``*args`` sent to the function itself at runtime. Example:: from beaker.cache import cache_regions, cache_region, region_invalidate # configure regions cache_regions.update({ 'short_term':{ 'expire':'60', 'type':'memory' } }) @cache_region('short_term', 'load_data') def load(search_term, limit, offset): '''Load from a database given a search term, limit, offset.''' return database.query(search_term)[offset:offset + limit] def invalidate_search(search_term, limit, offset): '''Invalidate the cached storage for a given search term, limit, offset.''' region_invalidate(load, 'short_term', 'load_data', search_term, limit, offset) Note that when a method on a class is decorated, the first argument ``cls`` or ``self`` is not included in the cache key. 
This means you don't send it to :func:`.region_invalidate`:: class MyThing(object): @cache_region('short_term', 'some_data') def load(self, search_term, limit, offset): '''Load from a database given a search term, limit, offset.''' return database.query(search_term)[offset:offset + limit] def invalidate_search(self, search_term, limit, offset): '''Invalidate the cached storage for a given search term, limit, offset.''' region_invalidate(self.load, 'short_term', 'some_data', search_term, limit, offset) """ if callable(namespace): if not region: region = namespace._arg_region namespace = namespace._arg_namespace if not region: raise BeakerException("Region or callable function " "namespace is required") else: region = cache_regions[region] cache = Cache._get_cache(namespace, region) _cache_decorator_invalidate(cache, region['key_length'], args) class Cache(object): """Front-end to the containment API implementing a data cache. :param namespace: the namespace of this Cache :param type: type of cache to use :param expire: seconds to keep cached data :param expiretime: seconds to keep cached data (legacy support) :param starttime: time when cache was cache was """ def __init__(self, namespace, type='memory', expiretime=None, starttime=None, expire=None, **nsargs): try: cls = clsmap[type] if isinstance(cls, InvalidCacheBackendError): raise cls except KeyError: raise TypeError("Unknown cache implementation %r" % type) self.namespace_name = namespace self.namespace = cls(namespace, **nsargs) self.expiretime = expiretime or expire self.starttime = starttime self.nsargs = nsargs @classmethod def _get_cache(cls, namespace, kw): key = namespace + str(kw) try: return cache_managers[key] except KeyError: cache_managers[key] = cache = cls(namespace, **kw) return cache def put(self, key, value, **kw): self._get_value(key, **kw).set_value(value) set_value = put def get(self, key, **kw): """Retrieve a cached value from the container""" return self._get_value(key, **kw).get_value() get_value = get def remove_value(self, key, **kw): mycontainer = self._get_value(key, **kw) mycontainer.clear_value() remove = remove_value def _get_value(self, key, **kw): if isinstance(key, unicode): key = key.encode('ascii', 'backslashreplace') if 'type' in kw: return self._legacy_get_value(key, **kw) kw.setdefault('expiretime', self.expiretime) kw.setdefault('starttime', self.starttime) return container.Value(key, self.namespace, **kw) @util.deprecated("Specifying a " "'type' and other namespace configuration with cache.get()/put()/etc. " "is deprecated. 
Specify 'type' and other namespace configuration to " "cache_manager.get_cache() and/or the Cache constructor instead.") def _legacy_get_value(self, key, type, **kw): expiretime = kw.pop('expiretime', self.expiretime) starttime = kw.pop('starttime', None) createfunc = kw.pop('createfunc', None) kwargs = self.nsargs.copy() kwargs.update(kw) c = Cache(self.namespace.namespace, type=type, **kwargs) return c._get_value(key, expiretime=expiretime, createfunc=createfunc, starttime=starttime) def clear(self): """Clear all the values from the namespace""" self.namespace.remove() # dict interface def __getitem__(self, key): return self.get(key) def __contains__(self, key): return self._get_value(key).has_current_value() def has_key(self, key): return key in self def __delitem__(self, key): self.remove_value(key) def __setitem__(self, key, value): self.put(key, value) class CacheManager(object): def __init__(self, **kwargs): """Initialize a CacheManager object with a set of options Options should be parsed with the :func:`~beaker.util.parse_cache_config_options` function to ensure only valid options are used. """ self.kwargs = kwargs self.regions = kwargs.pop('cache_regions', {}) # Add these regions to the module global cache_regions.update(self.regions) def get_cache(self, name, **kwargs): kw = self.kwargs.copy() kw.update(kwargs) return Cache._get_cache(name, kw) def get_cache_region(self, name, region): if region not in self.regions: raise BeakerException('Cache region not configured: %s' % region) kw = self.regions[region] return Cache._get_cache(name, kw) def region(self, region, *args): """Decorate a function to cache itself using a cache region The region decorator requires arguments if there are more than two of the same named function, in the same module. This is because the namespace used for the functions cache is based on the functions name and the module. Example:: # Assuming a cache object is available like: cache = CacheManager(dict_of_config_options) def populate_things(): @cache.region('short_term', 'some_data') def load(search_term, limit, offset): return load_the_data(search_term, limit, offset) return load('rabbits', 20, 0) .. note:: The function being decorated must only be called with positional arguments. """ return cache_region(region, *args) def region_invalidate(self, namespace, region, *args): """Invalidate a cache region namespace or decorated function This function only invalidates cache spaces created with the cache_region decorator. :param namespace: Either the namespace of the result to invalidate, or the cached function :param region: The region the function was cached to. If the function was cached to a single region then this argument can be None :param args: Arguments that were used to differentiate the cached function as well as the arguments passed to the decorated function Example:: # Assuming a cache object is available like: cache = CacheManager(dict_of_config_options) def populate_things(invalidate=False): @cache.region('short_term', 'some_data') def load(search_term, limit, offset): return load_the_data(search_term, limit, offset) # If the results should be invalidated first if invalidate: cache.region_invalidate(load, None, 'some_data', 'rabbits', 20, 0) return load('rabbits', 20, 0) """ return region_invalidate(namespace, region, *args) def cache(self, *args, **kwargs): """Decorate a function to cache itself with supplied parameters :param args: Used to make the key unique for this function, as in region() above. 
:param kwargs: Parameters to be passed to get_cache(), will override defaults Example:: # Assuming a cache object is available like: cache = CacheManager(dict_of_config_options) def populate_things(): @cache.cache('mycache', expire=15) def load(search_term, limit, offset): return load_the_data(search_term, limit, offset) return load('rabbits', 20, 0) .. note:: The function being decorated must only be called with positional arguments. """ return _cache_decorate(args, self, kwargs, None) def invalidate(self, func, *args, **kwargs): """Invalidate a cache decorated function This function only invalidates cache spaces created with the cache decorator. :param func: Decorated function to invalidate :param args: Used to make the key unique for this function, as in region() above. :param kwargs: Parameters that were passed for use by get_cache(), note that this is only required if a ``type`` was specified for the function Example:: # Assuming a cache object is available like: cache = CacheManager(dict_of_config_options) def populate_things(invalidate=False): @cache.cache('mycache', type="file", expire=15) def load(search_term, limit, offset): return load_the_data(search_term, limit, offset) # If the results should be invalidated first if invalidate: cache.invalidate(load, 'mycache', 'rabbits', 20, 0, type="file") return load('rabbits', 20, 0) """ namespace = func._arg_namespace cache = self.get_cache(namespace, **kwargs) if hasattr(func, '_arg_region'): key_length = cache_regions[func._arg_region]['key_length'] else: key_length = kwargs.pop('key_length', 250) _cache_decorator_invalidate(cache, key_length, args) def _cache_decorate(deco_args, manager, kwargs, region): """Return a caching function decorator.""" cache = [None] def decorate(func): namespace = util.func_namespace(func) skip_self = util.has_self_arg(func) def cached(*args): if not cache[0]: if region is not None: if region not in cache_regions: raise BeakerException( 'Cache region not configured: %s' % region) reg = cache_regions[region] if not reg.get('enabled', True): return func(*args) cache[0] = Cache._get_cache(namespace, reg) elif manager: cache[0] = manager.get_cache(namespace, **kwargs) else: raise Exception("'manager + kwargs' or 'region' " "argument is required") if skip_self: try: cache_key = " ".join(map(str, deco_args + args[1:])) except UnicodeEncodeError: cache_key = " ".join(map(unicode, deco_args + args[1:])) else: try: cache_key = " ".join(map(str, deco_args + args)) except UnicodeEncodeError: cache_key = " ".join(map(unicode, deco_args + args)) if region: key_length = cache_regions[region]['key_length'] else: key_length = kwargs.pop('key_length', 250) if len(cache_key) + len(namespace) > key_length: cache_key = sha1(cache_key).hexdigest() def go(): return func(*args) return cache[0].get_value(cache_key, createfunc=go) cached._arg_namespace = namespace if region is not None: cached._arg_region = region return cached return decorate def _cache_decorator_invalidate(cache, key_length, args): """Invalidate a cache key based on function arguments.""" try: cache_key = " ".join(map(str, args)) except UnicodeEncodeError: cache_key = " ".join(map(unicode, args)) if len(cache_key) + len(cache.namespace_name) > key_length: cache_key = sha1(cache_key).hexdigest() cache.remove_value(cache_key) Beaker-1.6.3/beaker/container.py0000664000076500000240000005575411671762332016420 0ustar benstaff00000000000000"""Container and Namespace classes""" import beaker.util as util if util.py3k: import dbm as anydbm else: import anydbm import 
cPickle import logging import os import time from beaker.exceptions import CreationAbortedError, MissingCacheParameter from beaker.synchronization import _threading, file_synchronizer, \ mutex_synchronizer, NameLock, null_synchronizer __all__ = ['Value', 'Container', 'ContainerContext', 'MemoryContainer', 'DBMContainer', 'NamespaceManager', 'MemoryNamespaceManager', 'DBMNamespaceManager', 'FileContainer', 'OpenResourceNamespaceManager', 'FileNamespaceManager', 'CreationAbortedError'] logger = logging.getLogger('beaker.container') if logger.isEnabledFor(logging.DEBUG): debug = logger.debug else: def debug(message, *args): pass class NamespaceManager(object): """Handles dictionary operations and locking for a namespace of values. :class:`.NamespaceManager` provides a dictionary-like interface, implementing ``__getitem__()``, ``__setitem__()``, and ``__contains__()``, as well as functions related to lock acquisition. The implementation for setting and retrieving the namespace data is handled by subclasses. NamespaceManager may be used alone, or may be accessed by one or more :class:`.Value` objects. :class:`.Value` objects provide per-key services like expiration times and automatic recreation of values. Multiple NamespaceManagers created with a particular name will all share access to the same underlying datasource and will attempt to synchronize against a common mutex object. The scope of this sharing may be within a single process or across multiple processes, depending on the type of NamespaceManager used. The NamespaceManager itself is generally threadsafe, except in the case of the DBMNamespaceManager in conjunction with the gdbm dbm implementation. """ @classmethod def _init_dependencies(cls): """Initialize module-level dependent libraries required by this :class:`.NamespaceManager`.""" def __init__(self, namespace): self._init_dependencies() self.namespace = namespace def get_creation_lock(self, key): """Return a locking object that is used to synchronize multiple threads or processes which wish to generate a new cache value. This function is typically an instance of :class:`.FileSynchronizer`, :class:`.ConditionSynchronizer`, or :class:`.null_synchronizer`. The creation lock is only used when a requested value does not exist, or has been expired, and is only used by the :class:`.Value` key-management object in conjunction with a "createfunc" value-creation function. """ raise NotImplementedError() def do_remove(self): """Implement removal of the entire contents of this :class:`.NamespaceManager`. e.g. for a file-based namespace, this would remove all the files. The front-end to this method is the :meth:`.NamespaceManager.remove` method. """ raise NotImplementedError() def acquire_read_lock(self): """Establish a read lock. This operation is called before a key is read. By default the function does nothing. """ def release_read_lock(self): """Release a read lock. This operation is called after a key is read. By default the function does nothing. """ def acquire_write_lock(self, wait=True, replace=False): """Establish a write lock. This operation is called before a key is written. A return value of ``True`` indicates the lock has been acquired. By default the function returns ``True`` unconditionally. 'replace' is a hint indicating the full contents of the namespace may be safely discarded. Some backends may implement this (i.e. file backend won't unpickle the current contents). """ return True def release_write_lock(self): """Release a write lock. 
This operation is called after a new value is written. By default this function does nothing. """ def has_key(self, key): """Return ``True`` if the given key is present in this :class:`.Namespace`. """ return self.__contains__(key) def __getitem__(self, key): raise NotImplementedError() def __setitem__(self, key, value): raise NotImplementedError() def set_value(self, key, value, expiretime=None): """Sets a value in this :class:`.NamespaceManager`. This is the same as ``__setitem__()``, but also allows an expiration time to be passed at the same time. """ self[key] = value def __contains__(self, key): raise NotImplementedError() def __delitem__(self, key): raise NotImplementedError() def keys(self): """Return the list of all keys. This method may not be supported by all :class:`.NamespaceManager` implementations. """ raise NotImplementedError() def remove(self): """Remove the entire contents of this :class:`.NamespaceManager`. e.g. for a file-based namespace, this would remove all the files. """ self.do_remove() class OpenResourceNamespaceManager(NamespaceManager): """A NamespaceManager where read/write operations require opening/ closing of a resource which is possibly mutexed. """ def __init__(self, namespace): NamespaceManager.__init__(self, namespace) self.access_lock = self.get_access_lock() self.openers = 0 self.mutex = _threading.Lock() def get_access_lock(self): raise NotImplementedError() def do_open(self, flags, replace): raise NotImplementedError() def do_close(self): raise NotImplementedError() def acquire_read_lock(self): self.access_lock.acquire_read_lock() try: self.open('r', checkcount = True) except: self.access_lock.release_read_lock() raise def release_read_lock(self): try: self.close(checkcount = True) finally: self.access_lock.release_read_lock() def acquire_write_lock(self, wait=True, replace=False): r = self.access_lock.acquire_write_lock(wait) try: if (wait or r): self.open('c', checkcount = True, replace=replace) return r except: self.access_lock.release_write_lock() raise def release_write_lock(self): try: self.close(checkcount=True) finally: self.access_lock.release_write_lock() def open(self, flags, checkcount=False, replace=False): self.mutex.acquire() try: if checkcount: if self.openers == 0: self.do_open(flags, replace) self.openers += 1 else: self.do_open(flags, replace) self.openers = 1 finally: self.mutex.release() def close(self, checkcount=False): self.mutex.acquire() try: if checkcount: self.openers -= 1 if self.openers == 0: self.do_close() else: if self.openers > 0: self.do_close() self.openers = 0 finally: self.mutex.release() def remove(self): self.access_lock.acquire_write_lock() try: self.close(checkcount=False) self.do_remove() finally: self.access_lock.release_write_lock() class Value(object): """Implements synchronization, expiration, and value-creation logic for a single value stored in a :class:`.NamespaceManager`. """ __slots__ = 'key', 'createfunc', 'expiretime', 'expire_argument', 'starttime', 'storedtime',\ 'namespace' def __init__(self, key, namespace, createfunc=None, expiretime=None, starttime=None): self.key = key self.createfunc = createfunc self.expire_argument = expiretime self.starttime = starttime self.storedtime = -1 self.namespace = namespace def has_value(self): """return true if the container has a value stored. This is regardless of it being expired or not. 
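Use :meth:`.has_current_value` to also take expiration into account.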
""" self.namespace.acquire_read_lock() try: return self.namespace.has_key(self.key) finally: self.namespace.release_read_lock() def can_have_value(self): return self.has_current_value() or self.createfunc is not None def has_current_value(self): self.namespace.acquire_read_lock() try: has_value = self.namespace.has_key(self.key) if has_value: try: stored, expired, value = self._get_value() return not self._is_expired(stored, expired) except KeyError: pass return False finally: self.namespace.release_read_lock() def _is_expired(self, storedtime, expiretime): """Return true if this container's value is expired.""" return ( ( self.starttime is not None and storedtime < self.starttime ) or ( expiretime is not None and time.time() >= expiretime + storedtime ) ) def get_value(self): self.namespace.acquire_read_lock() try: has_value = self.has_value() if has_value: try: stored, expired, value = self._get_value() if not self._is_expired(stored, expired): return value except KeyError: # guard against un-mutexed backends raising KeyError has_value = False if not self.createfunc: raise KeyError(self.key) finally: self.namespace.release_read_lock() has_createlock = False creation_lock = self.namespace.get_creation_lock(self.key) if has_value: if not creation_lock.acquire(wait=False): debug("get_value returning old value while new one is created") return value else: debug("lock_creatfunc (didnt wait)") has_createlock = True if not has_createlock: debug("lock_createfunc (waiting)") creation_lock.acquire() debug("lock_createfunc (waited)") try: # see if someone created the value already self.namespace.acquire_read_lock() try: if self.has_value(): try: stored, expired, value = self._get_value() if not self._is_expired(stored, expired): return value except KeyError: # guard against un-mutexed backends raising KeyError pass finally: self.namespace.release_read_lock() debug("get_value creating new value") v = self.createfunc() self.set_value(v) return v finally: creation_lock.release() debug("released create lock") def _get_value(self): value = self.namespace[self.key] try: stored, expired, value = value except ValueError: if not len(value) == 2: raise # Old format: upgrade stored, value = value expired = self.expire_argument debug("get_value upgrading time %r expire time %r", stored, self.expire_argument) self.namespace.release_read_lock() self.set_value(value, stored) self.namespace.acquire_read_lock() except TypeError: # occurs when the value is None. memcached # may yank the rug from under us in which case # that's the result raise KeyError(self.key) return stored, expired, value def set_value(self, value, storedtime=None): self.namespace.acquire_write_lock() try: if storedtime is None: storedtime = time.time() debug("set_value stored time %r expire time %r", storedtime, self.expire_argument) self.namespace.set_value(self.key, (storedtime, self.expire_argument, value)) finally: self.namespace.release_write_lock() def clear_value(self): self.namespace.acquire_write_lock() try: debug("clear_value") if self.namespace.has_key(self.key): try: del self.namespace[self.key] except KeyError: # guard against un-mutexed backends raising KeyError pass self.storedtime = -1 finally: self.namespace.release_write_lock() class AbstractDictionaryNSManager(NamespaceManager): """A subclassable NamespaceManager that places data in a dictionary. Subclasses should provide a "dictionary" attribute or descriptor which returns a dict-like object. 
The dictionary will store keys that are local to the "namespace" attribute of this manager, so ensure that the dictionary will not be used by any other namespace. e.g.:: import collections cached_data = collections.defaultdict(dict) class MyDictionaryManager(AbstractDictionaryNSManager): def __init__(self, namespace): AbstractDictionaryNSManager.__init__(self, namespace) self.dictionary = cached_data[self.namespace] The above stores data in a global dictionary called "cached_data", which is structured as a dictionary of dictionaries, keyed first on namespace name to a sub-dictionary, then on actual cache key to value. """ def get_creation_lock(self, key): return NameLock( identifier="memorynamespace/funclock/%s/%s" % (self.namespace, key), reentrant=True ) def __getitem__(self, key): return self.dictionary[key] def __contains__(self, key): return self.dictionary.__contains__(key) def has_key(self, key): return self.dictionary.__contains__(key) def __setitem__(self, key, value): self.dictionary[key] = value def __delitem__(self, key): del self.dictionary[key] def do_remove(self): self.dictionary.clear() def keys(self): return self.dictionary.keys() class MemoryNamespaceManager(AbstractDictionaryNSManager): """:class:`.NamespaceManager` that uses a Python dictionary for storage.""" namespaces = util.SyncDict() def __init__(self, namespace, **kwargs): AbstractDictionaryNSManager.__init__(self, namespace) self.dictionary = MemoryNamespaceManager.\ namespaces.get(self.namespace, dict) class DBMNamespaceManager(OpenResourceNamespaceManager): """:class:`.NamespaceManager` that uses ``dbm`` files for storage.""" def __init__(self, namespace, dbmmodule=None, data_dir=None, dbm_dir=None, lock_dir=None, digest_filenames=True, **kwargs): self.digest_filenames = digest_filenames if not dbm_dir and not data_dir: raise MissingCacheParameter("data_dir or dbm_dir is required") elif dbm_dir: self.dbm_dir = dbm_dir else: self.dbm_dir = data_dir + "/container_dbm" util.verify_directory(self.dbm_dir) if not lock_dir and not data_dir: raise MissingCacheParameter("data_dir or lock_dir is required") elif lock_dir: self.lock_dir = lock_dir else: self.lock_dir = data_dir + "/container_dbm_lock" util.verify_directory(self.lock_dir) self.dbmmodule = dbmmodule or anydbm self.dbm = None OpenResourceNamespaceManager.__init__(self, namespace) self.file = util.encoded_path(root= self.dbm_dir, identifiers=[self.namespace], extension='.dbm', digest_filenames=self.digest_filenames) debug("data file %s", self.file) self._checkfile() def get_access_lock(self): return file_synchronizer(identifier=self.namespace, lock_dir=self.lock_dir) def get_creation_lock(self, key): return file_synchronizer( identifier = "dbmcontainer/funclock/%s/%s" % ( self.namespace, key ), lock_dir=self.lock_dir ) def file_exists(self, file): if os.access(file, os.F_OK): return True else: for ext in ('db', 'dat', 'pag', 'dir'): if os.access(file + os.extsep + ext, os.F_OK): return True return False def _checkfile(self): if not self.file_exists(self.file): g = self.dbmmodule.open(self.file, 'c') g.close() def get_filenames(self): list = [] if os.access(self.file, os.F_OK): list.append(self.file) for ext in ('pag', 'dir', 'db', 'dat'): if os.access(self.file + os.extsep + ext, os.F_OK): list.append(self.file + os.extsep + ext) return list def do_open(self, flags, replace): debug("opening dbm file %s", self.file) try: self.dbm = self.dbmmodule.open(self.file, flags) except: self._checkfile() self.dbm = self.dbmmodule.open(self.file, flags) def 
do_close(self): if self.dbm is not None: debug("closing dbm file %s", self.file) self.dbm.close() def do_remove(self): for f in self.get_filenames(): os.remove(f) def __getitem__(self, key): return cPickle.loads(self.dbm[key]) def __contains__(self, key): return self.dbm.has_key(key) def __setitem__(self, key, value): self.dbm[key] = cPickle.dumps(value) def __delitem__(self, key): del self.dbm[key] def keys(self): return self.dbm.keys() class FileNamespaceManager(OpenResourceNamespaceManager): """:class:`.NamespaceManager` that uses binary files for storage. Each namespace is implemented as a single file storing a dictionary of key/value pairs, serialized using the Python ``pickle`` module. """ def __init__(self, namespace, data_dir=None, file_dir=None, lock_dir=None, digest_filenames=True, **kwargs): self.digest_filenames = digest_filenames if not file_dir and not data_dir: raise MissingCacheParameter("data_dir or file_dir is required") elif file_dir: self.file_dir = file_dir else: self.file_dir = data_dir + "/container_file" util.verify_directory(self.file_dir) if not lock_dir and not data_dir: raise MissingCacheParameter("data_dir or lock_dir is required") elif lock_dir: self.lock_dir = lock_dir else: self.lock_dir = data_dir + "/container_file_lock" util.verify_directory(self.lock_dir) OpenResourceNamespaceManager.__init__(self, namespace) self.file = util.encoded_path(root=self.file_dir, identifiers=[self.namespace], extension='.cache', digest_filenames=self.digest_filenames) self.hash = {} debug("data file %s", self.file) def get_access_lock(self): return file_synchronizer(identifier=self.namespace, lock_dir=self.lock_dir) def get_creation_lock(self, key): return file_synchronizer( identifier = "dbmcontainer/funclock/%s/%s" % ( self.namespace, key ), lock_dir = self.lock_dir ) def file_exists(self, file): return os.access(file, os.F_OK) def do_open(self, flags, replace): if not replace and self.file_exists(self.file): fh = open(self.file, 'rb') self.hash = cPickle.load(fh) fh.close() self.flags = flags def do_close(self): if self.flags == 'c' or self.flags == 'w': fh = open(self.file, 'wb') cPickle.dump(self.hash, fh) fh.close() self.hash = {} self.flags = None def do_remove(self): try: os.remove(self.file) except OSError, err: # for instance, because we haven't yet used this cache, # but client code has asked for a clear() operation... pass self.hash = {} def __getitem__(self, key): return self.hash[key] def __contains__(self, key): return self.hash.has_key(key) def __setitem__(self, key, value): self.hash[key] = value def __delitem__(self, key): del self.hash[key] def keys(self): return self.hash.keys() #### legacy stuff to support the old "Container" class interface namespace_classes = {} ContainerContext = dict class ContainerMeta(type): def __init__(cls, classname, bases, dict_): namespace_classes[cls] = cls.namespace_class return type.__init__(cls, classname, bases, dict_) def __call__(self, key, context, namespace, createfunc=None, expiretime=None, starttime=None, **kwargs): if namespace in context: ns = context[namespace] else: nscls = namespace_classes[self] context[namespace] = ns = nscls(namespace, **kwargs) return Value(key, ns, createfunc=createfunc, expiretime=expiretime, starttime=starttime) class Container(object): """Implements synchronization and value-creation logic for a 'value' stored in a :class:`.NamespaceManager`. :class:`.Container` and its subclasses are deprecated. The :class:`.Value` class is now used for this purpose. 
""" __metaclass__ = ContainerMeta namespace_class = NamespaceManager class FileContainer(Container): namespace_class = FileNamespaceManager class MemoryContainer(Container): namespace_class = MemoryNamespaceManager class DBMContainer(Container): namespace_class = DBMNamespaceManager DbmContainer = DBMContainer Beaker-1.6.3/beaker/converters.py0000664000076500000240000000153710761620546016615 0ustar benstaff00000000000000# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org) # Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php def asbool(obj): if isinstance(obj, (str, unicode)): obj = obj.strip().lower() if obj in ['true', 'yes', 'on', 'y', 't', '1']: return True elif obj in ['false', 'no', 'off', 'n', 'f', '0']: return False else: raise ValueError( "String is not true/false: %r" % obj) return bool(obj) def aslist(obj, sep=None, strip=True): if isinstance(obj, (str, unicode)): lst = obj.split(sep) if strip: lst = [v.strip() for v in lst] return lst elif isinstance(obj, (list, tuple)): return obj elif obj is None: return [] else: return [obj] Beaker-1.6.3/beaker/crypto/0000775000076500000240000000000011723541367015365 5ustar benstaff00000000000000Beaker-1.6.3/beaker/crypto/__init__.py0000664000076500000240000000232111361126312017457 0ustar benstaff00000000000000from warnings import warn from beaker.crypto.pbkdf2 import PBKDF2, strxor from beaker.crypto.util import hmac, sha1, hmac_sha1, md5 from beaker import util keyLength = None if util.jython: try: from beaker.crypto.jcecrypto import getKeyLength, aesEncrypt keyLength = getKeyLength() except ImportError: pass else: try: from beaker.crypto.pycrypto import getKeyLength, aesEncrypt, aesDecrypt keyLength = getKeyLength() except ImportError: pass if not keyLength: has_aes = False else: has_aes = True if has_aes and keyLength < 32: warn('Crypto implementation only supports key lengths up to %d bits. ' 'Generated session cookies may be incompatible with other ' 'environments' % (keyLength * 8)) def generateCryptoKeys(master_key, salt, iterations): # NB: We XOR parts of the keystream into the randomly-generated parts, just # in case os.urandom() isn't as random as it should be. Note that if # os.urandom() returns truly random data, this will have no effect on the # overall security. keystream = PBKDF2(master_key, salt, iterations=iterations) cipher_key = keystream.read(keyLength) return cipher_key Beaker-1.6.3/beaker/crypto/jcecrypto.py0000664000076500000240000000201311341054467017731 0ustar benstaff00000000000000""" Encryption module that uses the Java Cryptography Extensions (JCE). Note that in default installations of the Java Runtime Environment, the maximum key length is limited to 128 bits due to US export restrictions. This makes the generated keys incompatible with the ones generated by pycryptopp, which has no such restrictions. To fix this, download the "Unlimited Strength Jurisdiction Policy Files" from Sun, which will allow encryption using 256 bit AES keys. """ from javax.crypto import Cipher from javax.crypto.spec import SecretKeySpec, IvParameterSpec import jarray # Initialization vector filled with zeros _iv = IvParameterSpec(jarray.zeros(16, 'b')) def aesEncrypt(data, key): cipher = Cipher.getInstance('AES/CTR/NoPadding') skeySpec = SecretKeySpec(key, 'AES') cipher.init(Cipher.ENCRYPT_MODE, skeySpec, _iv) return cipher.doFinal(data).tostring() # magic. 
aesDecrypt = aesEncrypt def getKeyLength(): maxlen = Cipher.getMaxAllowedKeyLength('AES/CTR/NoPadding') return min(maxlen, 256) / 8 Beaker-1.6.3/beaker/crypto/pbkdf2.py0000664000076500000240000002710011542263524017102 0ustar benstaff00000000000000#!/usr/bin/python # -*- coding: ascii -*- ########################################################################### # PBKDF2.py - PKCS#5 v2.0 Password-Based Key Derivation # # Copyright (C) 2007 Dwayne C. Litzenberger # All rights reserved. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose and without fee is hereby granted, # provided that the above copyright notice appear in all copies and that # both that copyright notice and this permission notice appear in # supporting documentation. # # THE AUTHOR PROVIDES THIS SOFTWARE ``AS IS'' AND ANY EXPRESSED OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Country of origin: Canada # ########################################################################### # Sample PBKDF2 usage: # from Crypto.Cipher import AES # from PBKDF2 import PBKDF2 # import os # # salt = os.urandom(8) # 64-bit salt # key = PBKDF2("This passphrase is a secret.", salt).read(32) # 256-bit key # iv = os.urandom(16) # 128-bit IV # cipher = AES.new(key, AES.MODE_CBC, iv) # ... # # Sample crypt() usage: # from PBKDF2 import crypt # pwhash = crypt("secret") # alleged_pw = raw_input("Enter password: ") # if pwhash == crypt(alleged_pw, pwhash): # print "Password good" # else: # print "Invalid password" # ########################################################################### # History: # # 2007-07-27 Dwayne C. Litzenberger # - Initial Release (v1.0) # # 2007-07-31 Dwayne C. Litzenberger # - Bugfix release (v1.1) # - SECURITY: The PyCrypto XOR cipher (used, if available, in the _strxor # function in the previous release) silently truncates all keys to 64 # bytes. The way it was used in the previous release, this would only be # problem if the pseudorandom function that returned values larger than # 64 bytes (so SHA1, SHA256 and SHA512 are fine), but I don't like # anything that silently reduces the security margin from what is # expected. # ########################################################################### __version__ = "1.1" from struct import pack from binascii import b2a_hex from random import randint from base64 import b64encode from beaker.crypto.util import hmac as HMAC, hmac_sha1 as SHA1 def strxor(a, b): return "".join([chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b)]) class PBKDF2(object): """PBKDF2.py : PKCS#5 v2.0 Password-Based Key Derivation This implementation takes a passphrase and a salt (and optionally an iteration count, a digest module, and a MAC module) and provides a file-like object from which an arbitrarily-sized key can be read. If the passphrase and/or salt are unicode objects, they are encoded as UTF-8 before they are processed. 
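
For example, reading a 256-bit key, mirroring the sample usage in the
header comments of this module (``salt`` is assumed to be a random
8-byte string, e.g. from ``os.urandom(8)``)::

    key = PBKDF2("This passphrase is a secret.", salt).read(32)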
The idea behind PBKDF2 is to derive a cryptographic key from a passphrase and a salt. PBKDF2 may also be used as a strong salted password hash. The 'crypt' function is provided for that purpose. Remember: Keys generated using PBKDF2 are only as strong as the passphrases they are derived from. """ def __init__(self, passphrase, salt, iterations=1000, digestmodule=SHA1, macmodule=HMAC): if not callable(macmodule): macmodule = macmodule.new self.__macmodule = macmodule self.__digestmodule = digestmodule self._setup(passphrase, salt, iterations, self._pseudorandom) def _pseudorandom(self, key, msg): """Pseudorandom function. e.g. HMAC-SHA1""" return self.__macmodule(key=key, msg=msg, digestmod=self.__digestmodule).digest() def read(self, bytes): """Read the specified number of key bytes.""" if self.closed: raise ValueError("file-like object is closed") size = len(self.__buf) blocks = [self.__buf] i = self.__blockNum while size < bytes: i += 1 if i > 0xffffffff: # We could return "" here, but raise OverflowError("derived key too long") block = self.__f(i) blocks.append(block) size += len(block) buf = "".join(blocks) retval = buf[:bytes] self.__buf = buf[bytes:] self.__blockNum = i return retval def __f(self, i): # i must fit within 32 bits assert (1 <= i and i <= 0xffffffff) U = self.__prf(self.__passphrase, self.__salt + pack("!L", i)) result = U for j in xrange(2, 1+self.__iterations): U = self.__prf(self.__passphrase, U) result = strxor(result, U) return result def hexread(self, octets): """Read the specified number of octets. Return them as hexadecimal. Note that len(obj.hexread(n)) == 2*n. """ return b2a_hex(self.read(octets)) def _setup(self, passphrase, salt, iterations, prf): # Sanity checks: # passphrase and salt must be str or unicode (in the latter # case, we convert to UTF-8) if isinstance(passphrase, unicode): passphrase = passphrase.encode("UTF-8") if not isinstance(passphrase, str): raise TypeError("passphrase must be str or unicode") if isinstance(salt, unicode): salt = salt.encode("UTF-8") if not isinstance(salt, str): raise TypeError("salt must be str or unicode") # iterations must be an integer >= 1 if not isinstance(iterations, (int, long)): raise TypeError("iterations must be an integer") if iterations < 1: raise ValueError("iterations must be at least 1") # prf must be callable if not callable(prf): raise TypeError("prf must be callable") self.__passphrase = passphrase self.__salt = salt self.__iterations = iterations self.__prf = prf self.__blockNum = 0 self.__buf = "" self.closed = False def close(self): """Close the stream.""" if not self.closed: del self.__passphrase del self.__salt del self.__iterations del self.__prf del self.__blockNum del self.__buf self.closed = True def crypt(word, salt=None, iterations=None): """PBKDF2-based unix crypt(3) replacement. The number of iterations specified in the salt overrides the 'iterations' parameter. The effective hash length is 192 bits. """ # Generate a (pseudo-)random salt if the user hasn't provided one. 
if salt is None: salt = _makesalt() # salt must be a string or the us-ascii subset of unicode if isinstance(salt, unicode): salt = salt.encode("us-ascii") if not isinstance(salt, str): raise TypeError("salt must be a string") # word must be a string or unicode (in the latter case, we convert to UTF-8) if isinstance(word, unicode): word = word.encode("UTF-8") if not isinstance(word, str): raise TypeError("word must be a string or unicode") # Try to extract the real salt and iteration count from the salt if salt.startswith("$p5k2$"): (iterations, salt, dummy) = salt.split("$")[2:5] if iterations == "": iterations = 400 else: converted = int(iterations, 16) if iterations != "%x" % converted: # lowercase hex, minimum digits raise ValueError("Invalid salt") iterations = converted if not (iterations >= 1): raise ValueError("Invalid salt") # Make sure the salt matches the allowed character set allowed = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789./" for ch in salt: if ch not in allowed: raise ValueError("Illegal character %r in salt" % (ch,)) if iterations is None or iterations == 400: iterations = 400 salt = "$p5k2$$" + salt else: salt = "$p5k2$%x$%s" % (iterations, salt) rawhash = PBKDF2(word, salt, iterations).read(24) return salt + "$" + b64encode(rawhash, "./") # Add crypt as a static method of the PBKDF2 class # This makes it easier to do "from PBKDF2 import PBKDF2" and still use # crypt. PBKDF2.crypt = staticmethod(crypt) def _makesalt(): """Return a 48-bit pseudorandom salt for crypt(). This function is not suitable for generating cryptographic secrets. """ binarysalt = "".join([pack("@H", randint(0, 0xffff)) for i in range(3)]) return b64encode(binarysalt, "./") def test_pbkdf2(): """Module self-test""" from binascii import a2b_hex # # Test vectors from RFC 3962 # # Test 1 result = PBKDF2("password", "ATHENA.MIT.EDUraeburn", 1).read(16) expected = a2b_hex("cdedb5281bb2f801565a1122b2563515") if result != expected: raise RuntimeError("self-test failed") # Test 2 result = PBKDF2("password", "ATHENA.MIT.EDUraeburn", 1200).hexread(32) expected = ("5c08eb61fdf71e4e4ec3cf6ba1f5512b" "a7e52ddbc5e5142f708a31e2e62b1e13") if result != expected: raise RuntimeError("self-test failed") # Test 3 result = PBKDF2("X"*64, "pass phrase equals block size", 1200).hexread(32) expected = ("139c30c0966bc32ba55fdbf212530ac9" "c5ec59f1a452f5cc9ad940fea0598ed1") if result != expected: raise RuntimeError("self-test failed") # Test 4 result = PBKDF2("X"*65, "pass phrase exceeds block size", 1200).hexread(32) expected = ("9ccad6d468770cd51b10e6a68721be61" "1a8b4d282601db3b36be9246915ec82a") if result != expected: raise RuntimeError("self-test failed") # # Other test vectors # # Chunked read f = PBKDF2("kickstart", "workbench", 256) result = f.read(17) result += f.read(17) result += f.read(1) result += f.read(2) result += f.read(3) expected = PBKDF2("kickstart", "workbench", 256).read(40) if result != expected: raise RuntimeError("self-test failed") # # crypt() test vectors # # crypt 1 result = crypt("cloadm", "exec") expected = '$p5k2$$exec$r1EWMCMk7Rlv3L/RNcFXviDefYa0hlql' if result != expected: raise RuntimeError("self-test failed") # crypt 2 result = crypt("gnu", '$p5k2$c$u9HvcT4d$.....') expected = '$p5k2$c$u9HvcT4d$Sd1gwSVCLZYAuqZ25piRnbBEoAesaa/g' if result != expected: raise RuntimeError("self-test failed") # crypt 3 result = crypt("dcl", "tUsch7fU", iterations=13) expected = "$p5k2$d$tUsch7fU$nqDkaxMDOFBeJsTSfABsyn.PYUXilHwL" if result != expected: raise RuntimeError("self-test failed") # 
crypt 4 (unicode) result = crypt(u'\u0399\u03c9\u03b1\u03bd\u03bd\u03b7\u03c2', '$p5k2$$KosHgqNo$9mjN8gqjt02hDoP0c2J0ABtLIwtot8cQ') expected = '$p5k2$$KosHgqNo$9mjN8gqjt02hDoP0c2J0ABtLIwtot8cQ' if result != expected: raise RuntimeError("self-test failed") if __name__ == '__main__': test_pbkdf2() # vim:set ts=4 sw=4 sts=4 expandtab: Beaker-1.6.3/beaker/crypto/pycrypto.py0000664000076500000240000000150011542263524017617 0ustar benstaff00000000000000"""Encryption module that uses pycryptopp or pycrypto""" try: # Pycryptopp is preferred over Crypto because Crypto has had # various periods of not being maintained, and pycryptopp uses # the Crypto++ library which is generally considered the 'gold standard' # of crypto implementations from pycryptopp.cipher import aes def aesEncrypt(data, key): cipher = aes.AES(key) return cipher.process(data) # magic. aesDecrypt = aesEncrypt except ImportError: from Crypto.Cipher import AES def aesEncrypt(data, key): cipher = AES.new(key) data = data + (" " * (16 - (len(data) % 16))) return cipher.encrypt(data) def aesDecrypt(data, key): cipher = AES.new(key) return cipher.decrypt(data).rstrip() def getKeyLength(): return 32 Beaker-1.6.3/beaker/crypto/util.py0000664000076500000240000000135611542263524016714 0ustar benstaff00000000000000from warnings import warn from beaker import util try: # Use PyCrypto (if available) from Crypto.Hash import HMAC as hmac, SHA as hmac_sha1 sha1 = hmac_sha1.new except ImportError: # PyCrypto not available. Use the Python standard library. import hmac # When using the stdlib, we have to make sure the hmac version and sha # version are compatible if util.py24: from sha import sha as sha1 import sha as hmac_sha1 else: # NOTE: We have to use the callable with hashlib (hashlib.sha1), # otherwise hmac only accepts the sha module object itself from hashlib import sha1 hmac_sha1 = sha1 if util.py24: from md5 import md5 else: from hashlib import md5 Beaker-1.6.3/beaker/exceptions.py0000664000076500000240000000067111542263524016577 0ustar benstaff00000000000000"""Beaker exception classes""" class BeakerException(Exception): pass class BeakerWarning(RuntimeWarning): """Issued at runtime.""" class CreationAbortedError(Exception): """Deprecated.""" class InvalidCacheBackendError(BeakerException, ImportError): pass class MissingCacheParameter(BeakerException): pass class LockError(BeakerException): pass class InvalidCryptoBackendError(BeakerException): pass Beaker-1.6.3/beaker/ext/0000775000076500000240000000000011723541367014645 5ustar benstaff00000000000000Beaker-1.6.3/beaker/ext/__init__.py0000664000076500000240000000000010761620546016742 0ustar benstaff00000000000000Beaker-1.6.3/beaker/ext/database.py0000664000076500000240000001352111671762332016764 0ustar benstaff00000000000000import cPickle import logging import pickle from datetime import datetime from beaker.container import OpenResourceNamespaceManager, Container from beaker.exceptions import InvalidCacheBackendError, MissingCacheParameter from beaker.synchronization import file_synchronizer, null_synchronizer from beaker.util import verify_directory, SyncDict log = logging.getLogger(__name__) sa = None pool = None types = None class DatabaseNamespaceManager(OpenResourceNamespaceManager): metadatas = SyncDict() tables = SyncDict() @classmethod def _init_dependencies(cls): global sa, pool, types if sa is not None: return try: import sqlalchemy as sa import sqlalchemy.pool as pool from sqlalchemy import types except ImportError: raise InvalidCacheBackendError("Database cache backend 
requires " "the 'sqlalchemy' library") def __init__(self, namespace, url=None, sa_opts=None, optimistic=False, table_name='beaker_cache', data_dir=None, lock_dir=None, **params): """Creates a database namespace manager ``url`` SQLAlchemy compliant db url ``sa_opts`` A dictionary of SQLAlchemy keyword options to initialize the engine with. ``optimistic`` Use optimistic session locking, note that this will result in an additional select when updating a cache value to compare version numbers. ``table_name`` The table name to use in the database for the cache. """ OpenResourceNamespaceManager.__init__(self, namespace) if sa_opts is None: sa_opts = params if lock_dir: self.lock_dir = lock_dir elif data_dir: self.lock_dir = data_dir + "/container_db_lock" if self.lock_dir: verify_directory(self.lock_dir) # Check to see if the table's been created before url = url or sa_opts['sa.url'] table_key = url + table_name def make_cache(): # Check to see if we have a connection pool open already meta_key = url + table_name def make_meta(): # SQLAlchemy pops the url, this ensures it sticks around # later sa_opts['sa.url'] = url engine = sa.engine_from_config(sa_opts, 'sa.') meta = sa.MetaData() meta.bind = engine return meta meta = DatabaseNamespaceManager.metadatas.get(meta_key, make_meta) # Create the table object and cache it now cache = sa.Table(table_name, meta, sa.Column('id', types.Integer, primary_key=True), sa.Column('namespace', types.String(255), nullable=False), sa.Column('accessed', types.DateTime, nullable=False), sa.Column('created', types.DateTime, nullable=False), sa.Column('data', types.PickleType, nullable=False), sa.UniqueConstraint('namespace') ) cache.create(checkfirst=True) return cache self.hash = {} self._is_new = False self.loaded = False self.cache = DatabaseNamespaceManager.tables.get(table_key, make_cache) def get_access_lock(self): return null_synchronizer() def get_creation_lock(self, key): return file_synchronizer( identifier ="databasecontainer/funclock/%s/%s" % ( self.namespace, key ), lock_dir = self.lock_dir) def do_open(self, flags, replace): # If we already loaded the data, don't bother loading it again if self.loaded: self.flags = flags return cache = self.cache result = sa.select([cache.c.data], cache.c.namespace==self.namespace ).execute().fetchone() if not result: self._is_new = True self.hash = {} else: self._is_new = False try: self.hash = result['data'] except (IOError, OSError, EOFError, cPickle.PickleError, pickle.PickleError): log.debug("Couln't load pickle data, creating new storage") self.hash = {} self._is_new = True self.flags = flags self.loaded = True def do_close(self): if self.flags is not None and (self.flags == 'c' or self.flags == 'w'): cache = self.cache if self._is_new: cache.insert().execute(namespace=self.namespace, data=self.hash, accessed=datetime.now(), created=datetime.now()) self._is_new = False else: cache.update(cache.c.namespace==self.namespace).execute( data=self.hash, accessed=datetime.now()) self.flags = None def do_remove(self): cache = self.cache cache.delete(cache.c.namespace==self.namespace).execute() self.hash = {} # We can retain the fact that we did a load attempt, but since the # file is gone this will be a new namespace should it be saved. 
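        # (The "file is gone" wording above is carried over from the file
        # backend; for this backend it is the namespace's row in the cache
        # table that has been deleted.)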
self._is_new = True def __getitem__(self, key): return self.hash[key] def __contains__(self, key): return self.hash.has_key(key) def __setitem__(self, key, value): self.hash[key] = value def __delitem__(self, key): del self.hash[key] def keys(self): return self.hash.keys() class DatabaseContainer(Container): namespace_manager = DatabaseNamespaceManager Beaker-1.6.3/beaker/ext/google.py0000664000076500000240000000755711542263524016504 0ustar benstaff00000000000000import cPickle import logging from datetime import datetime from beaker.container import OpenResourceNamespaceManager, Container from beaker.exceptions import InvalidCacheBackendError from beaker.synchronization import null_synchronizer log = logging.getLogger(__name__) db = None class GoogleNamespaceManager(OpenResourceNamespaceManager): tables = {} @classmethod def _init_dependencies(cls): global db if db is not None: return try: db = __import__('google.appengine.ext.db').appengine.ext.db except ImportError: raise InvalidCacheBackendError("Datastore cache backend requires the " "'google.appengine.ext' library") def __init__(self, namespace, table_name='beaker_cache', **params): """Creates a datastore namespace manager""" OpenResourceNamespaceManager.__init__(self, namespace) def make_cache(): table_dict = dict(created=db.DateTimeProperty(), accessed=db.DateTimeProperty(), data=db.BlobProperty()) table = type(table_name, (db.Model,), table_dict) return table self.table_name = table_name self.cache = GoogleNamespaceManager.tables.setdefault(table_name, make_cache()) self.hash = {} self._is_new = False self.loaded = False self.log_debug = logging.DEBUG >= log.getEffectiveLevel() # Google wants namespaces to start with letters, change the namespace # to start with a letter self.namespace = 'p%s' % self.namespace def get_access_lock(self): return null_synchronizer() def get_creation_lock(self, key): # this is weird, should probably be present return null_synchronizer() def do_open(self, flags, replace): # If we already loaded the data, don't bother loading it again if self.loaded: self.flags = flags return item = self.cache.get_by_key_name(self.namespace) if not item: self._is_new = True self.hash = {} else: self._is_new = False try: self.hash = cPickle.loads(str(item.data)) except (IOError, OSError, EOFError, cPickle.PickleError): if self.log_debug: log.debug("Couln't load pickle data, creating new storage") self.hash = {} self._is_new = True self.flags = flags self.loaded = True def do_close(self): if self.flags is not None and (self.flags == 'c' or self.flags == 'w'): if self._is_new: item = self.cache(key_name=self.namespace) item.data = cPickle.dumps(self.hash) item.created = datetime.now() item.accessed = datetime.now() item.put() self._is_new = False else: item = self.cache.get_by_key_name(self.namespace) item.data = cPickle.dumps(self.hash) item.accessed = datetime.now() item.put() self.flags = None def do_remove(self): item = self.cache.get_by_key_name(self.namespace) item.delete() self.hash = {} # We can retain the fact that we did a load attempt, but since the # file is gone this will be a new namespace should it be saved. 
self._is_new = True def __getitem__(self, key): return self.hash[key] def __contains__(self, key): return self.hash.has_key(key) def __setitem__(self, key, value): self.hash[key] = value def __delitem__(self, key): del self.hash[key] def keys(self): return self.hash.keys() class GoogleContainer(Container): namespace_class = GoogleNamespaceManager Beaker-1.6.3/beaker/ext/memcached.py0000664000076500000240000001213111671762332017122 0ustar benstaff00000000000000from __future__ import with_statement from beaker.container import NamespaceManager, Container from beaker.exceptions import InvalidCacheBackendError, MissingCacheParameter from beaker.synchronization import file_synchronizer from beaker.util import verify_directory, SyncDict import warnings _client_libs = {} def _load_client(name='auto'): if name in _client_libs: return _client_libs[name] def _pylibmc(): global pylibmc import pylibmc return pylibmc def _cmemcache(): global cmemcache import cmemcache warnings.warn("cmemcache is known to have serious " "concurrency issues; consider using 'memcache' " "or 'pylibmc'") return cmemcache def _memcache(): global memcache import memcache return memcache def _auto(): for _client in (_pylibmc, _cmemcache, _memcache): try: return _client() except ImportError: pass else: raise InvalidCacheBackendError( "Memcached cache backend requires one " "of: 'pylibmc' or 'memcache' to be installed.") clients = { 'pylibmc': _pylibmc, 'cmemcache': _cmemcache, 'memcache': _memcache, 'auto': _auto } _client_libs[name] = clib = clients[name]() return clib class MemcachedNamespaceManager(NamespaceManager): """Provides the :class:`.NamespaceManager` API over a memcache client library.""" clients = SyncDict() def __new__(cls, *args, **kw): memcache_module = kw.pop('memcache_module', 'auto') memcache_client = _load_client(memcache_module) if memcache_module == 'pylibmc' or \ memcache_client.__name__.startswith('pylibmc'): return object.__new__(PyLibMCNamespaceManager) else: return object.__new__(MemcachedNamespaceManager) def __init__(self, namespace, url, memcache_module='auto', data_dir=None, lock_dir=None, **kw): NamespaceManager.__init__(self, namespace) _memcache_module = _client_libs[memcache_module] if not url: raise MissingCacheParameter("url is required") if lock_dir: self.lock_dir = lock_dir elif data_dir: self.lock_dir = data_dir + "/container_mcd_lock" if self.lock_dir: verify_directory(self.lock_dir) self.mc = MemcachedNamespaceManager.clients.get( (memcache_module, url), _memcache_module.Client, url.split(';')) def get_creation_lock(self, key): return file_synchronizer( identifier="memcachedcontainer/funclock/%s/%s" % (self.namespace, key), lock_dir=self.lock_dir) def _format_key(self, key): return self.namespace + '_' + key.replace(' ', '\302\267') def __getitem__(self, key): return self.mc.get(self._format_key(key)) def __contains__(self, key): value = self.mc.get(self._format_key(key)) return value is not None def has_key(self, key): return key in self def set_value(self, key, value, expiretime=None): if expiretime: self.mc.set(self._format_key(key), value, time=expiretime) else: self.mc.set(self._format_key(key), value) def __setitem__(self, key, value): self.set_value(key, value) def __delitem__(self, key): self.mc.delete(self._format_key(key)) def do_remove(self): self.mc.flush_all() def keys(self): raise NotImplementedError( "Memcache caching does not " "support iteration of all cache keys") class PyLibMCNamespaceManager(MemcachedNamespaceManager): """Provide thread-local support for pylibmc.""" 
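    # Illustrative note (not part of the original module): this subclass is
    # selected automatically by MemcachedNamespaceManager.__new__ when the
    # pylibmc client library is in use, so a construction such as the
    # following (the URL and lock_dir are arbitrary examples) yields a
    # manager backed by a pylibmc.ThreadMappedPool:
    #
    #     manager = MemcachedNamespaceManager('my_ns', '127.0.0.1:11211',
    #                                         memcache_module='pylibmc',
    #                                         lock_dir='/tmp/beaker_locks')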
def __init__(self, *arg, **kw): super(PyLibMCNamespaceManager, self).__init__(*arg, **kw) self.pool = pylibmc.ThreadMappedPool(self.mc) def __getitem__(self, key): with self.pool.reserve() as mc: return mc.get(self._format_key(key)) def __contains__(self, key): with self.pool.reserve() as mc: value = mc.get(self._format_key(key)) return value is not None def has_key(self, key): return key in self def set_value(self, key, value, expiretime=None): with self.pool.reserve() as mc: if expiretime: mc.set(self._format_key(key), value, time=expiretime) else: mc.set(self._format_key(key), value) def __setitem__(self, key, value): self.set_value(key, value) def __delitem__(self, key): with self.pool.reserve() as mc: mc.delete(self._format_key(key)) def do_remove(self): with self.pool.reserve() as mc: mc.flush_all() class MemcachedContainer(Container): """Container class which invokes :class:`.MemcacheNamespaceManager`.""" namespace_class = MemcachedNamespaceManager Beaker-1.6.3/beaker/ext/sqla.py0000664000076500000240000001100511542263524016147 0ustar benstaff00000000000000import cPickle import logging import pickle from datetime import datetime from beaker.container import OpenResourceNamespaceManager, Container from beaker.exceptions import InvalidCacheBackendError, MissingCacheParameter from beaker.synchronization import file_synchronizer, null_synchronizer from beaker.util import verify_directory, SyncDict log = logging.getLogger(__name__) sa = None class SqlaNamespaceManager(OpenResourceNamespaceManager): binds = SyncDict() tables = SyncDict() @classmethod def _init_dependencies(cls): global sa if sa is not None: return try: import sqlalchemy as sa except ImportError: raise InvalidCacheBackendError("SQLAlchemy, which is required by " "this backend, is not installed") def __init__(self, namespace, bind, table, data_dir=None, lock_dir=None, **kwargs): """Create a namespace manager for use with a database table via SQLAlchemy. ``bind`` SQLAlchemy ``Engine`` or ``Connection`` object ``table`` SQLAlchemy ``Table`` object in which to store namespace data. This should usually be something created by ``make_cache_table``. 
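        An illustrative sketch of preparing the ``bind`` and ``table``
        arguments (the in-memory SQLite engine and lock directory are
        arbitrary examples; the table spelled out below is equivalent to
        the one ``make_cache_table`` returns)::

            import sqlalchemy as sa
            from beaker.ext.sqla import SqlaNamespaceManager

            engine = sa.create_engine('sqlite://')
            metadata = sa.MetaData()
            table = sa.Table('beaker_cache', metadata,
                             sa.Column('namespace', sa.String(255),
                                       primary_key=True),
                             sa.Column('accessed', sa.DateTime, nullable=False),
                             sa.Column('created', sa.DateTime, nullable=False),
                             sa.Column('data', sa.PickleType, nullable=False))
            metadata.create_all(engine)

            manager = SqlaNamespaceManager('my_namespace', engine, table,
                                           lock_dir='/tmp/beaker_locks')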
""" OpenResourceNamespaceManager.__init__(self, namespace) if lock_dir: self.lock_dir = lock_dir elif data_dir: self.lock_dir = data_dir + "/container_db_lock" if self.lock_dir: verify_directory(self.lock_dir) self.bind = self.__class__.binds.get(str(bind.url), lambda: bind) self.table = self.__class__.tables.get('%s:%s' % (bind.url, table.name), lambda: table) self.hash = {} self._is_new = False self.loaded = False def get_access_lock(self): return null_synchronizer() def get_creation_lock(self, key): return file_synchronizer( identifier ="databasecontainer/funclock/%s" % self.namespace, lock_dir=self.lock_dir) def do_open(self, flags, replace): if self.loaded: self.flags = flags return select = sa.select([self.table.c.data], (self.table.c.namespace == self.namespace)) result = self.bind.execute(select).fetchone() if not result: self._is_new = True self.hash = {} else: self._is_new = False try: self.hash = result['data'] except (IOError, OSError, EOFError, cPickle.PickleError, pickle.PickleError): log.debug("Couln't load pickle data, creating new storage") self.hash = {} self._is_new = True self.flags = flags self.loaded = True def do_close(self): if self.flags is not None and (self.flags == 'c' or self.flags == 'w'): if self._is_new: insert = self.table.insert() self.bind.execute(insert, namespace=self.namespace, data=self.hash, accessed=datetime.now(), created=datetime.now()) self._is_new = False else: update = self.table.update(self.table.c.namespace == self.namespace) self.bind.execute(update, data=self.hash, accessed=datetime.now()) self.flags = None def do_remove(self): delete = self.table.delete(self.table.c.namespace == self.namespace) self.bind.execute(delete) self.hash = {} self._is_new = True def __getitem__(self, key): return self.hash[key] def __contains__(self, key): return self.hash.has_key(key) def __setitem__(self, key, value): self.hash[key] = value def __delitem__(self, key): del self.hash[key] def keys(self): return self.hash.keys() class SqlaContainer(Container): namespace_manager = SqlaNamespaceManager def make_cache_table(metadata, table_name='beaker_cache'): """Return a ``Table`` object suitable for storing cached values for the namespace manager. Do not create the table.""" return sa.Table(table_name, metadata, sa.Column('namespace', sa.String(255), primary_key=True), sa.Column('accessed', sa.DateTime, nullable=False), sa.Column('created', sa.DateTime, nullable=False), sa.Column('data', sa.PickleType, nullable=False)) Beaker-1.6.3/beaker/middleware.py0000664000076500000240000001443311650050662016531 0ustar benstaff00000000000000import warnings try: from paste.registry import StackedObjectProxy beaker_session = StackedObjectProxy(name="Beaker Session") beaker_cache = StackedObjectProxy(name="Cache Manager") except: beaker_cache = None beaker_session = None from beaker.cache import CacheManager from beaker.session import Session, SessionObject from beaker.util import coerce_cache_params, coerce_session_params, \ parse_cache_config_options class CacheMiddleware(object): cache = beaker_cache def __init__(self, app, config=None, environ_key='beaker.cache', **kwargs): """Initialize the Cache Middleware The Cache middleware will make a CacheManager instance available every request under the ``environ['beaker.cache']`` key by default. The location in environ can be changed by setting ``environ_key``. ``config`` dict All settings should be prefixed by 'cache.'. 
This method of passing variables is intended for Paste and other
            setups that accumulate multiple component settings in a single
            dictionary. If config contains *no cache. prefixed args*, then
            *all* of the config options will be used to initialize the
            Cache objects.

        ``environ_key``
            Location where the Cache instance will be keyed in the WSGI
            environ

        ``**kwargs``
            All keyword arguments are assumed to be cache settings and
            will override any settings found in ``config``

        """
        self.app = app
        config = config or {}

        self.options = {}

        # Update the options with the parsed config
        self.options.update(parse_cache_config_options(config))

        # Add any options from kwargs, but leave out the defaults this
        # time
        self.options.update(
            parse_cache_config_options(kwargs, include_defaults=False))

        # Assume all keys are intended for cache if none are prefixed with
        # 'cache.'
        if not self.options and config:
            self.options = config

        self.options.update(kwargs)
        self.cache_manager = CacheManager(**self.options)
        self.environ_key = environ_key

    def __call__(self, environ, start_response):
        if environ.get('paste.registry'):
            if environ['paste.registry'].reglist:
                environ['paste.registry'].register(self.cache,
                                                   self.cache_manager)
        environ[self.environ_key] = self.cache_manager
        return self.app(environ, start_response)


class SessionMiddleware(object):
    session = beaker_session

    def __init__(self, wrap_app, config=None, environ_key='beaker.session',
                 **kwargs):
        """Initialize the Session Middleware

        The Session middleware will make a lazy session instance
        available every request under the ``environ['beaker.session']``
        key by default. The location in environ can be changed by
        setting ``environ_key``.

        ``config``
            dict
            All settings should be prefixed by 'session.'. This
            method of passing variables is intended for Paste and other
            setups that accumulate multiple component settings in a
            single dictionary. If config contains *no session. prefixed
            args*, then *all* of the config options will be used to
            initialize the Session objects.

        ``environ_key``
            Location where the Session instance will be keyed in the
            WSGI environ

        ``**kwargs``
            All keyword arguments are assumed to be session settings and
            will override any settings found in ``config``

        """
        config = config or {}

        # Load up the default params
        self.options = dict(invalidate_corrupt=True, type=None,
                            data_dir=None, key='beaker.session.id',
                            timeout=None, secret=None, log_file=None)

        # Pull out any config args meant for beaker session, if there are any
        for dct in [config, kwargs]:
            for key, val in dct.iteritems():
                if key.startswith('beaker.session.'):
                    self.options[key[15:]] = val
                if key.startswith('session.'):
                    self.options[key[8:]] = val
                if key.startswith('session_'):
                    warnings.warn('Session options should start with session. '
                                  'instead of session_.', DeprecationWarning, 2)
                    self.options[key[8:]] = val

        # Coerce and validate session params
        coerce_session_params(self.options)

        # Assume all keys are intended for the session if none are prefixed
        # with 'session.'
if not self.options and config: self.options = config self.options.update(kwargs) self.wrap_app = self.app = wrap_app self.environ_key = environ_key def __call__(self, environ, start_response): session = SessionObject(environ, **self.options) if environ.get('paste.registry'): if environ['paste.registry'].reglist: environ['paste.registry'].register(self.session, session) environ[self.environ_key] = session environ['beaker.get_session'] = self._get_session if 'paste.testing_variables' in environ and 'webtest_varname' in self.options: environ['paste.testing_variables'][self.options['webtest_varname']] = session def session_start_response(status, headers, exc_info = None): if session.accessed(): session.persist() if session.__dict__['_headers']['set_cookie']: cookie = session.__dict__['_headers']['cookie_out'] if cookie: headers.append(('Set-cookie', cookie)) return start_response(status, headers, exc_info) return self.wrap_app(environ, session_start_response) def _get_session(self): return Session({}, use_cookies=False, **self.options) def session_filter_factory(global_conf, **kwargs): def filter(app): return SessionMiddleware(app, global_conf, **kwargs) return filter def session_filter_app_factory(app, global_conf, **kwargs): return SessionMiddleware(app, global_conf, **kwargs) Beaker-1.6.3/beaker/session.py0000664000076500000240000006043111723461175016104 0ustar benstaff00000000000000import Cookie import os from datetime import datetime, timedelta import time from beaker.crypto import hmac as HMAC, hmac_sha1 as SHA1, md5 from beaker import crypto, util from beaker.cache import clsmap from beaker.exceptions import BeakerException, InvalidCryptoBackendError from base64 import b64encode, b64decode __all__ = ['SignedCookie', 'Session'] try: import uuid def _session_id(): return uuid.uuid4().hex except ImportError: import random if hasattr(os, 'getpid'): getpid = os.getpid else: def getpid(): return '' def _session_id(): id_str = "%f%s%f%s" % ( time.time(), id({}), random.random(), getpid() ) if util.py3k: return md5( md5( id_str.encode('ascii') ).hexdigest().encode('ascii') ).hexdigest() else: return md5(md5(id_str).hexdigest()).hexdigest() class SignedCookie(Cookie.BaseCookie): """Extends python cookie to give digital signature support""" def __init__(self, secret, input=None): self.secret = secret Cookie.BaseCookie.__init__(self, input) def value_decode(self, val): val = val.strip('"') sig = HMAC.new(self.secret, val[40:], SHA1).hexdigest() # Avoid timing attacks invalid_bits = 0 input_sig = val[:40] if len(sig) != len(input_sig): return None, val for a, b in zip(sig, input_sig): invalid_bits += a != b if invalid_bits: return None, val else: return val[40:], val def value_encode(self, val): sig = HMAC.new(self.secret, val, SHA1).hexdigest() return str(val), ("%s%s" % (sig, val)) class Session(dict): """Session object that uses container package for storage. :param invalidate_corrupt: How to handle corrupt data when loading. When set to True, then corrupt data will be silently invalidated and a new session created, otherwise invalid data will cause an exception. :type invalidate_corrupt: bool :param use_cookies: Whether or not cookies should be created. When set to False, it is assumed the user will handle storing the session on their own. :type use_cookies: bool :param type: What data backend type should be used to store the underlying session data :param key: The name the cookie should be set to. :param timeout: How long session data is considered valid. 
This is used regardless of the cookie being present or not to determine whether session data is still valid. :type timeout: int :param cookie_domain: Domain to use for the cookie. :param secure: Whether or not the cookie should only be sent over SSL. :param httponly: Whether or not the cookie should only be accessible by the browser not by JavaScript. :param encrypt_key: The key to use for the local session encryption, if not provided the session will not be encrypted. :param validate_key: The key used to sign the local encrypted session """ def __init__(self, request, id=None, invalidate_corrupt=False, use_cookies=True, type=None, data_dir=None, key='beaker.session.id', timeout=None, cookie_expires=True, cookie_domain=None, secret=None, secure=False, namespace_class=None, httponly=False, encrypt_key=None, validate_key=None, **namespace_args): if not type: if data_dir: self.type = 'file' else: self.type = 'memory' else: self.type = type self.namespace_class = namespace_class or clsmap[self.type] self.namespace_args = namespace_args self.request = request self.data_dir = data_dir self.key = key self.timeout = timeout self.use_cookies = use_cookies self.cookie_expires = cookie_expires # Default cookie domain/path self._domain = cookie_domain self._path = '/' self.was_invalidated = False self.secret = secret self.secure = secure self.httponly = httponly self.encrypt_key = encrypt_key self.validate_key = validate_key self.id = id self.accessed_dict = {} self.invalidate_corrupt = invalidate_corrupt if self.use_cookies: cookieheader = request.get('cookie', '') if secret: try: self.cookie = SignedCookie(secret, input=cookieheader) except Cookie.CookieError: self.cookie = SignedCookie(secret, input=None) else: self.cookie = Cookie.SimpleCookie(input=cookieheader) if not self.id and self.key in self.cookie: self.id = self.cookie[self.key].value self.is_new = self.id is None if self.is_new: self._create_id() self['_accessed_time'] = self['_creation_time'] = time.time() else: try: self.load() except Exception, e: if invalidate_corrupt: util.warn( "Invalidating corrupt session %s; " "error was: %s. Set invalidate_corrupt=False " "to propagate this exception." 
% (self.id, e)) self.invalidate() else: raise def _set_cookie_values(self, expires=None): self.cookie[self.key] = self.id if self._domain: self.cookie[self.key]['domain'] = self._domain if self.secure: self.cookie[self.key]['secure'] = True self._set_cookie_http_only() self.cookie[self.key]['path'] = self._path self._set_cookie_expires(expires) def _set_cookie_expires(self, expires): if expires is None: if self.cookie_expires is not True: if self.cookie_expires is False: expires = datetime.fromtimestamp(0x7FFFFFFF) elif isinstance(self.cookie_expires, timedelta): expires = datetime.utcnow() + self.cookie_expires elif isinstance(self.cookie_expires, datetime): expires = self.cookie_expires else: raise ValueError("Invalid argument for cookie_expires: %s" % repr(self.cookie_expires)) else: expires = None if expires is not None: if not self.cookie or self.key not in self.cookie: self.cookie[self.key] = self.id self.cookie[self.key]['expires'] = \ expires.strftime("%a, %d-%b-%Y %H:%M:%S GMT") return expires def _update_cookie_out(self, set_cookie=True): self.request['cookie_out'] = self.cookie[self.key].output(header='') self.request['set_cookie'] = set_cookie def _set_cookie_http_only(self): try: if self.httponly: self.cookie[self.key]['httponly'] = True except Cookie.CookieError, e: if 'Invalid Attribute httponly' not in str(e): raise util.warn('Python 2.6+ is required to use httponly') def _create_id(self, set_new=True): self.id = _session_id() if set_new: self.is_new = True self.last_accessed = None if self.use_cookies: self._set_cookie_values() sc = set_new == False self._update_cookie_out(set_cookie=sc) @property def created(self): return self['_creation_time'] def _set_domain(self, domain): self['_domain'] = domain self.cookie[self.key]['domain'] = domain self._update_cookie_out() def _get_domain(self): return self._domain domain = property(_get_domain, _set_domain) def _set_path(self, path): self['_path'] = self._path = path self.cookie[self.key]['path'] = path self._update_cookie_out() def _get_path(self): return self._path path = property(_get_path, _set_path) def _encrypt_data(self, session_data=None): """Serialize, encipher, and base64 the session dict""" session_data = session_data or self.copy() if self.encrypt_key: nonce = b64encode(os.urandom(40))[:8] encrypt_key = crypto.generateCryptoKeys(self.encrypt_key, self.validate_key + nonce, 1) data = util.pickle.dumps(session_data, 2) return nonce + b64encode(crypto.aesEncrypt(data, encrypt_key)) else: data = util.pickle.dumps(session_data, 2) return b64encode(data) def _decrypt_data(self, session_data): """Bas64, decipher, then un-serialize the data for the session dict""" if self.encrypt_key: try: nonce = session_data[:8] encrypt_key = crypto.generateCryptoKeys(self.encrypt_key, self.validate_key + nonce, 1) payload = b64decode(session_data[8:]) data = crypto.aesDecrypt(payload, encrypt_key) except: # As much as I hate a bare except, we get some insane errors # here that get tossed when crypto fails, so we raise the # 'right' exception if self.invalidate_corrupt: return None else: raise try: return util.pickle.loads(data) except: if self.invalidate_corrupt: return None else: raise else: data = b64decode(session_data) return util.pickle.loads(data) def _delete_cookie(self): self.request['set_cookie'] = True expires = datetime.utcnow() - timedelta(365) self._set_cookie_values(expires) self._update_cookie_out() def delete(self): """Deletes the session from the persistent storage, and sends an expired cookie out""" if self.use_cookies: 
            self._delete_cookie()
        self.clear()

    def invalidate(self):
        """Invalidates this session, creates a new session id, returns
        to the is_new state"""
        self.clear()
        self.was_invalidated = True
        self._create_id()
        self.load()

    def load(self):
        "Loads the data for this session from persistent storage"
        self.namespace = self.namespace_class(self.id,
            data_dir=self.data_dir,
            digest_filenames=False,
            **self.namespace_args)
        now = time.time()
        if self.use_cookies:
            self.request['set_cookie'] = True

        self.namespace.acquire_read_lock()
        timed_out = False
        try:
            self.clear()
            try:
                session_data = self.namespace['session']

                if (session_data is not None and self.encrypt_key):
                    session_data = self._decrypt_data(session_data)

                # Memcached always returns a key; it's None when it's not
                # present
                if session_data is None:
                    session_data = {
                        '_creation_time': now,
                        '_accessed_time': now
                    }
                    self.is_new = True
            except (KeyError, TypeError):
                session_data = {
                    '_creation_time': now,
                    '_accessed_time': now
                }
                self.is_new = True

            if session_data is None or len(session_data) == 0:
                session_data = {
                    '_creation_time': now,
                    '_accessed_time': now
                }
                self.is_new = True

            if self.timeout is not None and \
               now - session_data['_accessed_time'] > self.timeout:
                timed_out = True
            else:
                # Properly set the last_accessed time, which is different
                # than the *currently* _accessed_time
                if self.is_new or '_accessed_time' not in session_data:
                    self.last_accessed = None
                else:
                    self.last_accessed = session_data['_accessed_time']

                # Update the current _accessed_time
                session_data['_accessed_time'] = now

                # Set the path if applicable
                if '_path' in session_data:
                    self._path = session_data['_path']
                self.update(session_data)
                self.accessed_dict = session_data.copy()
        finally:
            self.namespace.release_read_lock()
        if timed_out:
            self.invalidate()

    def save(self, accessed_only=False):
        """Saves the data for this session to persistent storage

        If accessed_only is True, then only the original data loaded
        at the beginning of the request will be saved, with the updated
        last accessed time.

        """
        # Look to see if it's a new session that was only accessed
        # Don't save it under that case
        if accessed_only and self.is_new:
            return None

        # this session might not have a namespace yet or the session id
        # might have been regenerated
        if not hasattr(self, 'namespace') or self.namespace.namespace != self.id:
            self.namespace = self.namespace_class(
                self.id,
                data_dir=self.data_dir,
                digest_filenames=False,
                **self.namespace_args)

        self.namespace.acquire_write_lock(replace=True)
        try:
            if accessed_only:
                data = dict(self.accessed_dict.items())
            else:
                data = dict(self.items())

            if self.encrypt_key:
                data = self._encrypt_data(data)

            # Save the data
            if not data and 'session' in self.namespace:
                del self.namespace['session']
            else:
                self.namespace['session'] = data
        finally:
            self.namespace.release_write_lock()
        if self.use_cookies and self.is_new:
            self.request['set_cookie'] = True

    def revert(self):
        """Revert the session to its original state from its first
        access in the request"""
        self.clear()
        self.update(self.accessed_dict)

    def regenerate_id(self):
        """
            Creates a new session id, retains all session data.

            It's good security practice to regenerate the id after a
            client elevates privileges.

        """
        self._create_id(set_new=False)

    # TODO: I think both these methods should be removed.  They're from
    # the original mod_python code i was ripping off but they really
    # have no use here.
    def lock(self):
        """Locks this session against other processes/threads.  This is
        automatic when load/save is called.
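
        If called directly, an illustrative pattern (not from the
        original docs) is::

            session.lock()
            try:
                pass  # ... read/modify the session ...
            finally:
                session.unlock()
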
***use with caution*** and always with a corresponding 'unlock' inside a "finally:" block, as a stray lock typically cannot be unlocked without shutting down the whole application. """ self.namespace.acquire_write_lock() def unlock(self): """Unlocks this session against other processes/threads. This is automatic when load/save is called. ***use with caution*** and always within a "finally:" block, as a stray lock typically cannot be unlocked without shutting down the whole application. """ self.namespace.release_write_lock() class CookieSession(Session): """Pure cookie-based session Options recognized when using cookie-based sessions are slightly more restricted than general sessions. :param key: The name the cookie should be set to. :param timeout: How long session data is considered valid. This is used regardless of the cookie being present or not to determine whether session data is still valid. :type timeout: int :param cookie_domain: Domain to use for the cookie. :param secure: Whether or not the cookie should only be sent over SSL. :param httponly: Whether or not the cookie should only be accessible by the browser not by JavaScript. :param encrypt_key: The key to use for the local session encryption, if not provided the session will not be encrypted. :param validate_key: The key used to sign the local encrypted session """ def __init__(self, request, key='beaker.session.id', timeout=None, cookie_expires=True, cookie_domain=None, encrypt_key=None, validate_key=None, secure=False, httponly=False, **kwargs): if not crypto.has_aes and encrypt_key: raise InvalidCryptoBackendError("No AES library is installed, can't generate " "encrypted cookie-only Session.") self.request = request self.key = key self.timeout = timeout self.cookie_expires = cookie_expires self.encrypt_key = encrypt_key self.validate_key = validate_key self.request['set_cookie'] = False self.secure = secure self.httponly = httponly self._domain = cookie_domain self._path = '/' try: cookieheader = request['cookie'] except KeyError: cookieheader = '' if validate_key is None: raise BeakerException("No validate_key specified for Cookie only " "Session.") try: self.cookie = SignedCookie(validate_key, input=cookieheader) except Cookie.CookieError: self.cookie = SignedCookie(validate_key, input=None) self['_id'] = _session_id() self.is_new = True # If we have a cookie, load it if self.key in self.cookie and self.cookie[self.key].value is not None: self.is_new = False try: cookie_data = self.cookie[self.key].value self.update(self._decrypt_data(cookie_data)) self._path = self.get('_path', '/') except: pass if self.timeout is not None and time.time() - \ self['_accessed_time'] > self.timeout: self.clear() self.accessed_dict = self.copy() self._create_cookie() def created(self): return self['_creation_time'] created = property(created) def id(self): return self['_id'] id = property(id) def _set_domain(self, domain): self['_domain'] = domain self._domain = domain def _get_domain(self): return self._domain domain = property(_get_domain, _set_domain) def _set_path(self, path): self['_path'] = self._path = path def _get_path(self): return self._path path = property(_get_path, _set_path) def save(self, accessed_only=False): """Saves the data for this session to persistent storage""" if accessed_only and self.is_new: return if accessed_only: self.clear() self.update(self.accessed_dict) self._create_cookie() def expire(self): """Delete the 'expires' attribute on this Session, if any.""" self.pop('_expires', None) def _create_cookie(self): 
        if '_creation_time' not in self:
            self['_creation_time'] = time.time()
        if '_id' not in self:
            self['_id'] = _session_id()
        self['_accessed_time'] = time.time()

        val = self._encrypt_data()
        if len(val) > 4064:
            raise BeakerException("Cookie value is too long to store")

        self.cookie[self.key] = val

        if '_expires' in self:
            expires = self['_expires']
        else:
            expires = None
        expires = self._set_cookie_expires(expires)
        if expires is not None:
            self['_expires'] = expires

        if '_domain' in self:
            self.cookie[self.key]['domain'] = self['_domain']
        elif self._domain:
            self.cookie[self.key]['domain'] = self._domain
        if self.secure:
            self.cookie[self.key]['secure'] = True
        self._set_cookie_http_only()

        self.cookie[self.key]['path'] = self.get('_path', '/')

        self.request['cookie_out'] = self.cookie[self.key].output(header='')
        self.request['set_cookie'] = True

    def delete(self):
        """Delete the cookie, and clear the session"""
        # Send a delete cookie request
        self._delete_cookie()
        self.clear()

    def invalidate(self):
        """Clear the contents and start a new session"""
        self.delete()
        self['_id'] = _session_id()


class SessionObject(object):
    """Session proxy/lazy creator

    This object proxies access to the actual session object, so that in
    the case that the session hasn't been used before, it will be set
    up. This avoids creating and loading the session from persistent
    storage unless it is actually used during the request.

    """
    def __init__(self, environ, **params):
        self.__dict__['_params'] = params
        self.__dict__['_environ'] = environ
        self.__dict__['_sess'] = None
        self.__dict__['_headers'] = {}

    def _session(self):
        """Lazy initial creation of session object"""
        if self.__dict__['_sess'] is None:
            params = self.__dict__['_params']
            environ = self.__dict__['_environ']
            self.__dict__['_headers'] = req = {'cookie_out': None}
            req['cookie'] = environ.get('HTTP_COOKIE')
            if params.get('type') == 'cookie':
                self.__dict__['_sess'] = CookieSession(req, **params)
            else:
                self.__dict__['_sess'] = Session(req, use_cookies=True,
                                                 **params)
        return self.__dict__['_sess']

    def __getattr__(self, attr):
        return getattr(self._session(), attr)

    def __setattr__(self, attr, value):
        setattr(self._session(), attr, value)

    def __delattr__(self, name):
        self._session().__delattr__(name)

    def __getitem__(self, key):
        return self._session()[key]

    def __setitem__(self, key, value):
        self._session()[key] = value

    def __delitem__(self, key):
        self._session().__delitem__(key)

    def __repr__(self):
        return self._session().__repr__()

    def __iter__(self):
        """Only works for proxying to a dict"""
        return iter(self._session().keys())

    def __contains__(self, key):
        return key in self._session()

    def get_by_id(self, id):
        """Loads a session given a session ID"""
        params = self.__dict__['_params']
        session = Session({}, use_cookies=False, id=id, **params)
        if session.is_new:
            return None
        return session

    def save(self):
        self.__dict__['_dirty'] = True

    def delete(self):
        self.__dict__['_dirty'] = True
        self._session().delete()

    def persist(self):
        """Persist the session to the storage

        If it's set to autosave, then the entire session will be saved
        regardless of whether save() has been called. Otherwise, just
        the accessed time will be updated if save() was not called, or
        the session will be saved if save() was called.
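
        An illustrative sketch of the call sequence a WSGI middleware
        performs (mirroring SessionMiddleware in beaker.middleware)::

            session = SessionObject(environ)
            # ... the wrapped application reads/writes the session ...
            if session.accessed():
                session.persist()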
""" if self.__dict__['_params'].get('auto'): self._session().save() else: if self.__dict__.get('_dirty'): self._session().save() else: self._session().save(accessed_only=True) def dirty(self): return self.__dict__.get('_dirty', False) def accessed(self): """Returns whether or not the session has been accessed""" return self.__dict__['_sess'] is not None Beaker-1.6.3/beaker/synchronization.py0000664000076500000240000002607011542263524017660 0ustar benstaff00000000000000"""Synchronization functions. File- and mutex-based mutual exclusion synchronizers are provided, as well as a name-based mutex which locks within an application based on a string name. """ import os import sys import tempfile try: import threading as _threading except ImportError: import dummy_threading as _threading # check for fcntl module try: sys.getwindowsversion() has_flock = False except: try: import fcntl has_flock = True except ImportError: has_flock = False from beaker import util from beaker.exceptions import LockError __all__ = ["file_synchronizer", "mutex_synchronizer", "null_synchronizer", "NameLock", "_threading"] class NameLock(object): """a proxy for an RLock object that is stored in a name based registry. Multiple threads can get a reference to the same RLock based on the name alone, and synchronize operations related to that name. """ locks = util.WeakValuedRegistry() class NLContainer(object): def __init__(self, reentrant): if reentrant: self.lock = _threading.RLock() else: self.lock = _threading.Lock() def __call__(self): return self.lock def __init__(self, identifier = None, reentrant = False): if identifier is None: self._lock = NameLock.NLContainer(reentrant) else: self._lock = NameLock.locks.get(identifier, NameLock.NLContainer, reentrant) def acquire(self, wait = True): return self._lock().acquire(wait) def release(self): self._lock().release() _synchronizers = util.WeakValuedRegistry() def _synchronizer(identifier, cls, **kwargs): return _synchronizers.sync_get((identifier, cls), cls, identifier, **kwargs) def file_synchronizer(identifier, **kwargs): if not has_flock or 'lock_dir' not in kwargs: return mutex_synchronizer(identifier) else: return _synchronizer(identifier, FileSynchronizer, **kwargs) def mutex_synchronizer(identifier, **kwargs): return _synchronizer(identifier, ConditionSynchronizer, **kwargs) class null_synchronizer(object): """A 'null' synchronizer, which provides the :class:`.SynchronizerImpl` interface without any locking. """ def acquire_write_lock(self, wait=True): return True def acquire_read_lock(self): pass def release_write_lock(self): pass def release_read_lock(self): pass acquire = acquire_write_lock release = release_write_lock class SynchronizerImpl(object): """Base class for a synchronization object that allows multiple readers, single writers. 
""" def __init__(self): self._state = util.ThreadLocal() class SyncState(object): __slots__ = 'reentrantcount', 'writing', 'reading' def __init__(self): self.reentrantcount = 0 self.writing = False self.reading = False def state(self): if not self._state.has(): state = SynchronizerImpl.SyncState() self._state.put(state) return state else: return self._state.get() state = property(state) def release_read_lock(self): state = self.state if state.writing: raise LockError("lock is in writing state") if not state.reading: raise LockError("lock is not in reading state") if state.reentrantcount == 1: self.do_release_read_lock() state.reading = False state.reentrantcount -= 1 def acquire_read_lock(self, wait = True): state = self.state if state.writing: raise LockError("lock is in writing state") if state.reentrantcount == 0: x = self.do_acquire_read_lock(wait) if (wait or x): state.reentrantcount += 1 state.reading = True return x elif state.reading: state.reentrantcount += 1 return True def release_write_lock(self): state = self.state if state.reading: raise LockError("lock is in reading state") if not state.writing: raise LockError("lock is not in writing state") if state.reentrantcount == 1: self.do_release_write_lock() state.writing = False state.reentrantcount -= 1 release = release_write_lock def acquire_write_lock(self, wait = True): state = self.state if state.reading: raise LockError("lock is in reading state") if state.reentrantcount == 0: x = self.do_acquire_write_lock(wait) if (wait or x): state.reentrantcount += 1 state.writing = True return x elif state.writing: state.reentrantcount += 1 return True acquire = acquire_write_lock def do_release_read_lock(self): raise NotImplementedError() def do_acquire_read_lock(self): raise NotImplementedError() def do_release_write_lock(self): raise NotImplementedError() def do_acquire_write_lock(self): raise NotImplementedError() class FileSynchronizer(SynchronizerImpl): """A synchronizer which locks using flock(). 
""" def __init__(self, identifier, lock_dir): super(FileSynchronizer, self).__init__() self._filedescriptor = util.ThreadLocal() if lock_dir is None: lock_dir = tempfile.gettempdir() else: lock_dir = lock_dir self.filename = util.encoded_path( lock_dir, [identifier], extension='.lock' ) def _filedesc(self): return self._filedescriptor.get() _filedesc = property(_filedesc) def _open(self, mode): filedescriptor = self._filedesc if filedescriptor is None: filedescriptor = os.open(self.filename, mode) self._filedescriptor.put(filedescriptor) return filedescriptor def do_acquire_read_lock(self, wait): filedescriptor = self._open(os.O_CREAT | os.O_RDONLY) if not wait: try: fcntl.flock(filedescriptor, fcntl.LOCK_SH | fcntl.LOCK_NB) return True except IOError: os.close(filedescriptor) self._filedescriptor.remove() return False else: fcntl.flock(filedescriptor, fcntl.LOCK_SH) return True def do_acquire_write_lock(self, wait): filedescriptor = self._open(os.O_CREAT | os.O_WRONLY) if not wait: try: fcntl.flock(filedescriptor, fcntl.LOCK_EX | fcntl.LOCK_NB) return True except IOError: os.close(filedescriptor) self._filedescriptor.remove() return False else: fcntl.flock(filedescriptor, fcntl.LOCK_EX) return True def do_release_read_lock(self): self._release_all_locks() def do_release_write_lock(self): self._release_all_locks() def _release_all_locks(self): filedescriptor = self._filedesc if filedescriptor is not None: fcntl.flock(filedescriptor, fcntl.LOCK_UN) os.close(filedescriptor) self._filedescriptor.remove() class ConditionSynchronizer(SynchronizerImpl): """a synchronizer using a Condition.""" def __init__(self, identifier): super(ConditionSynchronizer, self).__init__() # counts how many asynchronous methods are executing self.async = 0 # pointer to thread that is the current sync operation self.current_sync_operation = None # condition object to lock on self.condition = _threading.Condition(_threading.Lock()) def do_acquire_read_lock(self, wait = True): self.condition.acquire() try: # see if a synchronous operation is waiting to start # or is already running, in which case we wait (or just # give up and return) if wait: while self.current_sync_operation is not None: self.condition.wait() else: if self.current_sync_operation is not None: return False self.async += 1 finally: self.condition.release() if not wait: return True def do_release_read_lock(self): self.condition.acquire() try: self.async -= 1 # check if we are the last asynchronous reader thread # out the door. if self.async == 0: # yes. so if a sync operation is waiting, notifyAll to wake # it up if self.current_sync_operation is not None: self.condition.notifyAll() elif self.async < 0: raise LockError("Synchronizer error - too many " "release_read_locks called") finally: self.condition.release() def do_acquire_write_lock(self, wait = True): self.condition.acquire() try: # here, we are not a synchronous reader, and after returning, # assuming waiting or immediate availability, we will be. 
if wait: # if another sync is working, wait while self.current_sync_operation is not None: self.condition.wait() else: # if another sync is working, # we dont want to wait, so forget it if self.current_sync_operation is not None: return False # establish ourselves as the current sync # this indicates to other read/write operations # that they should wait until this is None again self.current_sync_operation = _threading.currentThread() # now wait again for asyncs to finish if self.async > 0: if wait: # wait self.condition.wait() else: # we dont want to wait, so forget it self.current_sync_operation = None return False finally: self.condition.release() if not wait: return True def do_release_write_lock(self): self.condition.acquire() try: if self.current_sync_operation is not _threading.currentThread(): raise LockError("Synchronizer error - current thread doesnt " "have the write lock") # reset the current sync operation so # another can get it self.current_sync_operation = None # tell everyone to get ready self.condition.notifyAll() finally: # everyone go !! self.condition.release() Beaker-1.6.3/beaker/util.py0000664000076500000240000003046111723457647015407 0ustar benstaff00000000000000"""Beaker utilities""" try: import thread as _thread import threading as _threading except ImportError: import dummy_thread as _thread import dummy_threading as _threading from datetime import datetime, timedelta import os import re import string import types import weakref import warnings import sys import inspect py3k = getattr(sys, 'py3kwarning', False) or sys.version_info >= (3, 0) py24 = sys.version_info < (2,5) jython = sys.platform.startswith('java') if py3k or jython: import pickle else: import cPickle as pickle from beaker.converters import asbool from beaker import exceptions from threading import local as _tlocal __all__ = ["ThreadLocal", "Registry", "WeakValuedRegistry", "SyncDict", "encoded_path", "verify_directory"] def function_named(fn, name): """Return a function with a given __name__. Will assign to __name__ and return the original function if possible on the Python implementation, otherwise a new function will be constructed. """ fn.__name__ = name return fn def skip_if(predicate, reason=None): """Skip a test if predicate is true.""" reason = reason or predicate.__name__ from nose import SkipTest def decorate(fn): fn_name = fn.__name__ def maybe(*args, **kw): if predicate(): msg = "'%s' skipped: %s" % ( fn_name, reason) raise SkipTest(msg) else: return fn(*args, **kw) return function_named(maybe, fn_name) return decorate def assert_raises(except_cls, callable_, *args, **kw): """Assert the given exception is raised by the given function + arguments.""" try: callable_(*args, **kw) success = False except except_cls, e: success = True # assert outside the block so it works for AssertionError too ! assert success, "Callable did not raise an exception" def verify_directory(dir): """verifies and creates a directory. 
tries to ignore collisions with other threads and processes.""" tries = 0 while not os.access(dir, os.F_OK): try: tries += 1 os.makedirs(dir) except: if tries > 5: raise def has_self_arg(func): """Return True if the given function has a 'self' argument.""" args = inspect.getargspec(func) if args and args[0] and args[0][0] in ('self', 'cls'): return True else: return False def warn(msg, stacklevel=3): """Issue a warning.""" if isinstance(msg, basestring): warnings.warn(msg, exceptions.BeakerWarning, stacklevel=stacklevel) else: warnings.warn(msg, stacklevel=stacklevel) def deprecated(message): def wrapper(fn): def deprecated_method(*args, **kargs): warnings.warn(message, DeprecationWarning, 2) return fn(*args, **kargs) # TODO: use decorator ? functools.wrapper ? deprecated_method.__name__ = fn.__name__ deprecated_method.__doc__ = "%s\n\n%s" % (message, fn.__doc__) return deprecated_method return wrapper class ThreadLocal(object): """stores a value on a per-thread basis""" __slots__ = '_tlocal' def __init__(self): self._tlocal = _tlocal() def put(self, value): self._tlocal.value = value def has(self): return hasattr(self._tlocal, 'value') def get(self, default=None): return getattr(self._tlocal, 'value', default) def remove(self): del self._tlocal.value class SyncDict(object): """ An efficient/threadsafe singleton map algorithm, a.k.a. "get a value based on this key, and create if not found or not valid" paradigm: exists && isvalid ? get : create Designed to work with weakref dictionaries to expect items to asynchronously disappear from the dictionary. Use python 2.3.3 or greater ! a major bug was just fixed in Nov. 2003 that was driving me nuts with garbage collection/weakrefs in this section. """ def __init__(self): self.mutex = _thread.allocate_lock() self.dict = {} def get(self, key, createfunc, *args, **kwargs): try: if self.has_key(key): return self.dict[key] else: return self.sync_get(key, createfunc, *args, **kwargs) except KeyError: return self.sync_get(key, createfunc, *args, **kwargs) def sync_get(self, key, createfunc, *args, **kwargs): self.mutex.acquire() try: try: if self.has_key(key): return self.dict[key] else: return self._create(key, createfunc, *args, **kwargs) except KeyError: return self._create(key, createfunc, *args, **kwargs) finally: self.mutex.release() def _create(self, key, createfunc, *args, **kwargs): self[key] = obj = createfunc(*args, **kwargs) return obj def has_key(self, key): return self.dict.has_key(key) def __contains__(self, key): return self.dict.__contains__(key) def __getitem__(self, key): return self.dict.__getitem__(key) def __setitem__(self, key, value): self.dict.__setitem__(key, value) def __delitem__(self, key): return self.dict.__delitem__(key) def clear(self): self.dict.clear() class WeakValuedRegistry(SyncDict): def __init__(self): self.mutex = _threading.RLock() self.dict = weakref.WeakValueDictionary() sha1 = None def encoded_path(root, identifiers, extension = ".enc", depth = 3, digest_filenames=True): """Generate a unique file-accessible path from the given list of identifiers starting at the given root directory.""" ident = "_".join(identifiers) global sha1 if sha1 is None: from beaker.crypto import sha1 if digest_filenames: if py3k: ident = sha1(ident.encode('utf-8')).hexdigest() else: ident = sha1(ident).hexdigest() ident = os.path.basename(ident) tokens = [] for d in range(1, depth): tokens.append(ident[0:d]) dir = os.path.join(root, *tokens) verify_directory(dir) return os.path.join(dir, ident + extension) def asint(obj): if 
isinstance(obj, int):
        return obj
    elif isinstance(obj, basestring) and re.match(r'^\d+$', obj):
        return int(obj)
    else:
        raise Exception("This is not a proper int")


def verify_options(opt, types, error):
    if not isinstance(opt, types):
        if not isinstance(types, tuple):
            types = (types,)
        coerced = False
        for typ in types:
            try:
                if typ in (list, tuple):
                    opt = [x.strip() for x in opt.split(',')]
                else:
                    if typ == bool:
                        typ = asbool
                    elif typ == int:
                        typ = asint
                    elif typ in (timedelta, datetime):
                        if not isinstance(opt, typ):
                            raise Exception("%s requires a timedelta type" % typ)
                    opt = typ(opt)
                coerced = True
            except:
                pass
            if coerced:
                break
        if not coerced:
            raise Exception(error)
    elif isinstance(opt, str) and not opt.strip():
        raise Exception("Empty strings are invalid for: %s" % error)
    return opt


def verify_rules(params, ruleset):
    for key, types, message in ruleset:
        if key in params:
            params[key] = verify_options(params[key], types, message)
    return params


def coerce_session_params(params):
    rules = [
        ('data_dir', (str, types.NoneType), "data_dir must be a string "
         "referring to a directory."),
        ('lock_dir', (str, types.NoneType), "lock_dir must be a string referring to a "
         "directory."),
        ('type', (str, types.NoneType), "Session type must be a string."),
        ('cookie_expires', (bool, datetime, timedelta, int), "Cookie expires was "
         "not a boolean, datetime, int, or timedelta instance."),
        ('cookie_domain', (str, types.NoneType), "Cookie domain must be a "
         "string."),
        ('id', (str,), "Session id must be a string."),
        ('key', (str,), "Session key must be a string."),
        ('secret', (str, types.NoneType), "Session secret must be a string."),
        ('validate_key', (str, types.NoneType), "Session validate_key must be "
         "a string."),
        ('encrypt_key', (str, types.NoneType), "Session encrypt_key must be "
         "a string."),
        ('secure', (bool, types.NoneType), "Session secure must be a boolean."),
        ('httponly', (bool, types.NoneType), "Session httponly must be a boolean."),
        ('timeout', (int, types.NoneType), "Session timeout must be an "
         "integer."),
        ('auto', (bool, types.NoneType), "Session is created if accessed."),
        ('webtest_varname', (str, types.NoneType), "Session varname must be "
         "a string."),
    ]
    opts = verify_rules(params, rules)
    cookie_expires = opts.get('cookie_expires')
    if cookie_expires and isinstance(cookie_expires, int) and \
       not isinstance(cookie_expires, bool):
        opts['cookie_expires'] = timedelta(seconds=cookie_expires)
    return opts


def coerce_cache_params(params):
    rules = [
        ('data_dir', (str, types.NoneType), "data_dir must be a string "
         "referring to a directory."),
        ('lock_dir', (str, types.NoneType), "lock_dir must be a string referring to a "
         "directory."),
        ('type', (str,), "Cache type must be a string."),
        ('enabled', (bool, types.NoneType), "enabled must be true/false "
         "if present."),
        ('expire', (int, types.NoneType), "expire must be an integer representing "
         "how many seconds the cache is valid for"),
        ('regions', (list, tuple, types.NoneType), "Regions must be a "
         "comma separated list of valid regions"),
        ('key_length', (int, types.NoneType), "key_length must be an integer "
         "which indicates the longest a key can be before hashing"),
    ]
    return verify_rules(params, rules)


def parse_cache_config_options(config, include_defaults=True):
    """Parse configuration options and validate for use with the
    CacheManager"""

    # Load default cache options
    if include_defaults:
        options = dict(type='memory', data_dir=None, expire=None,
                       log_file=None)
    else:
        options = {}
    for key, val in config.iteritems():
        if key.startswith('beaker.cache.'):
options[key[13:]] = val if key.startswith('cache.'): options[key[6:]] = val coerce_cache_params(options) # Set cache to enabled if not turned off if 'enabled' not in options and include_defaults: options['enabled'] = True # Configure region dict if regions are available regions = options.pop('regions', None) if regions: region_configs = {} for region in regions: if not region: # ensure region name is valid continue # Setup the default cache options region_options = dict(data_dir=options.get('data_dir'), lock_dir=options.get('lock_dir'), type=options.get('type'), enabled=options['enabled'], expire=options.get('expire'), key_length=options.get('key_length', 250)) region_prefix = '%s.' % region region_len = len(region_prefix) for key in options.keys(): if key.startswith(region_prefix): region_options[key[region_len:]] = options.pop(key) coerce_cache_params(region_options) region_configs[region] = region_options options['cache_regions'] = region_configs return options def func_namespace(func): """Generates a unique namespace for a function""" kls = None if hasattr(func, 'im_func'): kls = func.im_class func = func.im_func if kls: return '%s.%s' % (kls.__module__, kls.__name__) else: return '%s|%s' % (inspect.getsourcefile(func), func.__name__) Beaker-1.6.3/Beaker.egg-info/0000775000076500000240000000000011723541367015477 5ustar benstaff00000000000000Beaker-1.6.3/Beaker.egg-info/dependency_links.txt0000664000076500000240000000000111723541365021543 0ustar benstaff00000000000000 Beaker-1.6.3/Beaker.egg-info/entry_points.txt0000664000076500000240000000076011723541365020776 0ustar benstaff00000000000000 [paste.filter_factory] beaker_session = beaker.middleware:session_filter_factory [paste.filter_app_factory] beaker_session = beaker.middleware:session_filter_app_factory [beaker.backends] database = beaker.ext.database:DatabaseNamespaceManager memcached = beaker.ext.memcached:MemcachedNamespaceManager google = beaker.ext.google:GoogleNamespaceManager sqla = beaker.ext.sqla:SqlaNamespaceManager Beaker-1.6.3/Beaker.egg-info/not-zip-safe0000664000076500000240000000000110761620552017720 0ustar benstaff00000000000000 Beaker-1.6.3/Beaker.egg-info/PKG-INFO0000664000076500000240000000604311723541365016575 0ustar benstaff00000000000000Metadata-Version: 1.0 Name: Beaker Version: 1.6.3 Summary: A Session and Caching library with WSGI Middleware Home-page: http://beaker.rtfd.org/ Author: Ben Bangert, Mike Bayer, Philip Jenvey Author-email: ben@groovie.org, pjenvey@groovie.org License: BSD Description: Cache and Session Library +++++++++++++++++++++++++ About ===== Beaker is a web session and general caching library that includes WSGI middleware for use in web applications. As a general caching library, Beaker can handle storing for various times any Python object that can be pickled with optional back-ends on a fine-grained basis. Beaker was built largely on the code from MyghtyUtils, then refactored and extended with database support. Beaker includes Cache and Session WSGI middleware to ease integration with WSGI capable frameworks, and is automatically used by `Pylons `_. 
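
        Example
        =======

        A minimal, illustrative sketch of the cache API (the configuration
        values below are arbitrary)::

            from beaker.cache import CacheManager
            from beaker.util import parse_cache_config_options

            config = {'cache.type': 'memory', 'cache.expire': 300}
            cache = CacheManager(**parse_cache_config_options(config))

            @cache.cache('load_user', expire=300)
            def load_user(user_id):
                return expensive_lookup(user_id)  # hypothetical helper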
        Features
        ========

        * Fast, robust performance
        * Multiple reader/single writer lock system to avoid duplicate
          simultaneous cache creation
        * Cache back-ends include dbm, file, memory, memcached, and database
          (Using SQLAlchemy for multiple-db vendor support)
        * Signed cookies to prevent session hijacking/spoofing
        * Cookie-only sessions to remove the need for a db or file backend
          (ideal for clustered systems)
        * Extensible Container object to support new back-ends
        * Caches can be divided into namespaces (to represent templates,
          objects, etc.) then keyed for different copies
        * Create functions for automatic call-backs to create new cache
          copies after expiration
        * Fine-grained toggling of back-ends, keys, and expiration per Cache
          object

        Documentation
        =============

        Documentation can be found on the `Official Beaker Docs site
        `_.

        Source
        ======

        The latest developer version is available in a `Mercurial repository
        `_.

Keywords: wsgi myghty session web cache middleware
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Web Environment
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2.4
Classifier: Programming Language :: Python :: 2.5
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.2
Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
Classifier: Topic :: Internet :: WWW/HTTP :: WSGI
Classifier: Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware
Beaker-1.6.3/Beaker.egg-info/requires.txt0000664000076500000240000000003511723541365020073 0ustar benstaff00000000000000
[crypto]
pycryptopp>=0.5.12Beaker-1.6.3/Beaker.egg-info/SOURCES.txt0000664000076500000240000000175011723541366017365 0ustar benstaff00000000000000.hgignore
.hgtags
CHANGELOG
LICENSE
setup.cfg
setup.py
Beaker.egg-info/PKG-INFO
Beaker.egg-info/SOURCES.txt
Beaker.egg-info/dependency_links.txt
Beaker.egg-info/entry_points.txt
Beaker.egg-info/not-zip-safe
Beaker.egg-info/requires.txt
Beaker.egg-info/top_level.txt
beaker/__init__.py
beaker/cache.py
beaker/container.py
beaker/converters.py
beaker/exceptions.py
beaker/middleware.py
beaker/session.py
beaker/synchronization.py
beaker/util.py
beaker/crypto/__init__.py
beaker/crypto/jcecrypto.py
beaker/crypto/pbkdf2.py
beaker/crypto/pycrypto.py
beaker/crypto/util.py
beaker/ext/__init__.py
beaker/ext/database.py
beaker/ext/google.py
beaker/ext/memcached.py
beaker/ext/sqla.py
tests/test_cache.py
tests/test_container.py
tests/test_cookie_only.py
tests/test_database.py
tests/test_increment.py
tests/test_memcached.py
tests/test_syncdict.py
tests/test_synchronizer.py
tests/test_namespacing_files/__init__.py
tests/test_namespacing_files/namespace_get.py
tests/test_namespacing_files/namespace_go.pyBeaker-1.6.3/Beaker.egg-info/top_level.txt0000664000076500000240000000001511723541365020223 0ustar benstaff00000000000000beaker
tests
Beaker-1.6.3/CHANGELOG0000664000076500000240000004617011723541240014024 0ustar benstaff00000000000000Release 1.6.3 (2/29/2012)
=========================

* Fix bug with cookie deletion on leap years. Patch contributed by Greg
  Nelson and Michael Wirth.
* Fix issue with referencing same module via different import paths. Patch
  contributed by brianfrantz.
* Fix cookie expiration check. Patch contributed by Mike Dirolf.
Release 1.6.2 (12/13/2011)
==========================

* Updated dogpile lock so that it locks per namespace+key rather than on the
  entire namespace. (#101)
* Added encryption option for any backend. Patch contributed by Toby Elliot.

Release 1.6.1 (10/20/2011)
==========================

* Remove stray print statement.
* Include .app for consistency instead of requiring wrap_app.

Release 1.6 (10/16/2011)
========================

* Fix bug with cache_key length calculation.
* Fix bug with how path was set so that it's restored properly and propagated.
* Fix bug with CacheMiddleware clobbering enabled setting.
* Update option for ``cookie_expires`` so that it can now handle an integer
  which will be used as the seconds till the cookie expires.
* Merge fix for Issue 31, can now handle unicode cache keys.
* Add ``key_length`` option for cache regions, and for keyword args passed
  into the cache system. Cache keys longer than this will be SHA'd.
* Added runtime ``beaker.__version__``.
* Add ``webtest_varname`` option to configuration to optionally include the
  session value in the environ vars when using Beaker with WebTest.
* Defer running of pkg_resources to look for external cache modules until
  requested. (#66)
* memcached backend uses pylibmc.ThreadMappedPool to ensure thread-local usage
  of pylibmc when that library is in use. (#60)
* memcached backend also has a ``memcache_module`` string argument, which
  allows direct specification of which memcache backend to use.
* Basic container/file-based Session support working in Py3K. (#72)
* Further Python 3 fixes.
* Added an optimization to the FileNamespaceContainer when used with Session,
  such that the pickled contents of the file are not read a second time when
  session.save() is called. (#64)
* Fixed bug whereby CacheManager.invalidate wouldn't work for a function
  decorated by cache.cache(). (#61)
* cache decorators @cache.cache(), @cache_region() won't include a first
  argument named 'self' or 'cls' as part of the cache key. This allows
  reasonably safe usage for methods as well as functions. (#55)
* file backend no longer squashes unpickling errors. This was inconsistent
  behavior versus all the other backends.
* invalidate_corrupt flag on Session now emits a warning. (#52)
* cache.remove_value() removes the value even if it's already marked
  'expired'. (#42)

Release 1.5.4 (6/16/2010)
=========================

* Fix import error with InvalidCryptoBackendError.
* Fix for domain querying on property.
* Test cleanups.
* Fix bug with warnings preventing proper running under Jython.

Release 1.5.3 (3/2/2010)
========================

* Fix Python 2.4 incompatibility with google import.

Release 1.5.2 (3/1/2010)
========================

* pkg_resources scanning for additional Beaker back-ends gracefully handles
  situations where it's not present (GAE). Fixes #36.
* Avoid timing attacks on hash comparison.
* Provide abstract base for MemoryNamespaceManager that deals with
  "dictionaries".
* Added tests for invalidating cache, and fixed bug with function cache when
  no args are present.
* The SQLAlchemy backends require SQLAlchemy 0.4 or greater (0.6 recommended).
* Rudimentary Python 3 support is now available. Simply use Python 3 with
  Distribute and "python setup.py install" to run 2to3 automatically, or
  manually run 2to3 on "beaker" and "tests" to convert to a Python 3 version.
* Added support for the PyCrypto module for encrypted sessions, etc., in
  addition to the existing pycryptopp support.

Release 1.5.1 (12/17/2009)
==========================

* Fix cache namespacing.
Release 1.5 (11/23/2009)
========================

* Update memcached to default to using pylibmc when available.
* Fix bug when a cache value doesn't exist, causing has_key to throw an
  exception rather than return False. Fixes #24.
* Fix bug where getpid under GAE is used improperly to assume it should be a
  non-string. Fixes #22.
* Add cache_region decorator that works *before* configuration of the cache
  regions has been completed, for use in module-level decorations.
* Fix bug where has_value sees the value before it's removed.
* Improved accuracy of "dogpile" checker by removing dependency on "self"
  attributes, which seem to be slightly unreliable in highly concurrent
  scenarios.

Release 1.4.2 (9/25/2009)
=========================

* Fix bug where memcached may yank a value after the has_value but before the
  value can be fetched.
* Fix properties for setting the path. Fixes #15.
* Fix the 'TypeError: argument must be an int, or have a fileno() method'
  error sporadically emitted by FileSynchronizer under moderate load.

Release 1.4.1 (9/10/2009)
=========================

* Fix verification of options to throw an error if a beaker param is an empty
  string.
* Add CacheManager.invalidate function to easily invalidate cache spaces
  created by the use of the cache decorator.
* Add CacheManager.region_invalidate function to easily invalidate cache
  spaces created by the use of the cache_region decorator.
* Fix the InvalidCryptoBackendError exception triggering a TypeError. Patch
  from dz, fixes #13.

Release 1.4 (7/24/2009)
=======================

* Fix bug with hmac on Python 2.4. Patch from toshio, closes ticket #2133
  from the TurboGears2 Trac.
* Fix bug with occasional ValueError from FileNamespaceManager.do_open.
  Fixes #10.
* Fixed bug with session files being saved despite being new and not saved.
* Fixed bug with CacheMiddleware overwriting configuration with default
  arguments despite prior setting.
* Fixed bug with SyntaxError not being caught properly in entry point
  discovery.
* Changed to using BlobProperty for Google Datastore.
* Added domain/path properties to the session. This allows one to dynamically
  set the cookie's domain and/or path on the fly, which will then be set on
  the cookie for the session.
* Added support for cookie-based sessions in Jython via the JCE (Java
  Cryptography Extensions). Patch from Alex Grönholm.
* Update Beaker database extensions to work with SQLAlchemy 0.6, PostgreSQL,
  and Jython.

Release 1.3.1 (5/5/2009)
========================

* Added a whole bunch of Sphinx documentation for the updated site.
* Added a corresponding remove as an alias to the cache's remove_value.
* Fixed cookie session not having an invalidate function.
* Fix bug with CacheMiddleware not using the proper function to load
  configuration options, missing the cache regions.

Release 1.3 (4/6/2009)
======================

* Added last_accessed attribute to session to indicate the previous time the
  session was last accessed.
* Added setuptools entry points to dynamically discover additional namespace
  backends.
* Fixed bug with invalidate and locks, fixes #594.
* Added cache.cache decorator for arbitrary caching.
* Added cache.region decorator to the CacheManager object.
* Added cache regions. Can be provided in an INI-style configuration, or by
  adding a cache_regions arg to the CacheManager.
* Fix bug with timeout not being saved properly.
* Fix bug with cookie-only sessions sending cookies for new sessions even if
  they weren't supposed to be saved.
* Fix bug that caused a non-auto accessed session to not record the time it
  was previously accessed, resulting in session timeouts.
* Add function to parse configuration dicts as appropriate for use with the
  CacheManager.
* The "expiretime" is no longer passed to the memcached backend - since if
  memcached makes the expired item unavailable at the same time the container
  expires it, then all actors must block until the new value is available
  (i.e. it breaks the anti-dogpile logic).

Release 1.2.3 (3/2/2009)
========================

* Fix accessed increment to take place *after* the accessed time is checked
  to see if it has expired. Fixes #580.
* data_dir/lock_dir parameters are optional to most backends; if not present,
  mutex-based locking will be used for creation functions.
* Adjustments to Container to better account for backends which don't provide
  read/write locks, such as memcached. As a result, the plain "memory" cache
  no longer requires read/write mutexing.

Release 1.2.2 (2/14/2009)
=========================

* Fix delete bug reported by andres with session not being deleted.

Release 1.2.1 (2/09/2009)
=========================

* Fix memcached behavior, as memcached returns None on a nonexistent key
  fetch, which broke invalid session checking.

Release 1.2 (1/22/2009)
=======================

* Updated session to only save to the storage *once* under any/all conditions
  rather than every time save() is called.
* Added session.revert() function that reverts the session to the state at
  the beginning of the request.
* Updated session to store the entire session data in a single namespace key;
  this lets memcached work properly, and makes for more efficient use of the
  storage system for sessions.

Release 1.1.3 (12/29/2008)
==========================

* Fix the 1.1.2 old cache/session upgrader to handle the has_current_value
  method.
* Make InvalidCacheBackendError an ImportError.

Release 1.1.2 (11/24/2008)
==========================

* Upgrade Beaker pre-1.1 cache/session values to the new format rather than
  throwing an exception.

Release 1.1.1 (11/24/2008)
==========================

* Fixed bug in Google extension which passed arguments it should no longer
  pass to NamespaceManager.
* Fixed bug involving lockfiles left open during cache "value creation" step.

Release 1.1 (11/16/2008)
========================

* file-based cache will not hold onto a cached value once read from file;
  will create a new value if the file is deleted, as opposed to re-using what
  was last read. This allows external removal of files to be used as a
  cache-invalidation mechanism.
* file-based locking will not unlink lockfiles; this can interfere with the
  flock() mechanism in the event that a concurrent process is accessing the
  files.
* Sending "type" and other namespace config arguments to cache.get()/
  cache.put()/cache.remove_value() is deprecated. The namespace configuration
  is now preferred at the Cache level, i.e. when you construct a Cache or
  call cache_manager.get_cache(). This removes the ambiguity of Cache's
  dictionary interface and has_key() methods, which have no awareness of
  those arguments.
* the "expiretime" in use is stored in the cache itself, so that it is always
  available when calling has_key() and other methods. Between this change and
  the deprecation of 'type', the Cache no longer has any need to store cache
  configuration in memory per cache key, which in a dynamically-generated key
  scenario stores an arbitrarily large number of configurations - essentially
  a memory leak.
* memcache caching has been vastly improved: it no longer stores a list of
  all keys, which along the same theme prevented efficient usage for an
  arbitrarily large number of keys. The keys() method is now unimplemented,
  and cache.remove() clears the entire memcache cache across all namespaces.
  This is what the memcache API provides, so it's the best we can do.
* memcache caching passes along "expiretime" to the memcached "time"
  parameter, so that the cache itself can reduce its size for elements which
  are expired (memcache seems to manage its size in any case; this is just a
  hint to improve its operation).
* replaced homegrown ThreadLocal implementation with threading.local; falls
  back to a 2.3-compatible one for python<2.4.

Release 1.0.3 (10/14/2008)
==========================

* Fixed os.getpid issue on GAE.
* CookieSession will add an '_expires' value to the data when an expire time
  is set, and uses it.

Release 1.0.2 (9/22/2008)
=========================

* Fixed bug caused when attempting to invalidate a session that hadn't
  previously been created.

Release 1.0.1 (8/19/2008)
=========================

* Bug fix for cookie sessions to retain the id before clearing values.

Release 1.0 (8/13/2008)
=======================

* Added cookie delete to both cookie-only sessions and normal sessions, to
  help with proxies and such that may determine whether a user is logged in
  via a cookie (cookie varies, etc.). Suggested by Felix Schwarz.
* cache.get_value() now uses the given **kwargs in all cases in the same
  manner as cache.set_value(). This way you can send a new createfunc to
  cache.get_value() each time and it will be used.

Release 0.9.5 (6/19/2008)
=========================

* Fixed bug in memcached to be tolerant of keys disappearing when memcached
  expires them.
* Fixed the cache functionality to actually work; previously set_value was
  ignored if there was already a value set.

Release 0.9.4 (4/13/2008)
=========================

* Adding 'google' backend datastore, available by specifying 'google' as the
  cache/session type. Note that this takes an optional table_name used to
  name the model class used.
* SECURITY BUG: Fixed security issue with Beaker not properly removing
  directory escaping characters from the session ID when un-signed sessions
  are used. Reported with patch by Felix Schwarz.
* Fixed bug with Beaker not playing well with Registry when it's placed above
  it in the stack. Thanks Wichert Akkerman.

Release 0.9.3 (2/28/2008)
=========================

* Adding 'id' to cookie-based sessions for better compatibility.
* Fixed error where an exception was still raised when PyCrypto was missing.
* WARNING: Session middleware no longer catches Paste HTTP Exceptions; apps
  are now expected to capture and handle Paste HTTP Exceptions themselves.
* Fixed Python 2.4 compatibility bug in hmac.
* Fixed key lookup bug on cache object to only use the settings for the key
  lookup. Found by Andrew Stromnov.

Release 0.9.2 (2/13/2008)
=========================

* Added option to make Beaker use a secure cookie.
* Removed CTRCipher as pycryptopp doesn't need it.
* Changed AES to use 256 bit.
* Fixed signing code to use hmac with sha for better signing security.
* Fixed memcached code to use delete_multi on clearing the keys for
  efficiency and updated key retrieval to properly store and retrieve None
  values.
* Removing cookie.py and signed cookie middleware, as the environ_key option
  for session middleware provides a close enough setting.
* Added option to use just cookie-based sessions without requiring
  encryption.
* Switched encryption requirement from PyCrypto to pycryptopp, which uses a
  proper AES in Counter Mode.

Release 0.9.1 (2/4/2008)
========================

* Fixed bug in middleware using a module that wasn't imported.

Release 0.9 (12/17/07)
======================

* Fixed bug in memcached replace to actually replace spaces properly.
* Fixed md5 cookie signature to use SHA-1 when available.
* Updated cookie-based session storage to use 256-bit AES-CTR mode with a
  SHA-1 HMAC signature. Now requires PyCrypto for the AES scheme.
* WARNING: Moved session and cache middleware to middleware, as the old
  deprecation warnings said was going to happen for 0.8.
* Added cookie-only session storage with RC4-ciphered encryption; requires
  Python 2.4.
* Add the ability to specify the cookie's domain for sessions.

Release 0.8.1 (11/15/07)
========================

* Fixed bug in database.py not properly handling a missing sqlalchemy
  library.

Release 0.8 (10/17/07)
======================

* Fixed bug in prior db update causing session to occasionally not be written
  back to the db.
* Fixed memcached key error with keys containing spaces. Thanks Jim Musil.
* WARNING: Major change to ext:database to use a single row per namespace.
  Additionally, there's an accessed and created column present to support
  easier deletion of old cache/session data. You *will* need to drop any
  existing tables being used by the ext:database backend.
* Streamline ext:database backend to avoid unnecessary database selects for
  repeat data.
* Added SQLAlchemy 0.4 support to ext:database backend.

Release 0.7.5 (08/18/07)
========================

* Fixed data_dir parsing for session string coercions; no longer picks up
  None as a data_dir.
* Fixed session.get_by_id to look up recently saved sessions properly; also
  updates session with creation/access time upon save.
* Add unit tests for get_by_id function. Updated get_by_id to not result in
  additional session files.
* Added session.get_by_id function to retrieve a session of the given id.

Release 0.7.4 (07/09/07)
========================

* Fixed issue with Beaker not properly handling arguments as Pylons may pass
  them in.
* Fixed unit test to catch file removal exception.
* Fixed another bug in synchronization, this one involving reentrant
  conditions with file synchronization.
* If a file open fails due to pickling errors, locks just opened are released
  unconditionally.

Release 0.7.3 (06/08/07)
========================

* Beaker was not properly parsing input options to session middleware. Thanks
  to Yannick Gingras and Timothy S for spotting the issue.
* Changed session to only send the cookie header if it's a new session and
  save() was called. Also only creates the session file under these
  conditions.

Release 0.7.2 (05/19/07)
========================

* Added deprecation warning for the middleware move; relocated middleware to
  cache and session modules for backwards compatibility.

Release 0.7.1 (05/18/07)
========================

* Adjusted synchronization logic to account for Mako/new Cache object's
  multithreaded usage of Container.

Release 0.7 (05/18/07)
======================

* WARNING: Cleaned up Cache object based on the Mako cache object; this
  changes the call interface slightly for creating a Cache object directly.
  The middleware cache object is unaffected from an end-user view. This
  change also avoids duplicate creations of Cache objects.
* Adding database backend and unit tests.
* Added memcached test, fixed memcached namespace arg passing.
* Fixed session and cache tests, still failing syncdict test. Added doctests for Cache and Session middleware. * Cleanup of container/cache/container_test * Namespaces no longer require a context, removed NamespaceContext? * Logging in container.py uses logging module * Cleanup of argument passing, use name **kwargs instead of **params for generic kwargs * Container classes contain a static create_namespace() method, namespaces are accessed from the ContainerContext? via string name + container class alone * Implemented (but not yet tested) clear() method on Cache, locates all Namespaces used thus far and clears each one based on its keys() collection * Fixed Cache.clear() method to actually clear the Cache namespace. * Updated memcached backend to split servers on ';' for multiple memcached backends. * Merging MyghtyUtils code into Beaker. Release 0.6.3 (03/18/2007) ========================== * Added api with customized Session that doesn't require a Myghty request object, just a dict. Updated session to use the new version. * Removing unicode keys as some dbm backends can't handle unicode keys. * Adding core files that should've been here. * More stringent checking for existence of a session. * Avoid recreating the session object when it's empty. Beaker-1.6.3/LICENSE0000664000076500000240000000275711267116536013633 0ustar benstaff00000000000000Copyright (c) 2006, 2007 Ben Bangert, Mike Bayer, Philip Jenvey and contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author or contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Beaker-1.6.3/PKG-INFO0000664000076500000240000000604311723541367013714 0ustar benstaff00000000000000Metadata-Version: 1.0 Name: Beaker Version: 1.6.3 Summary: A Session and Caching library with WSGI Middleware Home-page: http://beaker.rtfd.org/ Author: Ben Bangert, Mike Bayer, Philip Jenvey Author-email: ben@groovie.org, pjenvey@groovie.org License: BSD Description: Cache and Session Library +++++++++++++++++++++++++ About ===== Beaker is a web session and general caching library that includes WSGI middleware for use in web applications. As a general caching library, Beaker can handle storing for various times any Python object that can be pickled with optional back-ends on a fine-grained basis. 
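A minimal sketch of the session side, with option values borrowed from the
test suite ('hoobermas' and the hit counter are illustrative only)::

    from beaker.middleware import SessionMiddleware

    def app(environ, start_response):
        # the middleware places a dict-like session in the environ
        session = environ['beaker.session']
        session['hits'] = session.get('hits', 0) + 1
        session.save()
        start_response('200 OK', [('Content-type', 'text/plain')])
        return ['Seen %d times' % session['hits']]

    wsgi_app = SessionMiddleware(app, **{
        'session.type': 'cookie',
        'session.validate_key': 'hoobermas',
    })
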
Beaker was built largely on the code from MyghtyUtils, then refactored and extended with database support. Beaker includes Cache and Session WSGI middleware to ease integration with WSGI capable frameworks, and is automatically used by `Pylons `_. Features ======== * Fast, robust performance * Multiple reader/single writer lock system to avoid duplicate simultaneous cache creation * Cache back-ends include dbm, file, memory, memcached, and database (Using SQLAlchemy for multiple-db vendor support) * Signed cookie's to prevent session hijacking/spoofing * Cookie-only sessions to remove the need for a db or file backend (ideal for clustered systems) * Extensible Container object to support new back-ends * Cache's can be divided into namespaces (to represent templates, objects, etc.) then keyed for different copies * Create functions for automatic call-backs to create new cache copies after expiration * Fine-grained toggling of back-ends, keys, and expiration per Cache object Documentation ============= Documentation can be found on the `Official Beaker Docs site `_. Source ====== The latest developer version is available in a `Mercurial repository `_. Keywords: wsgi myghty session web cache middleware Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Environment :: Web Environment Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: BSD License Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2.4 Classifier: Programming Language :: Python :: 2.5 Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.2 Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content Classifier: Topic :: Internet :: WWW/HTTP :: WSGI Classifier: Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware Beaker-1.6.3/setup.cfg0000664000076500000240000000027611723541367014442 0ustar benstaff00000000000000[egg_info] tag_build = tag_svn_revision = 0 tag_date = 0 [nosetests] where = tests verbose = True detailed-errors = True with-doctest = True cover-package = beaker cover-inclusive = True Beaker-1.6.3/setup.py0000664000076500000240000001006211671767317014334 0ustar benstaff00000000000000import os import sys import re from setuptools import setup, find_packages v = open(os.path.join(os.path.dirname(__file__), 'beaker', '__init__.py')) VERSION = re.compile(r".*__version__ = '(.*?)'", re.S).match(v.read()).group(1) v.close() extra = {} if sys.version_info >= (3, 0): extra.update( use_2to3=True, ) pycryptopp = 'pycryptopp>=0.5.12' tests_require = ['nose', 'webtest', 'Mock'] if not sys.platform.startswith('java') and not sys.platform == 'cli': tests_require.extend([pycryptopp, 'SQLALchemy']) try: import sqlite3 except ImportError: tests_require.append('pysqlite') setup(name='Beaker', version=VERSION, description="A Session and Caching library with WSGI Middleware", long_description="""\ Cache and Session Library +++++++++++++++++++++++++ About ===== Beaker is a web session and general caching library that includes WSGI middleware for use in web applications. As a general caching library, Beaker can handle storing for various times any Python object that can be pickled with optional back-ends on a fine-grained basis. Beaker was built largely on the code from MyghtyUtils, then refactored and extended with database support. 
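A hedged sketch of that database support, with the sqlite URL and names taken
from the bundled tests (treat them as placeholders)::

    from beaker.cache import Cache

    # an SQLAlchemy-backed namespace; any supported database URL should work
    cache = Cache('test', type='ext:database',
                  url='sqlite:///test.db', data_dir='./cache')
    cache.set_value('key', 'value')
    assert cache.get_value('key') == 'value'
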
Beaker includes Cache and Session WSGI middleware to ease integration with WSGI capable frameworks, and is automatically used by `Pylons `_. Features ======== * Fast, robust performance * Multiple reader/single writer lock system to avoid duplicate simultaneous cache creation * Cache back-ends include dbm, file, memory, memcached, and database (Using SQLAlchemy for multiple-db vendor support) * Signed cookie's to prevent session hijacking/spoofing * Cookie-only sessions to remove the need for a db or file backend (ideal for clustered systems) * Extensible Container object to support new back-ends * Cache's can be divided into namespaces (to represent templates, objects, etc.) then keyed for different copies * Create functions for automatic call-backs to create new cache copies after expiration * Fine-grained toggling of back-ends, keys, and expiration per Cache object Documentation ============= Documentation can be found on the `Official Beaker Docs site `_. Source ====== The latest developer version is available in a `Mercurial repository `_. """, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Programming Language :: Python', 'Programming Language :: Python :: 2.4', 'Programming Language :: Python :: 2.5', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content', 'Topic :: Internet :: WWW/HTTP :: WSGI', 'Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware', ], keywords='wsgi myghty session web cache middleware', author='Ben Bangert, Mike Bayer, Philip Jenvey', author_email='ben@groovie.org, pjenvey@groovie.org', url='http://beaker.rtfd.org/', license='BSD', packages=find_packages(exclude=['ez_setup', 'examples', 'tests']), zip_safe=False, install_requires=[], extras_require={ 'crypto':[pycryptopp] }, test_suite='nose.collector', tests_require=tests_require, entry_points=""" [paste.filter_factory] beaker_session = beaker.middleware:session_filter_factory [paste.filter_app_factory] beaker_session = beaker.middleware:session_filter_app_factory [beaker.backends] database = beaker.ext.database:DatabaseNamespaceManager memcached = beaker.ext.memcached:MemcachedNamespaceManager google = beaker.ext.google:GoogleNamespaceManager sqla = beaker.ext.sqla:SqlaNamespaceManager """, **extra ) Beaker-1.6.3/tests/0000775000076500000240000000000011723541367013756 5ustar benstaff00000000000000Beaker-1.6.3/tests/test_cache.py0000664000076500000240000002343111542263524016430 0ustar benstaff00000000000000# coding: utf-8 import os import platform import shutil import tarfile import tempfile import time from beaker.middleware import CacheMiddleware from beaker import util from beaker.cache import Cache from nose import SkipTest from beaker.util import skip_if import base64 import zlib try: from webtest import TestApp except ImportError: TestApp = None # Tarballs of the output of: # >>> from beaker.cache import Cache # >>> c = Cache('test', data_dir='db', type='dbm') # >>> c['foo'] = 'bar' # in the old format, Beaker @ revision: 24f57102d310 dbm_cache_tar = """\ eJzt3EtOwkAAgOEBjTHEBDfu2ekKZ6bTTnsBL+ABzPRB4osSRBMXHsNruXDl3nMYLaEbpYRAaIn6 f8kwhFcn/APLSeNTUTdZsL4/m4Pg21wSqiCt9D1PC6mUZ7Xo+bWvrHB/N3HjXk+MrrLhQ/a48HXL nv+l0vg0yYcTdznMxhdpfFvHbpj1lyv0N8oq+jdhrr/b/A5Yo79R9G9ERX8XbXgLrNHfav7/G1Hd 
30XGhYPMT5JYRbELVGISGVov9SKVRaGNQj2I49TrF+8oxpJrTAMHxizob+b7ay+Y/v5lE1/AP+8v 9o5ccdsWYvdViMPpIwdCtMRsiP3yTrucd8r5pJxbz8On9/KT2uVo3H5rG1cFAAAAAOD3aIuP7lv3 pRjbXgkAAAAAAFjVyc1Idc6U1lYGgbSmL0Mjpe248+PYjY87I91x/UGeb3udAAAAAACgfh+fAAAA AADgr/t5/sPFTZ5cb/38D19Lzn9pRHX/zR4CtEZ/o+nfiEX9N3kI0Gr9vWl/W0z0BwAAAAAAAAAA AAAAAAAAqPAFyOvcKA== """ if util.py3k: dbm_cache_tar = dbm_cache_tar.encode('ascii') dbm_cache_tar = zlib.decompress(base64.b64decode(dbm_cache_tar)) # dumbdbm format dumbdbm_cache_tar = """\ eJzt191qgzAYBmCPvYqc2UGx+ZKY6A3scCe7gJKoha6binOD3f2yn5Ouf3TTlNH3AQlEJcE3nyGV W0RT457Jsq9W6632W0Se0JI49/1E0vCIZZPPzHt5HmzPWNQ91M1r/XbwuVP3/6nKLcq2Gey6qftl 5Z6mWA3n56/IKOQfwk7+dvwV8Iv8FSH/IPbkb4uRl8BZ+fvg/WUE8g9if/62UDZf1VlZOiqc1VSq kudGVrKgushNkYuVc5VM/Rups5vjY3wErJU6nD+Z7fyFNFpEjIf4AFeef7Jq22TOZnzOpLiJLz0d CGyE+q/scHyMk/Wv+E79G0L9hzC7JSFMpv0PN0+J4rv7xNk+iTuKh07E6aXnB9Mao/7X/fExzt// FecS9R8C9v/r9rP+l49tubnk+e/z/J8JjvMfAAAAAAAAAADAn70DFJAAwQ== """ if util.py3k: dumbdbm_cache_tar = dumbdbm_cache_tar.encode('ascii') dumbdbm_cache_tar = zlib.decompress(base64.b64decode(dumbdbm_cache_tar)) def simple_app(environ, start_response): clear = False if environ.get('beaker.clear'): clear = True cache = environ['beaker.cache'].get_cache('testcache') if clear: cache.clear() try: value = cache.get_value('value') except: value = 0 cache.set_value('value', value+1) start_response('200 OK', [('Content-type', 'text/plain')]) return ['The current value is: %s' % cache.get_value('value')] def cache_manager_app(environ, start_response): cm = environ['beaker.cache'] cm.get_cache('test')['test_key'] = 'test value' start_response('200 OK', [('Content-type', 'text/plain')]) yield "test_key is: %s\n" % cm.get_cache('test')['test_key'] cm.get_cache('test').clear() try: test_value = cm.get_cache('test')['test_key'] except KeyError: yield "test_key cleared" else: yield "test_key wasn't cleared, is: %s\n" % \ cm.get_cache('test')['test_key'] def test_has_key(): cache = Cache('test', data_dir='./cache', type='dbm') o = object() cache.set_value("test", o) assert cache.has_key("test") assert "test" in cache assert not cache.has_key("foo") assert "foo" not in cache cache.remove_value("test") assert not cache.has_key("test") def test_expire_changes(): cache = Cache('test_bar', data_dir='./cache', type='dbm') cache.set_value('test', 10) assert cache.has_key('test') assert cache['test'] == 10 # ensure that we can change a never-expiring value cache.set_value('test', 20, expiretime=1) assert cache.has_key('test') assert cache['test'] == 20 time.sleep(1) assert not cache.has_key('test') # test that we can change it before its expired cache.set_value('test', 30, expiretime=50) assert cache.has_key('test') assert cache['test'] == 30 cache.set_value('test', 40, expiretime=3) assert cache.has_key('test') assert cache['test'] == 40 time.sleep(3) assert not cache.has_key('test') def test_fresh_createfunc(): cache = Cache('test_foo', data_dir='./cache', type='dbm') x = cache.get_value('test', createfunc=lambda: 10, expiretime=2) assert x == 10 x = cache.get_value('test', createfunc=lambda: 12, expiretime=2) assert x == 10 x = cache.get_value('test', createfunc=lambda: 14, expiretime=2) assert x == 10 time.sleep(2) x = cache.get_value('test', createfunc=lambda: 16, expiretime=2) assert x == 16 x = cache.get_value('test', createfunc=lambda: 18, expiretime=2) assert x == 16 cache.remove_value('test') assert not cache.has_key('test') x = cache.get_value('test', createfunc=lambda: 20, expiretime=2) assert x == 20 def test_has_key_multicache(): cache = 
Cache('test', data_dir='./cache', type='dbm') o = object() cache.set_value("test", o) assert cache.has_key("test") assert "test" in cache cache = Cache('test', data_dir='./cache', type='dbm') assert cache.has_key("test") def test_unicode_keys(): cache = Cache('test', data_dir='./cache', type='dbm') o = object() cache.set_value(u'hiŏ', o) assert u'hiŏ' in cache assert u'hŏa' not in cache cache.remove_value(u'hiŏ') assert u'hiŏ' not in cache def test_remove_stale(): """test that remove_value() removes even if the value is expired.""" cache = Cache('test', type='memory') o = object() cache.namespace['key'] = (time.time() - 30, 10, o) container = cache._get_value('key') assert not container.has_current_value() assert 'key' in container.namespace cache.remove_value('key') assert 'key' not in container.namespace # safe to call again cache.remove_value('key') def test_multi_keys(): cache = Cache('newtests', data_dir='./cache', type='dbm') cache.clear() called = {} def create_func(): called['here'] = True return 'howdy' try: cache.get_value('key1') except KeyError: pass else: raise Exception("Failed to keyerror on nonexistent key") assert 'howdy' == cache.get_value('key2', createfunc=create_func) assert called['here'] == True del called['here'] try: cache.get_value('key3') except KeyError: pass else: raise Exception("Failed to keyerror on nonexistent key") try: cache.get_value('key1') except KeyError: pass else: raise Exception("Failed to keyerror on nonexistent key") assert 'howdy' == cache.get_value('key2', createfunc=create_func) assert called == {} @skip_if(lambda: TestApp is None, "webtest not installed") def test_increment(): app = TestApp(CacheMiddleware(simple_app)) res = app.get('/', extra_environ={'beaker.type':type, 'beaker.clear':True}) assert 'current value is: 1' in res res = app.get('/') assert 'current value is: 2' in res res = app.get('/') assert 'current value is: 3' in res @skip_if(lambda: TestApp is None, "webtest not installed") def test_cache_manager(): app = TestApp(CacheMiddleware(cache_manager_app)) res = app.get('/') assert 'test_key is: test value' in res assert 'test_key cleared' in res def test_clsmap_nonexistent(): from beaker.cache import clsmap try: clsmap['fake'] assert False except KeyError: pass def test_clsmap_present(): from beaker.cache import clsmap assert clsmap['memory'] def test_legacy_cache(): cache = Cache('newtests', data_dir='./cache', type='dbm') cache.set_value('x', '1') assert cache.get_value('x') == '1' cache.set_value('x', '2', type='file', data_dir='./cache') assert cache.get_value('x') == '1' assert cache.get_value('x', type='file', data_dir='./cache') == '2' cache.remove_value('x') cache.remove_value('x', type='file', data_dir='./cache') assert cache.get_value('x', expiretime=1, createfunc=lambda: '5') == '5' assert cache.get_value('x', expiretime=1, createfunc=lambda: '6', type='file', data_dir='./cache') == '6' assert cache.get_value('x', expiretime=1, createfunc=lambda: '7') == '5' assert cache.get_value('x', expiretime=1, createfunc=lambda: '8', type='file', data_dir='./cache') == '6' time.sleep(1) assert cache.get_value('x', expiretime=1, createfunc=lambda: '9') == '9' assert cache.get_value('x', expiretime=1, createfunc=lambda: '10', type='file', data_dir='./cache') == '10' assert cache.get_value('x', expiretime=1, createfunc=lambda: '11') == '9' assert cache.get_value('x', expiretime=1, createfunc=lambda: '12', type='file', data_dir='./cache') == '10' def test_upgrade(): # If we're on OSX, lets run this since its OSX dump files, 
otherwise # we have to skip it if platform.system() != 'Darwin': return for test in _test_upgrade_has_key, _test_upgrade_in, _test_upgrade_setitem: for mod, tar in (('dbm', dbm_cache_tar), ('dumbdbm', dumbdbm_cache_tar)): try: __import__(mod) except ImportError: continue dir = tempfile.mkdtemp() fd, name = tempfile.mkstemp(dir=dir) fp = os.fdopen(fd, 'wb') fp.write(tar) fp.close() tar = tarfile.open(name) for member in tar.getmembers(): tar.extract(member, dir) tar.close() try: test(os.path.join(dir, 'db')) finally: shutil.rmtree(dir) def _test_upgrade_has_key(dir): cache = Cache('test', data_dir=dir, type='dbm') assert cache.has_key('foo') assert cache.has_key('foo') def _test_upgrade_in(dir): cache = Cache('test', data_dir=dir, type='dbm') assert 'foo' in cache assert 'foo' in cache def _test_upgrade_setitem(dir): cache = Cache('test', data_dir=dir, type='dbm') assert cache['foo'] == 'bar' assert cache['foo'] == 'bar' def teardown(): import shutil shutil.rmtree('./cache', True) Beaker-1.6.3/tests/test_container.py0000664000076500000240000001240111542263524017342 0ustar benstaff00000000000000import os import random import time from beaker.container import * from beaker.synchronization import _synchronizers from beaker.cache import clsmap from threading import Thread class CachedWidget(object): totalcreates = 0 delay = 0 def __init__(self): CachedWidget.totalcreates += 1 time.sleep(CachedWidget.delay) self.time = time.time() def _run_container_test(cls, totaltime, expiretime, delay, threadlocal): print "\ntesting %s for %d secs with expiretime %s delay %d" % ( cls, totaltime, expiretime, delay) CachedWidget.totalcreates = 0 CachedWidget.delay = delay # allow for python overhead when checking current time against expire times fudge = 1 starttime = time.time() running = [True] class RunThread(Thread): def run(self): print "%s starting" % self if threadlocal: localvalue = Value( 'test', cls('test', data_dir='./cache'), createfunc=CachedWidget, expiretime=expiretime, starttime=starttime) localvalue.clear_value() else: localvalue = value try: while running[0]: item = localvalue.get_value() if expiretime is not None: currenttime = time.time() itemtime = item.time assert itemtime + expiretime + delay + fudge >= currenttime, \ "created: %f expire: %f delay: %f currenttime: %f" % \ (itemtime, expiretime, delay, currenttime) time.sleep(random.random() * .00001) except: running[0] = False raise print "%s finishing" % self if not threadlocal: value = Value( 'test', cls('test', data_dir='./cache'), createfunc=CachedWidget, expiretime=expiretime, starttime=starttime) value.clear_value() else: value = None threads = [RunThread() for i in range(1, 8)] for t in threads: t.start() time.sleep(totaltime) failed = not running[0] running[0] = False for t in threads: t.join() assert not failed, "One or more threads failed" if expiretime is None: expected = 1 else: expected = totaltime / expiretime + 1 assert CachedWidget.totalcreates <= expected, \ "Number of creates %d exceeds expected max %d" % (CachedWidget.totalcreates, expected) def test_memory_container(totaltime=10, expiretime=None, delay=0, threadlocal=False): _run_container_test(clsmap['memory'], totaltime, expiretime, delay, threadlocal) def test_dbm_container(totaltime=10, expiretime=None, delay=0): _run_container_test(clsmap['dbm'], totaltime, expiretime, delay, False) def test_file_container(totaltime=10, expiretime=None, delay=0, threadlocal=False): _run_container_test(clsmap['file'], totaltime, expiretime, delay, threadlocal) def 
test_memory_container_tlocal(): test_memory_container(expiretime=5, delay=2, threadlocal=True) def test_memory_container_2(): test_memory_container(expiretime=2) def test_memory_container_3(): test_memory_container(expiretime=5, delay=2) def test_dbm_container_2(): test_dbm_container(expiretime=2) def test_dbm_container_3(): test_dbm_container(expiretime=5, delay=2) def test_file_container_2(): test_file_container(expiretime=2) def test_file_container_3(): test_file_container(expiretime=5, delay=2) def test_file_container_tlocal(): test_file_container(expiretime=5, delay=2, threadlocal=True) def test_file_open_bug(): """ensure errors raised during reads or writes don't lock the namespace open.""" value = Value('test', clsmap['file']('reentrant_test', data_dir='./cache')) if os.path.exists(value.namespace.file): os.remove(value.namespace.file) value.set_value("x") f = open(value.namespace.file, 'w') f.write("BLAH BLAH BLAH") f.close() # TODO: do we have an assertRaises() in nose to use here ? try: value.set_value("y") assert False except: pass _synchronizers.clear() value = Value('test', clsmap['file']('reentrant_test', data_dir='./cache')) # TODO: do we have an assertRaises() in nose to use here ? try: value.set_value("z") assert False except: pass def test_removing_file_refreshes(): """test that the cache doesn't ignore file removals""" x = [0] def create(): x[0] += 1 return x[0] value = Value('test', clsmap['file']('refresh_test', data_dir='./cache'), createfunc=create, starttime=time.time() ) if os.path.exists(value.namespace.file): os.remove(value.namespace.file) assert value.get_value() == 1 assert value.get_value() == 1 os.remove(value.namespace.file) assert value.get_value() == 2 def teardown(): import shutil shutil.rmtree('./cache', True) Beaker-1.6.3/tests/test_cookie_only.py0000664000076500000240000001125211723457615017704 0ustar benstaff00000000000000import datetime import re import os import beaker.session from beaker.middleware import SessionMiddleware from nose import SkipTest try: from webtest import TestApp except ImportError: raise SkipTest("webtest not installed") from beaker import crypto if not crypto.has_aes: raise SkipTest("No AES library is installed, can't test cookie-only " "Sessions") def simple_app(environ, start_response): session = environ['beaker.session'] if not session.has_key('value'): session['value'] = 0 session['value'] += 1 if not environ['PATH_INFO'].startswith('/nosave'): session.save() start_response('200 OK', [('Content-type', 'text/plain')]) return ['The current value is: %d and cookie is %s' % (session['value'], session)] def test_increment(): options = {'session.validate_key':'hoobermas', 'session.type':'cookie'} app = TestApp(SessionMiddleware(simple_app, **options)) res = app.get('/') assert 'current value is: 1' in res res = app.get('/') assert 'current value is: 2' in res res = app.get('/') assert 'current value is: 3' in res def test_expires(): options = {'session.validate_key':'hoobermas', 'session.type':'cookie', 'session.cookie_expires': datetime.timedelta(days=1)} app = TestApp(SessionMiddleware(simple_app, **options)) res = app.get('/') assert 'expires=' in res.headers.getall('Set-Cookie')[0] assert 'current value is: 1' in res def test_different_sessions(): options = {'session.validate_key':'hoobermas', 'session.type':'cookie'} app = TestApp(SessionMiddleware(simple_app, **options)) app2 = TestApp(SessionMiddleware(simple_app, **options)) res = app.get('/') assert 'current value is: 1' in res res = app2.get('/') assert 'current value 
is: 1' in res res = app2.get('/') res = app2.get('/') res = app2.get('/') res2 = app.get('/') assert 'current value is: 2' in res2 assert 'current value is: 4' in res def test_nosave(): options = {'session.validate_key':'hoobermas', 'session.type':'cookie'} app = TestApp(SessionMiddleware(simple_app, **options)) res = app.get('/nosave') assert 'current value is: 1' in res assert [] == res.headers.getall('Set-Cookie') res = app.get('/nosave') assert 'current value is: 1' in res res = app.get('/') assert 'current value is: 1' in res assert len(res.headers.getall('Set-Cookie')) > 0 res = app.get('/') assert 'current value is: 2' in res def test_increment_with_encryption(): options = {'session.encrypt_key':'666a19cf7f61c64c', 'session.validate_key':'hoobermas', 'session.type':'cookie'} app = TestApp(SessionMiddleware(simple_app, **options)) res = app.get('/') assert 'current value is: 1' in res res = app.get('/') assert 'current value is: 2' in res res = app.get('/') assert 'current value is: 3' in res def test_different_sessions_with_encryption(): options = {'session.encrypt_key':'666a19cf7f61c64c', 'session.validate_key':'hoobermas', 'session.type':'cookie'} app = TestApp(SessionMiddleware(simple_app, **options)) app2 = TestApp(SessionMiddleware(simple_app, **options)) res = app.get('/') assert 'current value is: 1' in res res = app2.get('/') assert 'current value is: 1' in res res = app2.get('/') res = app2.get('/') res = app2.get('/') res2 = app.get('/') assert 'current value is: 2' in res2 assert 'current value is: 4' in res def test_nosave_with_encryption(): options = {'session.encrypt_key':'666a19cf7f61c64c', 'session.validate_key':'hoobermas', 'session.type':'cookie'} app = TestApp(SessionMiddleware(simple_app, **options)) res = app.get('/nosave') assert 'current value is: 1' in res assert [] == res.headers.getall('Set-Cookie') res = app.get('/nosave') assert 'current value is: 1' in res res = app.get('/') assert 'current value is: 1' in res assert len(res.headers.getall('Set-Cookie')) > 0 res = app.get('/') assert 'current value is: 2' in res def test_cookie_id(): options = {'session.encrypt_key':'666a19cf7f61c64c', 'session.validate_key':'hoobermas', 'session.type':'cookie'} app = TestApp(SessionMiddleware(simple_app, **options)) res = app.get('/') assert "_id':" in res sess_id = re.sub(r".*'_id': '(.*?)'.*", r'\1', res.body) res = app.get('/') new_id = re.sub(r".*'_id': '(.*?)'.*", r'\1', res.body) assert new_id == sess_id if __name__ == '__main__': from paste import httpserver wsgi_app = SessionMiddleware(simple_app, {}) httpserver.serve(wsgi_app, host='127.0.0.1', port=8080) Beaker-1.6.3/tests/test_database.py0000664000076500000240000000652011542263524017131 0ustar benstaff00000000000000# coding: utf-8 from beaker.cache import clsmap, Cache, util from beaker.exceptions import InvalidCacheBackendError from beaker.middleware import CacheMiddleware from nose import SkipTest try: from webtest import TestApp except ImportError: TestApp = None try: clsmap['ext:database']._init_dependencies() except InvalidCacheBackendError: raise SkipTest("an appropriate SQLAlchemy backend is not installed") db_url = 'sqlite:///test.db' def simple_app(environ, start_response): extra_args = {} clear = False if environ.get('beaker.clear'): clear = True extra_args['type'] = 'ext:database' extra_args['url'] = db_url extra_args['data_dir'] = './cache' cache = environ['beaker.cache'].get_cache('testcache', **extra_args) if clear: cache.clear() try: value = cache.get_value('value') except: value = 0 
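    # a missing or unreadable key counts as zero; the incremented hit
    # count is then written back into the ext:database-backed namespace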
cache.set_value('value', value+1) start_response('200 OK', [('Content-type', 'text/plain')]) return ['The current value is: %s' % cache.get_value('value')] def cache_manager_app(environ, start_response): cm = environ['beaker.cache'] cm.get_cache('test')['test_key'] = 'test value' start_response('200 OK', [('Content-type', 'text/plain')]) yield "test_key is: %s\n" % cm.get_cache('test')['test_key'] cm.get_cache('test').clear() try: test_value = cm.get_cache('test')['test_key'] except KeyError: yield "test_key cleared" else: yield "test_key wasn't cleared, is: %s\n" % \ cm.get_cache('test')['test_key'] def test_has_key(): cache = Cache('test', data_dir='./cache', url=db_url, type='ext:database') o = object() cache.set_value("test", o) assert cache.has_key("test") assert "test" in cache assert not cache.has_key("foo") assert "foo" not in cache cache.remove_value("test") assert not cache.has_key("test") def test_has_key_multicache(): cache = Cache('test', data_dir='./cache', url=db_url, type='ext:database') o = object() cache.set_value("test", o) assert cache.has_key("test") assert "test" in cache cache = Cache('test', data_dir='./cache', url=db_url, type='ext:database') assert cache.has_key("test") cache.remove_value('test') def test_clear(): cache = Cache('test', data_dir='./cache', url=db_url, type='ext:database') o = object() cache.set_value("test", o) assert cache.has_key("test") cache.clear() assert not cache.has_key("test") def test_unicode_keys(): cache = Cache('test', data_dir='./cache', url=db_url, type='ext:database') o = object() cache.set_value(u'hiŏ', o) assert u'hiŏ' in cache assert u'hŏa' not in cache cache.remove_value(u'hiŏ') assert u'hiŏ' not in cache @util.skip_if(lambda: TestApp is None, "webtest not installed") def test_increment(): app = TestApp(CacheMiddleware(simple_app)) res = app.get('/', extra_environ={'beaker.clear':True}) assert 'current value is: 1' in res res = app.get('/') assert 'current value is: 2' in res res = app.get('/') assert 'current value is: 3' in res @util.skip_if(lambda: TestApp is None, "webtest not installed") def test_cache_manager(): app = TestApp(CacheMiddleware(cache_manager_app)) res = app.get('/') assert 'test_key is: test value' in res assert 'test_key cleared' in res Beaker-1.6.3/tests/test_increment.py0000664000076500000240000001443311542263524017353 0ustar benstaff00000000000000import re import os from beaker.middleware import SessionMiddleware from nose import SkipTest try: from webtest import TestApp except ImportError: raise SkipTest("webtest not installed") def teardown(): import shutil shutil.rmtree('./cache', True) def no_save_app(environ, start_response): session = environ['beaker.session'] sess_id = environ.get('SESSION_ID') start_response('200 OK', [('Content-type', 'text/plain')]) return ['The current value is: %s, session id is %s' % (session.get('value'), session.id)] def simple_app(environ, start_response): session = environ['beaker.session'] sess_id = environ.get('SESSION_ID') if sess_id: session = session.get_by_id(sess_id) if not session: start_response('200 OK', [('Content-type', 'text/plain')]) return ["No session id of %s found." 
% sess_id] if not session.has_key('value'): session['value'] = 0 session['value'] += 1 if not environ['PATH_INFO'].startswith('/nosave'): session.save() start_response('200 OK', [('Content-type', 'text/plain')]) return ['The current value is: %d, session id is %s' % (session['value'], session.id)] def simple_auto_app(environ, start_response): """Like the simple_app, but assume that sessions auto-save""" session = environ['beaker.session'] sess_id = environ.get('SESSION_ID') if sess_id: session = session.get_by_id(sess_id) if not session: start_response('200 OK', [('Content-type', 'text/plain')]) return ["No session id of %s found." % sess_id] if not session.has_key('value'): session['value'] = 0 session['value'] += 1 if environ['PATH_INFO'].startswith('/nosave'): session.revert() start_response('200 OK', [('Content-type', 'text/plain')]) return ['The current value is: %d, session id is %s' % (session.get('value', 0), session.id)] def test_no_save(): options = {'session.data_dir':'./cache', 'session.secret':'blah'} app = TestApp(SessionMiddleware(no_save_app, **options)) res = app.get('/') assert 'current value is: None' in res assert [] == res.headers.getall('Set-Cookie') def test_increment(): options = {'session.data_dir':'./cache', 'session.secret':'blah'} app = TestApp(SessionMiddleware(simple_app, **options)) res = app.get('/') assert 'current value is: 1' in res res = app.get('/') assert 'current value is: 2' in res res = app.get('/') assert 'current value is: 3' in res def test_increment_auto(): options = {'session.data_dir':'./cache', 'session.secret':'blah'} app = TestApp(SessionMiddleware(simple_auto_app, auto=True, **options)) res = app.get('/') assert 'current value is: 1' in res res = app.get('/') assert 'current value is: 2' in res res = app.get('/') assert 'current value is: 3' in res def test_different_sessions(): options = {'session.data_dir':'./cache', 'session.secret':'blah'} app = TestApp(SessionMiddleware(simple_app, **options)) app2 = TestApp(SessionMiddleware(simple_app, **options)) res = app.get('/') assert 'current value is: 1' in res res = app2.get('/') assert 'current value is: 1' in res res = app2.get('/') res = app2.get('/') res = app2.get('/') res2 = app.get('/') assert 'current value is: 2' in res2 assert 'current value is: 4' in res def test_different_sessions_auto(): options = {'session.data_dir':'./cache', 'session.secret':'blah'} app = TestApp(SessionMiddleware(simple_auto_app, auto=True, **options)) app2 = TestApp(SessionMiddleware(simple_auto_app, auto=True, **options)) res = app.get('/') assert 'current value is: 1' in res res = app2.get('/') assert 'current value is: 1' in res res = app2.get('/') res = app2.get('/') res = app2.get('/') res2 = app.get('/') assert 'current value is: 2' in res2 assert 'current value is: 4' in res def test_nosave(): options = {'session.data_dir':'./cache', 'session.secret':'blah'} app = TestApp(SessionMiddleware(simple_app, **options)) res = app.get('/nosave') assert 'current value is: 1' in res res = app.get('/nosave') assert 'current value is: 1' in res res = app.get('/') assert 'current value is: 1' in res res = app.get('/') assert 'current value is: 2' in res def test_revert(): options = {'session.data_dir':'./cache', 'session.secret':'blah'} app = TestApp(SessionMiddleware(simple_auto_app, auto=True, **options)) res = app.get('/nosave') assert 'current value is: 0' in res res = app.get('/nosave') assert 'current value is: 0' in res res = app.get('/') assert 'current value is: 1' in res assert [] == 
res.headers.getall('Set-Cookie') res = app.get('/') assert [] == res.headers.getall('Set-Cookie') assert 'current value is: 2' in res # Finally, ensure that reverting shows the proper one res = app.get('/nosave') assert [] == res.headers.getall('Set-Cookie') assert 'current value is: 2' in res def test_load_session_by_id(): options = {'session.data_dir':'./cache', 'session.secret':'blah'} app = TestApp(SessionMiddleware(simple_app, **options)) res = app.get('/') assert 'current value is: 1' in res res = app.get('/') res = app.get('/') assert 'current value is: 3' in res old_id = re.sub(r'^.*?session id is (\S+)$', r'\1', res.body, re.M) # Clear the cookies and do a new request app = TestApp(SessionMiddleware(simple_app, **options)) res = app.get('/') assert 'current value is: 1' in res # Load a bogus session to see that its not there res = app.get('/', extra_environ={'SESSION_ID':'jil2j34il2j34ilj23'}) assert 'No session id of' in res # Saved session was at 3, now it'll be 4 res = app.get('/', extra_environ={'SESSION_ID':old_id}) assert 'current value is: 4' in res # Prior request is now up to 2 res = app.get('/') assert 'current value is: 2' in res if __name__ == '__main__': from paste import httpserver wsgi_app = SessionMiddleware(simple_app, {}) httpserver.serve(wsgi_app, host='127.0.0.1', port=8080) Beaker-1.6.3/tests/test_memcached.py0000664000076500000240000002322211660273613017272 0ustar benstaff00000000000000# coding: utf-8 import mock import os from beaker.cache import clsmap, Cache, util from beaker.middleware import CacheMiddleware, SessionMiddleware from beaker.exceptions import InvalidCacheBackendError from nose import SkipTest import unittest try: from webtest import TestApp except ImportError: TestApp = None try: from beaker.ext import memcached client = memcached._load_client() except InvalidCacheBackendError: raise SkipTest("an appropriate memcached backend is not installed") mc_url = '127.0.0.1:11211' c =client.Client([mc_url]) c.set('x', 'y') if not c.get('x'): raise SkipTest("Memcached is not running at %s" % mc_url) def teardown(): import shutil shutil.rmtree('./cache', True) def simple_session_app(environ, start_response): session = environ['beaker.session'] sess_id = environ.get('SESSION_ID') if environ['PATH_INFO'].startswith('/invalid'): # Attempt to access the session id = session.id session['value'] = 2 else: if sess_id: session = session.get_by_id(sess_id) if not session: start_response('200 OK', [('Content-type', 'text/plain')]) return ["No session id of %s found." 
% sess_id] if not session.has_key('value'): session['value'] = 0 session['value'] += 1 if not environ['PATH_INFO'].startswith('/nosave'): session.save() start_response('200 OK', [('Content-type', 'text/plain')]) return ['The current value is: %d, session id is %s' % (session['value'], session.id)] def simple_app(environ, start_response): extra_args = {} clear = False if environ.get('beaker.clear'): clear = True extra_args['type'] = 'ext:memcached' extra_args['url'] = mc_url extra_args['data_dir'] = './cache' cache = environ['beaker.cache'].get_cache('testcache', **extra_args) if clear: cache.clear() try: value = cache.get_value('value') except: value = 0 cache.set_value('value', value+1) start_response('200 OK', [('Content-type', 'text/plain')]) return ['The current value is: %s' % cache.get_value('value')] def using_none_app(environ, start_response): extra_args = {} clear = False if environ.get('beaker.clear'): clear = True extra_args['type'] = 'ext:memcached' extra_args['url'] = mc_url extra_args['data_dir'] = './cache' cache = environ['beaker.cache'].get_cache('testcache', **extra_args) if clear: cache.clear() try: value = cache.get_value('value') except: value = 10 cache.set_value('value', None) start_response('200 OK', [('Content-type', 'text/plain')]) return ['The current value is: %s' % value] def cache_manager_app(environ, start_response): cm = environ['beaker.cache'] cm.get_cache('test')['test_key'] = 'test value' start_response('200 OK', [('Content-type', 'text/plain')]) yield "test_key is: %s\n" % cm.get_cache('test')['test_key'] cm.get_cache('test').clear() try: test_value = cm.get_cache('test')['test_key'] except KeyError: yield "test_key cleared" else: yield "test_key wasn't cleared, is: %s\n" % \ cm.get_cache('test')['test_key'] @util.skip_if(lambda: TestApp is None, "webtest not installed") def test_session(): app = TestApp(SessionMiddleware(simple_session_app, data_dir='./cache', type='ext:memcached', url=mc_url)) res = app.get('/') assert 'current value is: 1' in res res = app.get('/') assert 'current value is: 2' in res res = app.get('/') assert 'current value is: 3' in res @util.skip_if(lambda: TestApp is None, "webtest not installed") def test_session_invalid(): app = TestApp(SessionMiddleware(simple_session_app, data_dir='./cache', type='ext:memcached', url=mc_url)) res = app.get('/invalid', headers=dict(Cookie='beaker.session.id=df7324911e246b70b5781c3c58328442; Path=/')) assert 'current value is: 2' in res def test_has_key(): cache = Cache('test', data_dir='./cache', url=mc_url, type='ext:memcached') o = object() cache.set_value("test", o) assert cache.has_key("test") assert "test" in cache assert not cache.has_key("foo") assert "foo" not in cache cache.remove_value("test") assert not cache.has_key("test") def test_dropping_keys(): cache = Cache('test', data_dir='./cache', url=mc_url, type='ext:memcached') cache.set_value('test', 20) cache.set_value('fred', 10) assert cache.has_key('test') assert 'test' in cache assert cache.has_key('fred') # Directly nuke the actual key, to simulate it being removed by memcached cache.namespace.mc.delete('test_test') assert not cache.has_key('test') assert cache.has_key('fred') # Nuke the keys dict, it might die, who knows cache.namespace.mc.delete('test:keys') assert cache.has_key('fred') # And we still need clear to work, even if it won't work well cache.clear() def test_deleting_keys(): cache = Cache('test', data_dir='./cache', url=mc_url, type='ext:memcached') cache.set_value('test', 20) # Nuke the keys dict, it might die, who 
def test_deleting_keys():
    cache = Cache('test', data_dir='./cache', url=mc_url, type='ext:memcached')
    cache.set_value('test', 20)

    # Nuke the keys dict, it might die, who knows
    cache.namespace.mc.delete('test:keys')
    assert cache.has_key('test')

    # make sure we can still delete keys even though our keys dict got nuked
    del cache['test']
    assert not cache.has_key('test')

def test_has_key_multicache():
    cache = Cache('test', data_dir='./cache', url=mc_url, type='ext:memcached')
    o = object()
    cache.set_value("test", o)
    assert cache.has_key("test")
    assert "test" in cache
    cache = Cache('test', data_dir='./cache', url=mc_url, type='ext:memcached')
    assert cache.has_key("test")

def test_unicode_keys():
    cache = Cache('test', data_dir='./cache', url=mc_url, type='ext:memcached')
    o = object()
    cache.set_value(u'hiŏ', o)
    assert u'hiŏ' in cache
    assert u'hŏa' not in cache
    cache.remove_value(u'hiŏ')
    assert u'hiŏ' not in cache

def test_spaces_in_unicode_keys():
    cache = Cache('test', data_dir='./cache', url=mc_url, type='ext:memcached')
    o = object()
    cache.set_value(u'hi ŏ', o)
    assert u'hi ŏ' in cache
    assert u'hŏa' not in cache
    cache.remove_value(u'hi ŏ')
    assert u'hi ŏ' not in cache

def test_spaces_in_keys():
    cache = Cache('test', data_dir='./cache', url=mc_url, type='ext:memcached')
    cache.set_value("has space", 24)
    assert cache.has_key("has space")
    assert 24 == cache.get_value("has space")
    cache.set_value("hasspace", 42)
    assert cache.has_key("hasspace")
    assert 42 == cache.get_value("hasspace")

@util.skip_if(lambda: TestApp is None, "webtest not installed")
def test_increment():
    app = TestApp(CacheMiddleware(simple_app))
    res = app.get('/', extra_environ={'beaker.clear':True})
    assert 'current value is: 1' in res
    res = app.get('/')
    assert 'current value is: 2' in res
    res = app.get('/')
    assert 'current value is: 3' in res

    app = TestApp(CacheMiddleware(simple_app))
    res = app.get('/', extra_environ={'beaker.clear':True})
    assert 'current value is: 1' in res
    res = app.get('/')
    assert 'current value is: 2' in res
    res = app.get('/')
    assert 'current value is: 3' in res

@util.skip_if(lambda: TestApp is None, "webtest not installed")
def test_cache_manager():
    app = TestApp(CacheMiddleware(cache_manager_app))
    res = app.get('/')
    assert 'test_key is: test value' in res
    assert 'test_key cleared' in res

@util.skip_if(lambda: TestApp is None, "webtest not installed")
def test_store_none():
    app = TestApp(CacheMiddleware(using_none_app))
    res = app.get('/', extra_environ={'beaker.clear':True})
    assert 'current value is: 10' in res
    res = app.get('/')
    assert 'current value is: None' in res

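# Illustrative sketch, not part of the original suite: test_store_none
# passes because a stored None is a real cache entry, distinct from a
# missing key (which makes get_value() raise).  A direct, non-WSGI version
# of the same distinction, mirroring the bare-except miss handling the
# apps above use; 'nonecheck' is just a hypothetical namespace name.
def _sketch_none_is_a_value():
    cache = Cache('nonecheck', data_dir='./cache', url=mc_url,
                  type='ext:memcached')
    cache.set_value('k', None)
    assert cache.get_value('k') is None  # a hit that happens to hold None
    cache.remove_value('k')
    missed = False
    try:
        cache.get_value('k')
    except:
        missed = True
    assert missed  # a genuine miss raises
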
class TestPylibmcInit(unittest.TestCase):
    def setUp(self):
        from beaker.ext import memcached
        try:
            import pylibmc as memcache
        except:
            import memcache

        memcached._client_libs['pylibmc'] = memcached.pylibmc = memcache
        from contextlib import contextmanager

        class ThreadMappedPool(dict):
            "a mock of pylibmc's ThreadMappedPool"

            def __init__(self, master):
                self.master = master

            @contextmanager
            def reserve(self):
                yield self.master

        memcache.ThreadMappedPool = ThreadMappedPool

    def test_uses_pylibmc_client(self):
        from beaker.ext import memcached
        cache = Cache('test', data_dir='./cache', memcache_module='pylibmc',
                      url=mc_url, type="ext:memcached")
        assert isinstance(cache.namespace, memcached.PyLibMCNamespaceManager)

    def test_dont_use_pylibmc_client(self):
        from beaker.ext.memcached import _load_client
        load_mock = mock.Mock()
        load_mock.return_value = _load_client('memcache')
        with mock.patch('beaker.ext.memcached._load_client', load_mock):
            cache = Cache('test', data_dir='./cache', url=mc_url,
                          type="ext:memcached")
            assert not isinstance(cache.namespace,
                                  memcached.PyLibMCNamespaceManager)
            assert isinstance(cache.namespace,
                              memcached.MemcachedNamespaceManager)

    def test_client(self):
        cache = Cache('test', data_dir='./cache', url=mc_url,
                      type="ext:memcached")
        o = object()
        cache.set_value("test", o)
        assert cache.has_key("test")
        assert "test" in cache
        assert not cache.has_key("foo")
        assert "foo" not in cache
        cache.remove_value("test")
        assert not cache.has_key("test")
Beaker-1.6.3/tests/test_namespacing_files/0000775000076500000240000000000011723541367020464 5ustar benstaff00000000000000Beaker-1.6.3/tests/test_namespacing_files/__init__.py0000664000076500000240000000000011723457647022572 0ustar benstaff00000000000000Beaker-1.6.3/tests/test_namespacing_files/namespace_get.py0000664000076500000240000000067611723457647023641 0ustar benstaff00000000000000from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
from datetime import datetime

defaults = {'cache.data_dir':'./cache', 'cache.type':'dbm',
            'cache.expire': 60, 'cache.regions': 'short_term'}

cache = CacheManager(**parse_cache_config_options(defaults))

def get_cached_value():
    @cache.region('short_term', 'test_namespacing')
    def get_value():
        return datetime.now()

    return get_value()
Beaker-1.6.3/tests/test_namespacing_files/namespace_go.py0000664000076500000240000000116111723457647023465 0ustar benstaff00000000000000import time

def go():
    import namespace_get
    a = namespace_get.get_cached_value()
    time.sleep(0.3)
    b = namespace_get.get_cached_value()
    time.sleep(0.3)

    import test_namespacing_files.namespace_get
    c = test_namespacing_files.namespace_get.get_cached_value()
    time.sleep(0.3)
    d = test_namespacing_files.namespace_get.get_cached_value()

    print a
    print b
    print c
    print d

    assert a==b, 'Basic caching problem - should never happen'
    assert c==d, 'Basic caching problem - should never happen'
    assert a==c, 'Namespaces not consistent when using different import paths'
Beaker-1.6.3/tests/test_syncdict.py0000664000076500000240000000362711542263524017212 0ustar benstaff00000000000000from beaker.util import SyncDict, WeakValuedRegistry
import random, time, weakref
import threading

class Value(object):
    values = {}

    def do_something(self, id):
        Value.values[id] = self

    def stop_doing_something(self, id):
        del Value.values[id]

mutex = threading.Lock()

def create(id):
    assert not Value.values, "values still remain"
    global totalcreates
    totalcreates += 1
    return Value()

def threadtest(s, id):
    print "create thread %d starting" % id
    global running
    global totalgets
    while running:
        try:
            value = s.get('test', lambda: create(id))
            value.do_something(id)
        except Exception, e:
            print "Error", e
            running = False
            break
        else:
            totalgets += 1
            time.sleep(random.random() * .01)
            value.stop_doing_something(id)
            del value
            time.sleep(random.random() * .01)

def runtest(s):
    global values
    values = {}

    global totalcreates
    totalcreates = 0

    global totalgets
    totalgets = 0

    global running
    running = True

    threads = []
    for id_ in range(1, 20):
        t = threading.Thread(target=threadtest, args=(s, id_))
        t.start()
        threads.append(t)

    for i in range(0, 10):
        if not running:
            break
        time.sleep(1)

    failed = not running
    running = False

    for t in threads:
        t.join()

    assert not failed, "test failed"

    print "total object creates %d" % totalcreates
    print "total object gets %d" % totalgets

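# Illustrative sketch, not part of the original file (underscore-prefixed
# so test collectors skip it): the contract that runtest() hammers under
# contention, shown single-threaded.  get(key, createfunc) should call
# createfunc on the first miss and then keep handing back the same object,
# at least for SyncDict, which holds strong references (WeakValuedRegistry
# may drop entries once unreferenced).
def _sketch_syncdict_contract():
    s = SyncDict()
    creates = []
    first = s.get('test', lambda: creates.append(1) or Value())
    second = s.get('test', lambda: creates.append(1) or Value())
    assert first is second       # cached object is reused
    assert len(creates) == 1     # createfunc ran exactly once
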
def test_dict():
    # normal dictionary test, where we will remove the value
    # periodically. the number of creates should be equal to
    # the number of removes plus one.
    print "\ntesting with normal dict"
    runtest(SyncDict())

def test_weakdict():
    print "\ntesting with weak dict"
    runtest(WeakValuedRegistry())
Beaker-1.6.3/tests/test_synchronizer.py0000664000076500000240000000121511542263524020116 0ustar benstaff00000000000000from beaker.synchronization import *

# TODO: spawn threads, test locking.

def teardown():
    import shutil
    shutil.rmtree('./cache', True)

def test_reentrant_file():
    sync1 = file_synchronizer('test', lock_dir='./cache')
    sync2 = file_synchronizer('test', lock_dir='./cache')
    sync1.acquire_write_lock()
    sync2.acquire_write_lock()
    sync2.release_write_lock()
    sync1.release_write_lock()

def test_null():
    sync = null_synchronizer()
    assert sync.acquire_write_lock()
    sync.release_write_lock()

def test_mutex():
    sync = mutex_synchronizer('someident')
    sync.acquire_write_lock()
    sync.release_write_lock()
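
# Illustrative sketch, not part of the original file (underscore-prefixed
# so test collectors skip it): the synchronizers also expose read locks
# alongside the write locks exercised above.  Assuming the same
# file_synchronizer() factory supports them, a read-lock round trip
# looks like this.
def _sketch_read_lock():
    sync = file_synchronizer('test_read', lock_dir='./cache')
    sync.acquire_read_lock()
    sync.release_read_lock()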