myghtyutils-0.52/0000755000175000017500000000000010515076431013042 5ustar malexmalexmyghtyutils-0.52/ez_setup.py0000666000175000017500000001714510460004624015260 0ustar malexmalex#!python """Bootstrap setuptools installation If you want to use setuptools in your package's setup.py, just include this file in the same directory with it, and add this to the top of your setup.py:: from ez_setup import use_setuptools use_setuptools() If you want to require a specific version of setuptools, set a download mirror, or use an alternate download directory, you can do so by supplying the appropriate options to ``use_setuptools()``. This file can also be run as a script to install or upgrade setuptools. """ import sys DEFAULT_VERSION = "0.6c1" DEFAULT_URL = "http://cheeseshop.python.org/packages/%s/s/setuptools/" % sys.version[:3] md5_data = { 'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca', 'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb', 'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b', 'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a', 'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618', 'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac', 'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5', 'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4', 'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c', 'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b', } import sys, os def _validate_md5(egg_name, data): if egg_name in md5_data: from md5 import md5 digest = md5(data).hexdigest() if digest != md5_data[egg_name]: print >>sys.stderr, ( "md5 validation of %s failed! 
(Possible download problem?)" % egg_name ) sys.exit(2) return data def use_setuptools( version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, download_delay=15 ): """Automatically find/download setuptools and make it available on sys.path `version` should be a valid setuptools version number that is available as an egg for download under the `download_base` URL (which should end with a '/'). `to_dir` is the directory where setuptools will be downloaded, if it is not already available. If `download_delay` is specified, it should be the number of seconds that will be paused before initiating a download, should one be required. If an older version of setuptools is installed, this routine will print a message to ``sys.stderr`` and raise SystemExit in an attempt to abort the calling script. """ try: import setuptools if setuptools.__version__ == '0.0.1': print >>sys.stderr, ( "You have an obsolete version of setuptools installed. Please\n" "remove it from your system entirely before rerunning this script." ) sys.exit(2) except ImportError: egg = download_setuptools(version, download_base, to_dir, download_delay) sys.path.insert(0, egg) import setuptools; setuptools.bootstrap_install_from = egg import pkg_resources try: pkg_resources.require("setuptools>="+version) except pkg_resources.VersionConflict: # XXX could we install in a subprocess here? print >>sys.stderr, ( "The required version of setuptools (>=%s) is not available, and\n" "can't be installed while this script is running. Please install\n" " a more recent version first." ) % version sys.exit(2) def download_setuptools( version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, delay = 15 ): """Download setuptools from a specified location and return its filename `version` should be a valid setuptools version number that is available as an egg for download under the `download_base` URL (which should end with a '/'). `to_dir` is the directory where the egg will be downloaded. 
`delay` is the number of seconds to pause before an actual download attempt. """ import urllib2, shutil egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3]) url = download_base + egg_name saveto = os.path.join(to_dir, egg_name) src = dst = None if not os.path.exists(saveto): # Avoid repeated downloads try: from distutils import log if delay: log.warn(""" --------------------------------------------------------------------------- This script requires setuptools version %s to run (even to display help). I will attempt to download it for you (from %s), but you may need to enable firewall access for this script first. I will start the download in %d seconds. (Note: if this machine does not have network access, please obtain the file %s and place it in this directory before rerunning this script.) ---------------------------------------------------------------------------""", version, download_base, delay, url ); from time import sleep; sleep(delay) log.warn("Downloading %s", url) src = urllib2.urlopen(url) # Read/write all in one block, so we don't create a corrupt file # if the download is interrupted. 
data = _validate_md5(egg_name, src.read()) dst = open(saveto,"wb"); dst.write(data) finally: if src: src.close() if dst: dst.close() return os.path.realpath(saveto) def main(argv, version=DEFAULT_VERSION): """Install or upgrade setuptools and EasyInstall""" try: import setuptools except ImportError: import tempfile, shutil tmpdir = tempfile.mkdtemp(prefix="easy_install-") try: egg = download_setuptools(version, to_dir=tmpdir, delay=0) sys.path.insert(0,egg) from setuptools.command.easy_install import main return main(list(argv)+[egg]) # we're done here finally: shutil.rmtree(tmpdir) else: if setuptools.__version__ == '0.0.1': # tell the user to uninstall obsolete version use_setuptools(version) req = "setuptools>="+version import pkg_resources try: pkg_resources.require(req) except pkg_resources.VersionConflict: try: from setuptools.command.easy_install import main except ImportError: from easy_install import main main(list(argv)+[download_setuptools(delay=0)]) sys.exit(0) # try to force an exit else: if argv: from setuptools.command.easy_install import main main(argv) else: print "Setuptools version",version,"or greater has been installed." print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)' def update_md5(filenames): """Update our built-in md5 registry""" import re from md5 import md5 for name in filenames: base = os.path.basename(name) f = open(name,'rb') md5_data[base] = md5(f.read()).hexdigest() f.close() data = [" %r: %r,\n" % it for it in md5_data.items()] data.sort() repl = "".join(data) import inspect srcfile = inspect.getsourcefile(sys.modules[__name__]) f = open(srcfile, 'rb'); src = f.read(); f.close() match = re.search("\nmd5_data = {\n([^}]+)}", src) if not match: print >>sys.stderr, "Internal error!" 
sys.exit(2) src = src[:match.start(1)] + repl + src[match.end(1):] f = open(srcfile,'w') f.write(src) f.close() if __name__=='__main__': if len(sys.argv)>2 and sys.argv[1]=='--md5update': update_md5(sys.argv[2:]) else: main(sys.argv[1:]) myghtyutils-0.52/LICENSE0000666000175000017500000000226310460506136014055 0ustar malexmalexThis is the MIT license: http://www.opensource.org/licenses/mit-license.php Copyright (c) 2004, 2005, 2006 Michael Bayer and contributors. Myghty is a trademark of Michael Bayer. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.myghtyutils-0.52/PKG-INFO0000666000175000017500000000246610462270030014143 0ustar malexmalexMetadata-Version: 1.0 Name: MyghtyUtils Version: 0.52 Summary: Container and Utility Functions from the Myghty Template Framework Home-page: http://www.myghty.org Author: Mike Bayer Author-email: mike@myghty.org License: MIT License Description: This is the set of utility classes used by Myghty templating. 
Included are: container - the Containment system providing back-end neutral key/value storage, with support for in-memory, DBM files, flat files, and memcached buffer - some functions for augmenting file objects util - various utility functions and objects synchronizer - provides many reader/single writer synchronization using either thread mutexes or lockfiles session - provides a Session interface built upon the Container, similar interface to mod_python session. Currently needs a mod_python-like request object, this should be changed to something more generic. `Development SVN `_ Platform: UNKNOWN Classifier: Development Status :: 4 - Beta Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: MIT License Classifier: Programming Language :: Python myghtyutils-0.52/README0000666000175000017500000000122710460506136013727 0ustar malexmalex--- MyghtyUtils --- This is the set of utility classes used by Myghty templating. Included are: container - the Containment system providing back-end neutral key/value storage, with support for in-memory, DBM files, flat files, and memcached buffer - some functions for augmenting file objects util - various utility functions and objects synchronizer - provides many reader/single writer synchronization using either thread mutexes or lockfiles session - provides a Session interface built upon the Container, similar interface to mod_python session. Currently needs a mod_python-like request object, this should be changed to something more generic. 
myghtyutils-0.52/setup.cfg0000666000175000017500000000010010462270030014646 0ustar malexmalex[egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 myghtyutils-0.52/setup.py0000666000175000017500000000254410460506136014564 0ustar malexmalexfrom ez_setup import use_setuptools use_setuptools() from setuptools import setup, find_packages setup(name = "MyghtyUtils", version = "0.52", description = "Container and Utility Functions from the Myghty Template Framework", author = "Mike Bayer", author_email = "mike@myghty.org", url = "http://www.myghty.org", package_dir = {'':'lib'}, packages = find_packages('lib'), license = "MIT License", long_description = """\ This is the set of utility classes used by Myghty templating. Included are: container - the Containment system providing back-end neutral key/value storage, with support for in-memory, DBM files, flat files, and memcached buffer - some functions for augmenting file objects util - various utility functions and objects synchronizer - provides many reader/single writer synchronization using either thread mutexes or lockfiles session - provides a Session interface built upon the Container, similar interface to mod_python session. Currently needs a mod_python-like request object, this should be changed to something more generic. 
`Development SVN `_ """, classifiers = ["Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python", ], ) myghtyutils-0.52/lib/0000755000175000017500000000000010515076431013610 5ustar malexmalexmyghtyutils-0.52/lib/myghtyutils/0000755000175000017500000000000010515076431016212 5ustar malexmalexmyghtyutils-0.52/lib/myghtyutils/buffer.py0000666000175000017500000000650410460506134020043 0ustar malexmalex# $Id: buffer.py,v 1.1.1.1 2006/01/12 20:54:38 classic Exp $ # buffer.py - string buffering functions for Myghty # Copyright (C) 2004, 2005 Michael Bayer mike_mp@zzzcomputing.com # # This module is part of Myghty and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php # # """Buffer is an output handling object which corresponds to the Python file object interface.""" from util import * import StringIO import sys, string class BufferDecorator(object): """allows flexible combinations of buffers. """ def __init__(self, buffer): self.buffer = buffer def __getattr__(self, name): return getattr(self.buffer, name) def __repr__(self): return "BufferDecorator, enclosing %s." 
% repr(self.buffer) class FunctionBuffer(BufferDecorator): def __init__(self, func): self.func = func def write(self, s): self.func(s) class LinePrinter(BufferDecorator): def write(self, s): self.buffer.write(s + "\n") def writelines(self, list): self.buffer.writelines([s + "\n" for s in list]) class HierarchicalBuffer(BufferDecorator): """a buffer that can create child buffers or itself be attached to a parent buffer""" def __init__(self, buffer = None, parent = None, ignore_flush = False, ignore_clear = False, filter = None): self.parent = parent if buffer is None and parent is not None: BufferDecorator.__init__(self, parent.buffer) else: BufferDecorator.__init__(self, buffer) self.ignore_flush = ignore_flush self.ignore_clear = ignore_clear self.filter = filter def add_child(self, buffer): return HierarchicalBuffer(buffer, parent = self) def truncate(self, size=None): if not self.ignore_clear: return self.buffer.truncate(size) def write(self, s): if self.filter: self.buffer.write(self.filter(s)) else: self.buffer.write(s) def writelines(self, list): if self.filter: self.buffer.write(map(self.filter, list)) else: self.buffer.write(list) def flush(self): if not self.ignore_flush: if self.parent: self.buffer.seek(0) self.parent.write(self.buffer.read()) self.buffer.truncate(0) else: self.buffer.flush() def __repr__(self): return "Hierarchical Buffer, enclosing %s. 
Parent:\n %s" % (repr(self.buffer), repr(self.parent)) class LogFormatter(BufferDecorator): def __init__(self, buffer, identifier, id_threads = False, autoflush = True): BufferDecorator.__init__(self, buffer) self.identifier = identifier self.id_threads = id_threads self.autoflush = autoflush def _formatline(self, s): if self.id_threads: return "[%s] [pid:%d tid:%d] %s" % (self.identifier, pid(), thread_id(), string.rstrip(s)) else: return "[%s] %s" % (self.identifier, string.rstrip(s)) def write(self, s): self.buffer.write(self._formatline(s)) if self.autoflush: self.flush() def writelines(self, lines): for line in lines: self.buffer.write(self._formatline(line)) myghtyutils-0.52/lib/myghtyutils/container.py0000666000175000017500000006175410460506136020566 0ustar malexmalex# $Id: container.py,v 1.1.1.1 2006/01/12 20:54:38 classic Exp $ # container.py - file/memory data containment API and implementation for Myghty # Copyright (C) 2004, 2005 Michael Bayer mike_mp@zzzcomputing.com # # This module is part of Myghty and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php # # import os.path, re, string, time, weakref, sys from util import * from synchronization import * import cPickle __all__ = ['NamespaceContext', 'ContainerContext', 'Container', 'MemoryContainer', 'DBMContainer', 'NamespaceManager', 'MemoryNamespaceManager', 'DBMNamespaceManager', 'FileContainer', 'FileNamespaceManager', 'CreationAbortedError', 'container_registry'] def container_registry(name, classtype): if name.startswith('ext:'): name = name[4:] modname = "myghty.ext." 
+ name mod = getattr(__import__(modname).ext, name) else: mod = sys.modules[__name__] cname = string.capitalize(name) + classtype return getattr(mod, cname) class NamespaceContext: """initial context supplied to NamespaceManagers""" def __init__(self, log_file = None): self.log_file = log_file def debug(self, message, nsm, container = None): if self.log_file is not None: if container is not None: message = "[%s:%s:%s] %s\n" % (container.__class__.__name__, nsm.namespace, container.key, message) else: message = "[%s] %s\n" % (nsm.namespace, message) self.log_file.write(message) class NamespaceManager: """handles dictionary operations and locking for a namespace of values. the implementation for setting and retrieving the namespace data is handled by subclasses. acts as a service for a Container, which stores and retreives a particular key from the namespace, coupled with a "stored time" setting. NamespaceManager may be used alone, or may be privately managed by one or more Container objects. Container objects provide per-key services like automatic expiration and recreation of individual keys and can manange many types of NamespaceManagers for one or more particular namespaces simultaneously. the class supports locking relative to its name. many namespacemanagers within multiple threads or across multiple processes must read/write synchronize their access to the actual dictionary of data referenced by the name. 
""" def __init__(self, context , namespace, **params): # caution: this might create a circular reference # (which was giving me very weird gc() problems # in previous configurations) self.context = context self.namespace = namespace self.openers = 0 self.mutex = _threading.Lock() def do_acquire_read_lock(self): raise NotImplementedError() def do_release_read_lock(self): raise NotImplementedError() def do_acquire_write_lock(self, wait = True): raise NotImplementedError() def do_release_write_lock(self): raise NotImplementedError() def do_open(self, flags): raise NotImplementedError() def do_close(self): raise NotImplementedError() def do_remove(self): """removes this namespace from wherever it is stored""" raise NotImplementedError() def has_key(self, key): return self.__contains__(key) def __getitem__(self, key): raise NotImplementedError() def __setitem__(self, key, value): raise NotImplementedError() def __contains__(self, key): raise NotImplementedError() def __delitem__(self, key): raise NotImplementedError() def keys(self): raise NotImplementedError() def acquire_read_lock(self): """acquires a read lock for this namespace, and insures that the datasource has been opened for reading if it is not already opened. acquire/release supports reentrant/nested operation.""" self.do_acquire_read_lock() self.open('r', checkcount = True) def release_read_lock(self): """releases the read lock for this namespace, and possibly closes the datasource, if it was opened as a product of the read lock's acquire/release block. acquire/release supports reentrant/nested operation.""" self.close(checkcount = True) self.do_release_read_lock() def acquire_write_lock(self, wait = True): """acquires a write lock for this namespace, and insures that the datasource has been opened for writing if it is not already opened. 
acquire/release supports reentrant/nested operation.""" r = self.do_acquire_write_lock(wait) if (wait or r): self.open('c', checkcount = True) return r def release_write_lock(self): """releases the write lock for this namespace, and possibly closes the datasource, if it was opened as a product of the write lock's acquire/release block. acquire/release supports reentrant/nested operation.""" self.close(checkcount = True) self.do_release_write_lock() def open(self, flags, checkcount = False): """opens the datasource for this namespace. the checkcount flag indicates an "opened" counter should be checked for zero before performing the open operation, which is incremented by one regardless.""" self.mutex.acquire() try: if checkcount: if self.openers == 0: self.do_open(flags) self.openers += 1 else: self.do_open(flags) self.openers = 1 finally: self.mutex.release() def close(self, checkcount = False): """closes the datasource for this namespace. the checkcount flag indicates an "opened" counter should be checked for zero before performing the close operation, which is otherwise decremented by one.""" self.mutex.acquire() try: if checkcount: self.openers -= 1 if self.openers == 0: self.do_close() else: if self.openers > 0: self.do_close() self.openers = 0 finally: self.mutex.release() def remove(self): self.do_acquire_write_lock() try: self.close(checkcount = False) self.do_remove() finally: self.do_release_write_lock() def debug(self, message, container = None): self.context.debug(message, self, container) class ContainerContext(NamespaceContext): """initial context supplied to Containers. Keeps track of namespacemangers keyed off of namespace names and container types. also keeps namespacemanagers thread local for nsm instances that arent threadsafe (i.e. 
gdbm) """ def __init__(self, log_file = None): NamespaceContext.__init__(self, log_file) self.registry = {} def get_namespace_manager(self, namespace, container, **params): key = str(_thread.get_ident()) + "|" + container.__class__.__name__ + "|" + namespace try: return self.registry[key] except KeyError: return self.registry.setdefault(key, self.create_nsm(namespace, container, **params)) def create_nsm(self, namespace, container, **params): nsm = container.do_create_namespace_manager(context = self, namespace = namespace, **params) return nsm class Container: """represents a value, its stored time, and a value creation function corresponding to a particular key in a particular namespace. handles storage and retrieval of its value via a single NamespaceManager, as well as handling expiration times and an optional creation function that can create or recreate its value when needed. the Container performs locking operations on the NamespaceManager, including a pretty intricate one for get_value with a creation function, so its best not to pass a NamespaceManager that has been externally locked or open, as it stands currently (i hope to improve on this). Managing multiple Containers for a set of keys within a certain namespace allows management of multiple namespace implementations, expiration properties, and thread/process synchronization, on a per-key basis. """ def __init__(self, key, context, namespace, createfunc = None, expiretime = None, starttime = None, **params): """create a container that stores one cached object. createfunc - a function that will create the value. this function is called when value is None or expired. the createfunc call is also synchronized against any other threads or processes calling this cache. expiretime - time in seconds that the item expires. 
""" self.key = key self.createfunc = createfunc self.expiretime = expiretime self.starttime = starttime self.storedtime = -1 self.namespacemanager = context.get_namespace_manager(namespace, self, **params) self.do_init(**params) def acquire_read_lock(self): self.namespacemanager.acquire_read_lock() def release_read_lock(self): self.namespacemanager.release_read_lock() def acquire_write_lock(self, wait = True): return self.namespacemanager.acquire_write_lock(wait) def release_write_lock(self): self.namespacemanager.release_write_lock() def debug(self, message): self.namespacemanager.debug(message, self) def do_create_namespace_manager(self, context, namespace, **params): """subclasses should return a newly created instance of their corresponding NamespaceManager.""" raise NotImplementedError() def do_init(self, **params): """subclasses can perform general initialization. optional template method.""" pass def do_get_value(self): """retrieves the native stored value of this container, regardless of if its expired, or raise KeyError if no value is defined. optionally a template method.""" return self.namespacemanager[self.key] def do_set_value(self, value): """sets the raw value in this container. optionally a template method.""" self.namespacemanager[self.key] = value def do_clear_value(self): """clears the value of this container. subsequent do_get_value calls should raise KeyError. optionally a template method.""" if self.namespacemanager.has_key(self.key): del self.namespacemanager[self.key] def has_value(self): """returns true if the container has a value stored, regardless of it being expired or not. 
optionally a template method.""" self.acquire_read_lock() try: return self.namespacemanager.has_key(self.key) finally: self.release_read_lock() def lock_createfunc(self, wait = True): """required template method that locks this container's namespace and key to allow a single execution of the creation function.""" raise NotImplementedError() def unlock_createfunc(self): """required template method that unlocks this container's namespace and key when the creation function is complete.""" raise NotImplementedError() def can_have_value(self): """returns true if this container either has a non-expired value, or is capable of creating one via a creation function""" return self.has_current_value() or self.createfunc is not None def has_current_value(self): """returns true if this container has a non-expired value""" return self.has_value() and not self.is_expired() def stored_time(self): return self.storedtime def get_namespace_manager(self): return self.namespacemanager def get_all_namespaces(self): return self.namespacemanager.context._container_namespaces.values() def is_expired(self): """returns true if this container's value is expired, based on the last time get_value was called.""" return ( ( self.storedtime == -1 ) or ( self.starttime is not None and self.storedtime < self.starttime ) or ( self.expiretime is not None and time.time() >= self.expiretime + self.storedtime ) ) def get_value(self): """get_value performs a get with expiration checks on its namespacemanager. 
if a creation function is specified, a new value will be created if the existing value is nonexistent or has expired.""" self.acquire_read_lock() try: has_value = self.has_value() if has_value: [self.storedtime, value] = self.do_get_value() if not self.is_expired(): return value if not self.can_have_value(): raise KeyError(self.key) finally: self.release_read_lock() has_createlock = False if has_value: if not self.lock_createfunc(wait = False): self.debug("get_value returning old value while new one is created") return value else: self.debug("lock_creatfunc (didnt wait)") has_createlock = True if not has_createlock: self.debug("lock_createfunc (waiting)") self.lock_createfunc() self.debug("lock_createfunc (waited)") try: # see if someone created the value already self.acquire_read_lock() try: if self.has_value(): [self.storedtime, value] = self.do_get_value() if not self.is_expired(): return value finally: self.release_read_lock() self.debug("get_value creating new value") try: v = self.createfunc() except CreationAbortedError, e: raise self.set_value(v) return v finally: self.unlock_createfunc() self.debug("unlock_createfunc") def set_value(self, value): self.acquire_write_lock() try: self.storedtime = time.time() self.debug("set_value stored time %d" % self.storedtime) self.do_set_value([self.storedtime, value]) finally: self.release_write_lock() def clear_value(self): self.acquire_write_lock() try: self.debug("clear_value") self.do_clear_value() self.storedtime = -1 finally: self.release_write_lock() class CreationAbortedError(Exception): """a special exception that allows a creation function to abort what its doing""" def __init__(self, **params): self.params = params class MemoryNamespaceManager(NamespaceManager): namespaces = SyncDict(_threading.Lock(), {}) def __init__(self, context, namespace, **params): NamespaceManager.__init__(self, context, namespace, **params) self.lock = Synchronizer(identifier = "memorycontainer/namespacelock/%s" % self.namespace, 
use_files = False) self.dictionary = MemoryNamespaceManager.namespaces.get(self.namespace, lambda: {}) def do_acquire_read_lock(self): self.lock.acquire_read_lock() def do_release_read_lock(self): self.lock.release_read_lock() def do_acquire_write_lock(self, wait = True): return self.lock.acquire_write_lock(wait) def do_release_write_lock(self): self.lock.release_write_lock() # the open and close methods are totally overridden to eliminate # the unnecessary "open count" computation involved def open(self, *args, **params):pass def close(self, *args, **params):pass def __getitem__(self, key): return self.dictionary[key] def __contains__(self, key): return self.dictionary.__contains__(key) def has_key(self, key): return self.dictionary.__contains__(key) def __setitem__(self, key, value):self.dictionary[key] = value def __delitem__(self, key): del self.dictionary[key] def do_remove(self): self.dictionary.clear() def keys(self): return self.dictionary.keys() class MemoryContainer(Container): def do_init(self, **params): self.funclock = None def do_create_namespace_manager(self, context, namespace, **params): return MemoryNamespaceManager(context, namespace, **params) def lock_createfunc(self, wait = True): if self.funclock is None: self.funclock = NameLock(identifier = "memorycontainer/funclock/%s/%s" % (self.namespacemanager.namespace, self.key), reentrant = True) return self.funclock.acquire(wait) def unlock_createfunc(self): self.funclock.release() class DBMNamespaceManager(NamespaceManager): def __init__(self, context, namespace, dbmmodule = None, data_dir = None, dbm_dir = None, lock_dir = None, digest_filenames = True, **params): NamespaceManager.__init__(self, context, namespace, **params) if dbm_dir is not None: self.dbm_dir = dbm_dir elif data_dir is None: raise "data_dir or dbm_dir is required" else: self.dbm_dir = data_dir + "/container_dbm" if lock_dir is not None: self.lock_dir = lock_dir elif data_dir is None: raise "data_dir or lock_dir is required" 
else: self.lock_dir = data_dir + "/container_dbm_lock" if dbmmodule is None: import anydbm self.dbmmodule = anydbm else: self.dbmmodule = dbmmodule verify_directory(self.dbm_dir) verify_directory(self.lock_dir) self.dbm = None self.lock = Synchronizer(identifier = self.namespace, use_files = True, lock_dir = self.lock_dir, digest_filenames = digest_filenames) self.encpath = EncodedPath(root = self.dbm_dir, identifiers = [self.namespace], digest = digest_filenames, extension = '.dbm') self.file = self.encpath.path self.debug("data file %s" % self.file) self._checkfile() def file_exists(self, file): if os.access(file, os.F_OK): return True else: for ext in ('db', 'dat', 'pag', 'dir'): if os.access(file + os.extsep + ext, os.F_OK): return True return False def _checkfile(self): if not self.file_exists(self.file): g = self.dbmmodule.open(self.file, 'c') g.close() def get_filenames(self): list = [] if os.access(self.file, os.F_OK): list.append(self.file) for ext in ('pag', 'dir', 'db', 'dat'): if os.access(self.file + os.extsep + ext, os.F_OK): list.append(self.file + os.extsep + ext) return list def do_acquire_read_lock(self): self.lock.acquire_read_lock() def do_release_read_lock(self): self.lock.release_read_lock() def do_acquire_write_lock(self, wait = True): return self.lock.acquire_write_lock(wait) def do_release_write_lock(self): self.lock.release_write_lock() def do_open(self, flags): # caution: apparently gdbm handles arent threadsafe, they # are using flock(), and i would rather not have knowledge # of the "unlock" 'u' option just for that one dbm module. # therefore, neither is an individual instance of # this namespacemanager (of course, multiple nsm's # can exist for each thread). 
self.debug("opening dbm file %s" % self.file) try: self.dbm = self.dbmmodule.open(self.file, flags) except: self.encpath.verify_directory() self._checkfile() self.dbm = self.dbmmodule.open(self.file, flags) def do_close(self): if self.dbm is not None: self.debug("closing dbm file %s" % self.file) self.dbm.close() def do_remove(self): for f in self.get_filenames(): os.remove(f) def __getitem__(self, key): return cPickle.loads(self.dbm[key]) def __contains__(self, key): return self.dbm.has_key(key) def __setitem__(self, key, value): self.dbm[key] = cPickle.dumps(value) def __delitem__(self, key): del self.dbm[key] def keys(self): return self.dbm.keys() class DBMContainer(Container): def do_init(self, **params): self.funclock = None def do_create_namespace_manager(self, context, namespace, **params): return DBMNamespaceManager(context, namespace, **params) def lock_createfunc(self, wait = True): if self.funclock is None: self.funclock = Synchronizer(identifier = "dbmcontainer/funclock/%s" % self.namespacemanager.namespace, use_files = True, lock_dir = self.namespacemanager.lock_dir) return self.funclock.acquire_write_lock(wait) def unlock_createfunc(self): self.funclock.release_write_lock() DbmNamespaceManager = DBMNamespaceManager DbmContainer = DBMContainer class FileNamespaceManager(NamespaceManager): def __init__(self, context, namespace, data_dir = None, file_dir = None, lock_dir = None, digest_filenames = True, **params): NamespaceManager.__init__(self, context, namespace, **params) if file_dir is not None: self.file_dir = file_dir elif data_dir is None: raise "data_dir or file_dir is required" else: self.file_dir = data_dir + "/container_file" if lock_dir is not None: self.lock_dir = lock_dir elif data_dir is None: raise "data_dir or lock_dir is required" else: self.lock_dir = data_dir + "/container_file_lock" verify_directory(self.file_dir) verify_directory(self.lock_dir) self.lock = Synchronizer(identifier = self.namespace, use_files = True, lock_dir = 
self.lock_dir, digest_filenames = digest_filenames) self.file = EncodedPath(root = self.file_dir, identifiers = [self.namespace], digest = digest_filenames, extension = '.cache').path self.hash = {} self.debug("data file %s" % self.file) def file_exists(self, file): if os.access(file, os.F_OK): return True else: return False def do_acquire_read_lock(self): self.lock.acquire_read_lock() def do_release_read_lock(self): self.lock.release_read_lock() def do_acquire_write_lock(self, wait = True): return self.lock.acquire_write_lock(wait) def do_release_write_lock(self): self.lock.release_write_lock() def do_open(self, flags): if self.file_exists(self.file): fh = open(self.file, 'r') self.hash = cPickle.load(fh) fh.close() self.flags = flags def do_close(self): if self.flags is not None and (self.flags == 'c' or self.flags == 'w'): fh = open(self.file, 'w') cPickle.dump(self.hash, fh) fh.close() self.flags = None def do_remove(self): os.remove(self.file) self.hash = {} def __getitem__(self, key): return self.hash[key] def __contains__(self, key): return self.hash.has_key(key) def __setitem__(self, key, value): self.hash[key] = value def __delitem__(self, key): del self.hash[key] def keys(self): return self.hash.keys() class FileContainer(Container): def do_init(self, **params): self.funclock = None def do_create_namespace_manager(self, context, namespace, **params): return FileNamespaceManager(context, namespace, **params) def lock_createfunc(self, wait = True): if self.funclock is None: self.funclock = Synchronizer(identifier = "filecontainer/funclock/%s" % self.namespacemanager.namespace, use_files = True, lock_dir = self.namespacemanager.lock_dir) return self.funclock.acquire_write_lock(wait) def unlock_createfunc(self): self.funclock.release_write_lock() myghtyutils-0.52/lib/myghtyutils/session.py0000666000175000017500000002166510460506136020264 0ustar malexmalex# $Id: session.py 2041 2006-02-05 19:02:14Z zzzeek $ # session.py - session management for Myghty # 
Copyright (C) 2004, 2005 Michael Bayer mike_mp@zzzcomputing.com # # This module is part of Myghty and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php # # import Cookie import hmac, md5, time, random, os, re, UserDict, datetime from container import * from util import * __all__ = ['SignedCookie', 'Session', 'MyghtySessionArgs'] class SignedCookie(Cookie.BaseCookie): "extends python cookie to give digital signature support" def __init__(self, secret, input=None): self.secret = secret Cookie.BaseCookie.__init__(self, input) def value_decode(self, val): sig = val[0:32] value = val[32:] if hmac.new(self.secret, value).hexdigest() != sig: return None, val return val[32:], val def value_encode(self, val): return val, ("%s%s" % (hmac.new(self.secret, val).hexdigest(), val)) class Session(UserDict.DictMixin): "session object that uses container package for storage" def __init__(self, request, id = None, invalidate_corrupt = False, use_cookies = True, type = None, data_dir = None, key = 'myghty_session_id', timeout = None, cookie_expires=True, secret = None, log_file = None, namespace_class = None, **params): if type is None: if data_dir is None: self.type = 'memory' else: self.type = 'file' else: self.type = type if namespace_class is None: self.namespace_class = container_registry(self.type, 'NamespaceManager') else: self.namespace_class = namespace_class self.params = params self.request = request self.data_dir = data_dir self.key = key self.timeout = timeout self.use_cookies = use_cookies self.cookie_expires = cookie_expires self.log_file = log_file self.was_invalidated = False self.secret = secret self.id = id if self.use_cookies: try: cookieheader = request.headers_in['cookie'] except KeyError: cookieheader = '' if secret is not None: try: self.cookie = SignedCookie(secret, input = cookieheader) except Cookie.CookieError: self.cookie = SignedCookie(secret, input = None) else: self.cookie = Cookie.SimpleCookie(input = 
cookieheader) if self.id is None and self.cookie.has_key(self.key): self.id = self.cookie[self.key].value if self.id is None: self._create_id() else: self.is_new = False try: self.load() except: if invalidate_corrupt: self.invalidate() else: raise def _create_id(self): self.id = md5.new( md5.new("%f%s%f%d" % (time.time(), id({}), random.random(), os.getpid()) ).hexdigest(), ).hexdigest() self.is_new = True if self.use_cookies: self.cookie[self.key] = self.id self.cookie[self.key]['path'] = '/' if self.cookie_expires is not True: if self.cookie_expires is False: expires = datetime.datetime.fromtimestamp( 0x7FFFFFFF ) elif isinstance(self.cookie_expires, datetime.timedelta): expires = datetime.datetime.today() + self.cookie_expires elif isinstance(self.cookie_expires, datetime.datetime): expires = self.cookie_expires else: raise ValueError("Invalid argument for cookie_expires: %s" % repr(self.cookie_expires)) self.cookie[self.key]['expires'] = expires.strftime("%a, %d-%b-%Y %H:%M:%S GMT" ) self.request.headers_out.add('set-cookie', self.cookie[self.key].output(header='')) created = property(lambda self: self.dict['_creation_time']) def delete(self): """deletes the persistent storage for this session, but remains valid. 
""" self.namespace.acquire_write_lock() try: for k in self.namespace.keys(): if not re.match(r'_creation_time|_accessed_time', k): del self.namespace[k] self.namespace['_accessed_time'] = time.time() finally: self.namespace.release_write_lock() def __getitem__(self, key): return self.dict.__getitem__(key) def __setitem__(self, key, value): self.dict.__setitem__(key, value) def __delitem__(self, key): del self.dict[key] def keys(self): return self.dict.keys() def __contains__(self, key): return self.dict.has_key(key) def has_key(self, key): return self.dict.has_key(key) def __iter__(self): return iter(self.dict.keys()) def iteritems(self): return self.dict.iteritems() def invalidate(self): "invalidates this session, creates a new session id, returns to the is_new state" namespace = self.namespace namespace.acquire_write_lock() try: namespace.remove() finally: namespace.release_write_lock() self.was_invalidated = True self._create_id() self.load() def load(self): "loads the data from this session from persistent storage" self.namespace = self.namespace_class(NamespaceContext(log_file = self.log_file), self.id, data_dir = self.data_dir, digest_filenames = False, **self.params) namespace = self.namespace namespace.acquire_write_lock() try: self.debug("session loading keys") self.dict = {} now = time.time() if not namespace.has_key('_creation_time'): namespace['_creation_time'] = now try: self.accessed = namespace['_accessed_time'] namespace['_accessed_time'] = now except KeyError: namespace['_accessed_time'] = self.accessed = now if self.timeout is not None and now - self.accessed > self.timeout: self.invalidate() else: for k in namespace.keys(): self.dict[k] = namespace[k] finally: namespace.release_write_lock() def save(self): "saves the data for this session to persistent storage" self.namespace.acquire_write_lock() try: self.debug("session saving keys") todel = [] for k in self.namespace.keys(): if not self.dict.has_key(k): todel.append(k) for k in todel: del 
self.namespace[k] for k in self.dict.keys(): self.namespace[k] = self.dict[k] self.namespace['_accessed_time'] = time.time() finally: self.namespace.release_write_lock() def lock(self): """locks this session against other processes/threads. this is automatic when load/save is called. ***use with caution*** and always with a corresponding 'unlock' inside a "finally:" block, as a stray lock typically cannot be unlocked without shutting down the whole application. """ self.namespace.acquire_write_lock() def unlock(self): """unlocks this session against other processes/threads. this is automatic when load/save is called. ***use with caution*** and always within a "finally:" block, as a stray lock typically cannot be unlocked without shutting down the whole application. """ self.namespace.release_write_lock() def debug(self, message): if self.log_file is not None: self.log_file.write(message) class MyghtySessionArgs(PrefixArgs): def __init__(self, data_dir = None, **params): PrefixArgs.__init__(self, 'session_') self.set_prefix_params(**params) if not self.params.has_key('data_dir') and data_dir is not None: self.params['data_dir'] = os.path.join(data_dir, 'sessions') def get_session(self, request, **params): return Session(request, **self.get_params(**params)) def clone(self, **params): p = self.get_params(**params) arg = MyghtySessionArgs() arg.params = p return arg myghtyutils-0.52/lib/myghtyutils/synchronization.py0000666000175000017500000002750210460506136022036 0ustar malexmalex# $Id: synchronization.py,v 1.1.1.1 2006/01/12 20:54:38 classic Exp $ # synchronization.py - synchronization functions for Myghty # Copyright (C) 2004, 2005 Michael Bayer mike_mp@zzzcomputing.com # # This module is part of Myghty and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php # __all__ = ["Synchronizer", "NameLock", "_threading", "_thread"] import os, weakref, tempfile, re, sys from util import * try: import thread as _thread import threading as 
_threading except ImportError: import dummy_thread as _thread import dummy_threading as _threading # check for fcntl module try: sys.getwindowsversion() has_flock = False except: try: import fcntl has_flock = True except ImportError: has_flock = False class NameLock: """a proxy for an RLock object that is stored in a name based registry. Multiple threads can get a reference to the same RLock based on the name alone, and synchronize operations related to that name. """ locks = WeakValuedRegistry() class NLContainer: """cant put Lock as a weakref""" def __init__(self, reentrant): if reentrant: self.lock = _threading.RLock() else: self.lock = _threading.Lock() def __call__(self): return self.lock def __init__(self, identifier = None, reentrant = False): self.lock = self._get_lock(identifier, reentrant) def acquire(self, wait = True): return self.lock().acquire(wait) def release(self): self.lock().release() def _get_lock(self, identifier, reentrant): if identifier is None: return NameLock.NLContainer(reentrant) return NameLock.locks.get(identifier, lambda: NameLock.NLContainer(reentrant)) synchronizers = WeakValuedRegistry() def Synchronizer(identifier = None, use_files = False, lock_dir = None, digest_filenames = True): """ returns an object that synchronizes a block against many simultaneous read operations and several synchronized write operations. Write operations are assumed to be much less frequent than read operations, and receive precedence when they request a write lock. uses strategies to determine if locking is performed via threading objects or file objects. the identifier identifies a name this Synchronizer is synchronizing against. All synchronizers of the same identifier will lock against each other, within the effective thread/process scope. use_files determines if this synchronizer will lock against thread mutexes or file locks. this sets the effective scope of the synchronizer, i.e. 
it will lock against other synchronizers in the same process, or against other synchronizers referencing the same filesystem referenced by lock_dir. the acquire/relase methods support nested/reentrant operation within a single thread via a recursion counter, so that only the outermost call to acquire/release has any effect. """ if not has_flock: use_files = False if use_files: # FileSynchronizer is one per thread return synchronizers.sync_get("file_%s_%s" % (identifier, _thread.get_ident()), lambda: FileSynchronizer(identifier, lock_dir, digest_filenames)) else: # ConditionSynchronizer is shared among threads return synchronizers.sync_get("condition_%s" % identifier, lambda: ConditionSynchronizer(identifier)) class SyncState: """used to track the current thread's reading/writing state as well as reentrant block counting""" def __init__(self): self.reentrantcount = 0 self.writing = False self.reading = False class SynchronizerImpl(object): """base for the synchronizer implementations. the acquire/release methods keep track of re-entrant calls within the current thread, and delegate to the do_XXX methods when appropriate.""" def __init__(self, *args, **params): pass def release_read_lock(self): state = self.state if state.writing: raise "lock is in writing state" if not state.reading: raise "lock is not in reading state" if state.reentrantcount == 1: self.do_release_read_lock() state.reading = False state.reentrantcount -= 1 def acquire_read_lock(self, wait = True): state = self.state if state.writing: raise "lock is in writing state" if state.reentrantcount == 0: x = self.do_acquire_read_lock(wait) if (wait or x): state.reentrantcount += 1 state.reading = True return x elif state.reading: state.reentrantcount += 1 return True def release_write_lock(self): state = self.state if state.reading: raise "lock is in reading state" if not state.writing: raise "lock is not in writing state" if state.reentrantcount == 1: self.do_release_write_lock() state.writing = False 
state.reentrantcount -= 1 def acquire_write_lock(self, wait = True): state = self.state if state.reading: raise "lock is in reading state" if state.reentrantcount == 0: x = self.do_acquire_write_lock(wait) if (wait or x): state.reentrantcount += 1 state.writing = True return x elif state.writing: state.reentrantcount += 1 return True def do_release_read_lock():raise NotImplementedError() def do_acquire_read_lock():raise NotImplementedError() def do_release_write_lock():raise NotImplementedError() def do_acquire_write_lock():raise NotImplementedError() class FileSynchronizer(SynchronizerImpl): """a synchronizer using lock files. as it relies upon flock(), which is not safe to use with the same file descriptor among multiple threads (one file descriptor per thread is OK), a separate FileSynchronizer must exist in each thread.""" def __init__(self, identifier, lock_dir, digest_filenames): self.state = SyncState() if lock_dir is None: lock_dir = tempfile.gettempdir() else: lock_dir = lock_dir self.encpath = EncodedPath(lock_dir, [identifier], extension = '.lock', digest = digest_filenames) self.filename = self.encpath.path self.opened = False self.filedesc = None def _open(self, mode): if not self.opened: try: self.filedesc = os.open(self.filename, mode) except OSError, e: self.encpath.verify_directory() self.filedesc = os.open(self.filename, mode) self.opened = True def do_acquire_read_lock(self, wait): self._open(os.O_CREAT | os.O_RDONLY) if not wait: try: fcntl.flock(self.filedesc, fcntl.LOCK_SH | fcntl.LOCK_NB) ret = True except IOError: ret = False return ret else: fcntl.flock(self.filedesc, fcntl.LOCK_SH) return True def do_acquire_write_lock(self, wait): self._open(os.O_CREAT | os.O_WRONLY) if not wait: try: fcntl.flock(self.filedesc, fcntl.LOCK_EX | fcntl.LOCK_NB) ret = True except IOError: ret = False return ret else: fcntl.flock(self.filedesc, fcntl.LOCK_EX); return True def do_release_read_lock(self): self.release_all_locks() def do_release_write_lock(self): 
self.release_all_locks() def release_all_locks(self): if self.opened: fcntl.flock(self.filedesc, fcntl.LOCK_UN) os.close(self.filedesc) self.opened = False def __del__(self): if os.access(self.filename, os.F_OK): try: os.remove(self.filename) except OSError: # occasionally another thread beats us to it pass class ConditionSynchronizer(SynchronizerImpl): """a synchronizer using a Condition. this synchronizer is based on threading.Lock() objects and therefore must be shared among threads.""" def __init__(self, identifier): self.tlocalstate = ThreadLocal(creator = lambda: SyncState()) # counts how many asynchronous methods are executing self.async = 0 # pointer to thread that is the current sync operation self.current_sync_operation = None # condition object to lock on self.condition = _threading.Condition(_threading.Lock()) state = property(lambda self: self.tlocalstate()) def do_acquire_read_lock(self, wait = True): self.condition.acquire() # see if a synchronous operation is waiting to start # or is already running, in which case we wait (or just # give up and return) if wait: while self.current_sync_operation is not None: self.condition.wait() else: if self.current_sync_operation is not None: self.condition.release() return False self.async += 1 self.condition.release() if not wait: return True def do_release_read_lock(self): self.condition.acquire() self.async -= 1 # check if we are the last asynchronous reader thread # out the door. if self.async == 0: # yes. so if a sync operation is waiting, notifyAll to wake # it up if self.current_sync_operation is not None: self.condition.notifyAll() elif self.async < 0: raise "Synchronizer error - too many release_read_locks called" self.condition.release() def do_acquire_write_lock(self, wait = True): self.condition.acquire() # here, we are not a synchronous reader, and after returning, # assuming waiting or immediate availability, we will be. 
if wait: # if another sync is working, wait while self.current_sync_operation is not None: self.condition.wait() else: # if another sync is working, # we dont want to wait, so forget it if self.current_sync_operation is not None: self.condition.release() return False # establish ourselves as the current sync # this indicates to other read/write operations # that they should wait until this is None again self.current_sync_operation = _threading.currentThread() # now wait again for asyncs to finish if self.async > 0: if wait: # wait self.condition.wait() else: # we dont want to wait, so forget it self.current_sync_operation = None self.condition.release() return False self.condition.release() if not wait: return True def do_release_write_lock(self): self.condition.acquire() if self.current_sync_operation != _threading.currentThread(): raise "Synchronizer error - current thread doesnt have the write lock" # reset the current sync operation so # another can get it self.current_sync_operation = None # tell everyone to get ready self.condition.notifyAll() # everyone go !! 
self.condition.release() myghtyutils-0.52/lib/myghtyutils/util.py0000666000175000017500000004261410460506136017553 0ustar malexmalex# $Id: util.py,v 1.1.1.1 2006/01/12 20:54:38 classic Exp $ # util.py - utility functions for Myghty # Copyright (C) 2004, 2005 Michael Bayer mike_mp@zzzcomputing.com # # This module is part of Myghty and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php # __all__ = ["OrderedDict", "ThreadLocal", "Value", "InheritedDict", "ConstructorClone", "Registry", "WeakValuedRegistry", "SyncDict", "LRUCache", "argdict", "EncodedPath", "pid", "thread_id", "verify_directory", "PrefixArgs", "module"] try: import thread as _thread import threading as _threading except ImportError: import dummy_thread as _thread import dummy_threading as _threading import weakref, inspect, sha, string, os, UserDict, copy, sys, imp, re, stat, types, time def thread_id(): return _thread.get_ident() def pid(): return os.getpid() def verify_directory(dir): """verifies and creates a directory. 
tries to ignore collisions with other threads and processes.""" tries = 0 while not os.access(dir, os.F_OK): try: tries += 1 os.makedirs(dir, 0750) except: if tries > 5: raise def module(name): """imports a module, in the ordinary way, by string name""" mod = __import__(name) components = name.split('.') for comp in components[1:]: mod = getattr(mod, comp) return mod class argdict(dict): """supports the argument constructor form of dict which doesnt seem to be present in python 2.2""" def __init__(self, **params): dict.__init__(self) self.update(params) class Value: """allows pass-by-reference operations""" def __init__(self, value = None): self.value = value def __call__(self, *arg): if len(arg): self.assign(arg[0]) else: return self.value def __str__(self): return str(self.value) def assign(self, value): self.value = value class ThreadLocal: """stores a value on a per-thread basis""" def __init__(self, value = None, default = None, creator = None): self.dict = {} self.default = default self.creator = creator if value: self.put(value) def __call__(self, *arg): if len(arg): self.put(arg[0]) else: return self.get() def __str__(self): return str(self.get()) def assign(self, value): self.dict[_thread.get_ident()] = value def put(self, value): self.assign(value) def exists(self): return self.dict.has_key(_thread.get_ident()) def get(self, *args, **params): if not self.dict.has_key(_thread.get_ident()): if self.default is not None: self.put(self.default) elif self.creator is not None: self.put(self.creator(*args, **params)) return self.dict[_thread.get_ident()] def remove(self): del self.dict[_thread.get_ident()] class OrderedDict(UserDict.DictMixin): """A Dictionary that keeps its own internal ordering""" def __init__(self, values = None): self.list = [] self.dict = {} if values is not None: for val in values: self.update(val) def keys(self): return self.list def update(self, dict): for key in dict.keys(): self.__setitem__(key, dict[key]) def values(self): return 
map(lambda key: self[key], self.list) def __iter__(self): return iter(self.list) def itervalues(self): return iter([self[key] for key in self.list]) def iterkeys(self):return self.__iter__() def iteritems(self): return iter([(key, self[key]) for key in self.keys()]) def __delitem__(self, key): del self.dict[key] del self.list[self.list.index(key)] def __setitem__(self, key, object): if not self.has_key(key): self.list.append(key) self.dict.__setitem__(key, object) def __getitem__(self, key): return self.dict.__getitem__(key) class InheritedDict(UserDict.DictMixin): """a dictionary that can defer lookups to a second dictionary if the key is not found locally.""" def __init__(self, dict, superfunc): self.dict = dict self.superfunc = superfunc def __call__(self, key = None, value = None): if key is None and value is None: return self.dict elif value is None: try: return self.__getitem__(key) except KeyError: return None else: self.__setitem__(key, value) def __getitem__(self, key): dict = self.dict if dict.has_key(key): return dict[key] else: parent = self.superfunc() if parent is not None: return parent[key] raise KeyError(key) def __setitem__(self, key, value): self.dict[key] = value def __delitem__(self, key): del self.dict[key] def keys(self): return self.dict.keys() def __contains__(self, key): return self.has_key(key) def has_key(self, key): if self.dict.has_key(key): return True parent = self.superfunc() if parent is not None: return parent.has_key(key) return False class ConstructorClone: """cloning methods that take additional parameters. one method is a straight shallow copy, the other recreates the object via its constructor. 
both methods assume a relationship between the given parameters and the attribute names of the object.""" def __init__(self, instance, **params): self.classobj = instance.__class__ self.instance = instance self.params = params def copyclone(self): cl = copy.copy(self.instance) for key, value in self.params.iteritems(): setattr(cl, key, value) return cl # store the argument specs in a static hash argspecs = {} def clone(self): """creates a new instance of the class using the regular class constructor. the arguments to the constructor are divined from inspecting the parameter names, and pulling those parameters from the original instance's attributes. this is essentially a quickie cheater way to get a clone of an object if you can name your instance variables the same as that of the constructor arguments. """ key = self.classobj.__module__ + "." + self.classobj.__name__ if not ConstructorClone.argspecs.has_key(key): argspec = inspect.getargspec(self.classobj.__init__.im_func) argnames = argspec[0] or [] defaultvalues = argspec[3] or [] (requiredargs, namedargs) = ( argnames[0:len(argnames) - len(defaultvalues)], argnames[len(argnames) - len(defaultvalues):] ) ConstructorClone.argspecs[key] = (requiredargs, namedargs) (requiredargs, namedargs) = ConstructorClone.argspecs[key] newargs = [] newparams = {} addlparams = self.params.copy() for arg in requiredargs: if arg == 'self': continue elif self.params.has_key(arg): newargs.append(self.params[arg]) else: newargs.append(getattr(self.instance, arg)) if addlparams.has_key(arg): del addlparams[arg] for arg in namedargs: if addlparams.has_key(arg): del addlparams[arg] if self.params.has_key(arg): newparams[arg] = self.params[arg] else: if hasattr(self.instance, arg): newparams[arg] = getattr(self.instance, arg) else: raise "instance has no attribute '%s'" % arg newparams.update(addlparams) return self.classobj(*newargs, **newparams) class PrefixArgs: """extracts from the given argument dictionary all values with a key '' 
and stores a reference. """ def __init__(self, prefix): self.prefix = prefix self.params = {} self.prelen = len(prefix) def set_prefix_params(self, **params): """from the given dictionary, copies all values with keys in the form "" to this one.""" for key, item in params.iteritems(): if key[0:self.prelen] == self.prefix: self.params[key[self.prelen:]] = item def set_params(self, **params): """from the given dictionary, copies all key/values to this one.""" self.params.update(params) def get_params(self, **params): """returns a new dictionary with this object's values plus those in the given dictionary, with prefixes stripped from the keys.""" p = self.params.copy() for key, item in params.iteritems(): if key[0:self.prelen] == self.prefix: p[key[self.prelen:]] = item else: p[key] = item return p class SyncDict: """ an efficient/threadsafe singleton map algorithm, a.k.a. "get a value based on this key, and create if not found or not valid" paradigm: exists && isvalid ? get : create works with weakref dictionaries and the LRUCache to handle items asynchronously disappearing from the dictionary. use python 2.3.3 or greater ! a major bug was just fixed in Nov. 2003 that was driving me nuts with garbage collection/weakrefs in this section. """ def __init__(self, mutex, dictionary): self.mutex = mutex self.dict = dictionary def get(self, key, createfunc, mutex = None, isvalidfunc = None): """regular get method. 
returns the object asynchronously, if present and also passes the optional isvalidfunc, else defers to the synchronous get method which will create it.""" try: if self.has_key(key): return self._get_obj(key, createfunc, mutex, isvalidfunc) else: return self.sync_get(key, createfunc, mutex, isvalidfunc) except KeyError: return self.sync_get(key, createfunc, mutex, isvalidfunc) def sync_get(self, key, createfunc, mutex = None, isvalidfunc = None): if mutex is None: mutex = self.mutex mutex.acquire() try: try: if self.has_key(key): return self._get_obj(key, createfunc, mutex, isvalidfunc, create = True) else: return self._create(key, createfunc) except KeyError: return self._create(key, createfunc) finally: mutex.release() def _get_obj(self, key, createfunc, mutex, isvalidfunc, create = False): obj = self[key] if isvalidfunc is not None and not isvalidfunc(obj): if create: return self._create(key, createfunc) else: return self.sync_get(key, createfunc, mutex, isvalidfunc) else: return obj def _create(self, key, createfunc): obj = createfunc() self[key] = obj return obj def has_key(self, key): return self.dict.has_key(key) def __contains__(self, key): return self.dict.__contains__(key) def __getitem__(self, key): return self.dict.__getitem__(key) def __setitem__(self, key, value): self.dict.__setitem__(key, value) def __delitem__(self, key): return self.dict.__delitem__(key) class Registry(SyncDict): """a registry object.""" def __init__(self): SyncDict.__init__(self, _threading.Lock(), {}) class WeakValuedRegistry(SyncDict): """a registry that stores objects only as long as someone has a reference to them.""" def __init__(self): # weakrefs apparently can trigger the __del__ method of other # unreferenced objects, when you create a new reference. this can occur # when you place new items into the WeakValueDictionary. 
if that __del__ # method happens to want to access this same registry, well, then you need # the RLock instead of a regular lock, since at the point of dictionary # insertion, we are already inside the lock. SyncDict.__init__(self, _threading.RLock(), weakref.WeakValueDictionary()) class LRUCache(SyncDict): """a cache (mapping class) that stores only a certain number of elements, and discards its least recently used element when full.""" class ListElement: def __init__(self, key, value): self.key = key self.setvalue(value) def setvalue(self, value): self.value = value if hasattr(value, 'size'): self.size = value.size else: self.size = 1 def __init__(self, size, deletefunc = None, sizethreshhold = .2): SyncDict.__init__(self, _threading.Lock(), {}) self.size = size self.maxelemsize = sizethreshhold * size self.head = None self.tail = None self.deletefunc = deletefunc self.currentsize = 0 # inner mutex to synchronize list manipulation # operations independently of the SyncDict self.listmutex = _threading.Lock() def __setitem__(self, key, value): self.listmutex.acquire() try: existing = self.dict.get(key, None) if existing is None: element = LRUCache.ListElement(key, value) #if element.size > self.maxelemsize: return self.dict[key] = element self._insertElement(element) else: #if element.size > self.maxelemsize: #del self.dict[key] #self._removeElement(element) oldsize = existing.size existing.setvalue(value) self.currentsize += (existing.size - oldsize) self._updateElement(existing) self._manageSize() finally: self.listmutex.release() def __getitem__(self, key): self.listmutex.acquire() try: element = self.dict[key] self._updateElement(element) return element.value finally: self.listmutex.release() def __contains__(self, key): return self.dict.has_key(key) def has_key(self, key): return self.dict.has_key(key) def _insertElement(self, element): # zero-length elements are not managed in the LRU queue since they # have no affect on the total size if element.size == 0: 
return element.previous = None element.next = self.head if self.head is not None: self.head.previous = element else: self.tail = element self.head = element self.currentsize += element.size self._manageSize() def _manageSize(self): # TODO: dont remove one element at a time, remove the # excess in one step while self.currentsize > self.size: oldelem = self.dict[self.tail.key] if self.deletefunc is not None: self.deletefunc(oldelem.value) self.currentsize -= oldelem.size del self.dict[self.tail.key] if self.tail != self.head: self.tail = self.tail.previous self.tail.next = None else: self.tail = None self.head = None def _updateElement(self, element): # zero-length elements are not managed in the LRU queue since they # have no affect on the total size if element.size == 0: return if self.head == element: return e = element.previous e.next = element.next if element.next is not None: element.next.previous = e else: self.tail = e element.previous = None element.next = self.head self.head.previous = element self.head = element # TODO: iteration class EncodedPath: """generates a unique file-accessible path from the given list of identifiers starting at the given root directory.""" def __init__(self, root, identifiers, extension = ".enc", depth = 3, verify = True, digest = True): ident = string.join(identifiers, "_") if digest: ident = sha.new(ident).hexdigest() tokens = [] for d in range(1, depth): tokens.append(ident[0:d]) dir = os.path.join(root, *tokens) if verify: verify_directory(dir) self.dir = dir self.path = os.path.join(dir, ident + extension) def verify_directory(self): verify_directory(self.dir) def get_path(self): return self.path myghtyutils-0.52/lib/myghtyutils/__init__.py0000666000175000017500000000000010460506136020314 0ustar malexmalexmyghtyutils-0.52/lib/myghtyutils/ext/0000755000175000017500000000000010515076431017012 5ustar malexmalexmyghtyutils-0.52/lib/myghtyutils/ext/memcached.py0000666000175000017500000000500610460506134021274 0ustar 
malexmaleximport memcache from myghtyutils.synchronization import * from myghtyutils.container import NamespaceManager, Container import sys class MemcachedNamespaceManager(NamespaceManager): def __init__(self, context, namespace, url, **params): NamespaceManager.__init__(self, context, namespace, **params) self.mc = memcache.Client([url], debug=0) # memcached does its own locking. override our own stuff def do_acquire_read_lock(self): pass def do_release_read_lock(self): pass def do_acquire_write_lock(self, wait = True): return True def do_release_write_lock(self): pass # override open/close to do nothing, keep memcache connection open as long # as possible def open(self, *args, **params):pass def close(self, *args, **params):pass def __getitem__(self, key): value = self.mc.get(self.namespace + "_" + key) if value is None: raise KeyError(key) return value def __contains__(self, key): return self.mc.get(self.namespace + "_" + key) is not None def has_key(self, key): return self.mc.get(self.namespace + "_" + key) is not None def __setitem__(self, key, value): keys = self.mc.get(self.namespace + ':keys') if keys is None: keys = {} keys[key] = True self.mc.set(self.namespace + ':keys', keys) self.mc.set(self.namespace + "_" + key, value) def __delitem__(self, key): keys = self.mc.get(self.namespace + ':keys') try: del keys[key] self.mc.delete(self.namespace + "_" + key) self.mc.set(self.namespace + ':keys', keys) except KeyError: raise def do_remove(self): pass def keys(self): keys = self.mc.get(self.namespace + ':keys') if keys is None: return [] else: return keys.keys() class MemcachedContainer(Container): def do_init(self, **params): self.funclock = None def do_create_namespace_manager(self, context, namespace, url, **params): return MemcachedNamespaceManager(context, namespace, url, **params) def lock_createfunc(self, wait = True): if self.funclock is None: self.funclock = Synchronizer(identifier = "memcachedcontainer/funclock/%s" % 
self.namespacemanager.namespace, use_files = True, lock_dir = self.namespacemanager.lock_dir) return self.funclock.acquire_write_lock(wait) def unlock_createfunc(self): self.funclock.release_write_lock() myghtyutils-0.52/lib/myghtyutils/ext/__init__.py0000666000175000017500000000000010460506134021112 0ustar malexmalexmyghtyutils-0.52/lib/MyghtyUtils.egg-info/0000755000175000017500000000000010515076431017604 5ustar malexmalexmyghtyutils-0.52/lib/MyghtyUtils.egg-info/dependency_links.txt0000666000175000017500000000000110462270030023646 0ustar malexmalex myghtyutils-0.52/lib/MyghtyUtils.egg-info/PKG-INFO0000666000175000017500000000246610462270030020705 0ustar malexmalexMetadata-Version: 1.0 Name: MyghtyUtils Version: 0.52 Summary: Container and Utility Functions from the Myghty Template Framework Home-page: http://www.myghty.org Author: Mike Bayer Author-email: mike@myghty.org License: MIT License Description: This is the set of utility classes used by Myghty templating. Included are: container - the Containment system providing back-end neutral key/value storage, with support for in-memory, DBM files, flat files, and memcached buffer - some functions for augmenting file objects util - various utility functions and objects synchronizer - provides many reader/single writer synchronization using either thread mutexes or lockfiles session - provides a Session interface built upon the Container, similar interface to mod_python session. Currently needs a mod_python-like request object, this should be changed to something more generic. 
        `Development SVN `_
Platform: UNKNOWN
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python
myghtyutils-0.52/lib/MyghtyUtils.egg-info/SOURCES.txt0000666000175000017500000000100310462270030021466 0ustar malexmalexLICENSE
README
ez_setup.py
setup.py
lib/MyghtyUtils.egg-info/PKG-INFO
lib/MyghtyUtils.egg-info/SOURCES.txt
lib/MyghtyUtils.egg-info/dependency_links.txt
lib/MyghtyUtils.egg-info/top_level.txt
lib/myghtyutils/__init__.py
lib/myghtyutils/buffer.py
lib/myghtyutils/container.py
lib/myghtyutils/session.py
lib/myghtyutils/synchronization.py
lib/myghtyutils/util.py
lib/myghtyutils/ext/__init__.py
lib/myghtyutils/ext/memcached.py
test/Container.py
test/LRUCache.py
test/SyncDict.py
test/testbase.py
myghtyutils-0.52/lib/MyghtyUtils.egg-info/top_level.txt0000666000175000017500000000001410462270030022325 0ustar malexmalexmyghtyutils
myghtyutils-0.52/test/0000755000175000017500000000000010515076431014021 5ustar malexmalexmyghtyutils-0.52/test/Container.py0000666000175000017500000001072410460506134016322 0ustar malexmalexfrom myghtyutils.container import *
import myghtyutils.buffer as buffer
import random, time, weakref, sys, re
import testbase
import unittest, sys

# container test -
# tests the container's get_value() function mostly, to insure
# that items are recreated when expired, and that create function
# is called exactly once per expiration

try:
    import thread
except:
    # NOTE(review): Python 2 string exception; works only on old interpreters
    raise "this test requires a thread-enabled python"

class item:
    # trivial payload object handed out by the container's createfunc
    def __init__(self, id):
        self.id = id

    def __str__(self):
        return "item id %d" % self.id

    def test_item(self):
        return True

class context(ContainerContext):
    pass
    #def __init__(self):
    #    ContainerContext.__init__(self, buffer.LogFormatter(buffer.LinePrinter(sys.stdout), "test", id_threads = True))

# keep running indicator
running = False

starttime = time.time()

# creation func entrance detector to detect non-synchronized access
# to the create function
baton = None

# NOTE(review): rebinds the name 'context' from the class to an instance
context = context()

def create(id, delay = 0):
    # createfunc passed to the containers; the 'baton' global asserts that
    # at most one thread is ever inside this function at a time
    global baton
    if baton is not None:
        raise "baton is not none , ident " + repr(baton) + " this thread " + repr(thread.get_ident())
    baton = thread.get_ident()
    try:
        i = item(id)
        time.sleep(delay)
        global totalcreates
        totalcreates += 1
        return i
    finally:
        baton = None

def test(cclass, id, statusdict, expiretime, delay, params):
    # worker thread body: hammers get_value() until 'running' is cleared,
    # recording liveness in statusdict so runtest can join on it
    print "create thread %d starting" % id
    statusdict[id] = True
    try:
        container = cclass(context = context, namespace = 'test', key = 'test', createfunc = lambda: create(id, delay), expiretime = expiretime, data_dir='./cache', starttime = starttime, **params)
        global running
        global totalgets
        try:
            while running:
                item = container.get_value()
                if not item.test_item():
                    raise "item did not test"
                item = None
                totalgets += 1
                time.sleep(random.random() * .00001)
        except:
            e = sys.exc_info()[0]
            running = False
            print e
            raise
    finally:
        print "create thread %d exiting" % id
        statusdict[id] = False

def runtest(cclass, totaltime, expiretime, delay, **params):
    statusdict = {}
    global totalcreates
    totalcreates = 0
    global totalgets
    totalgets = 0

    # NOTE(review): 'id' here resolves to the builtin, not a thread id; this
    # container is only used for clear_value() so the lambda never runs —
    # confirm intent
    container = cclass(context = context, namespace = 'test', key = 'test', createfunc = lambda: create(id, delay), expiretime = expiretime, data_dir='./cache', starttime = starttime, **params)
    container.clear_value()

    global running
    running = True
    for t in range(1, 20):
        thread.start_new_thread(test, (cclass, t, statusdict, expiretime, delay, params))

    time.sleep(totaltime)
    failed = not running
    running = False

    # wait for every worker thread to mark itself finished
    pause = True
    while pause:
        time.sleep(1)
        pause = False
        for v in statusdict.values():
            if v:
                pause = True
                break

    if failed:
        raise "test failed"

    print "total object creates %d" % totalcreates
    print "total object gets %d" % totalgets

class ContainerTest(testbase.MyghtyTest):
    def _runtest(self, cclass, totaltime, expiretime, delay, **params):
        print "\ntesting %s for %d secs with expiretime %s delay %d" % (
            cclass, totaltime, expiretime, delay)
        runtest(cclass, totaltime, expiretime, delay, **params)
        if expiretime is None:
            # no expiry: the create function must have run exactly once
            self.assert_(totalcreates == 1)
        else:
            # with expiry: one create per expiration window, +/- 2 slack
            self.assert_(abs(totaltime / expiretime - totalcreates) <= 2)

    def testMemoryContainer(self, totaltime=10, expiretime=None, delay=0):
        self._runtest(container_registry('memory', 'Container'), totaltime, expiretime, delay)
    def testMemoryContainer2(self):
        self.testMemoryContainer(expiretime=2)
    def testMemoryContainer3(self):
        self.testMemoryContainer(expiretime=5, delay=2)
    def testDbmContainer(self, totaltime=10, expiretime=None, delay=0):
        self._runtest(container_registry('dbm', 'Container'), totaltime, expiretime, delay)
    def testDbmContainer2(self):
        self.testDbmContainer(expiretime=2)
    def testDbmContainer3(self):
        self.testDbmContainer(expiretime=5, delay=2)

if __name__ == "__main__":
    testbase.runTests(unittest.findTestCases(__import__('__main__')))
myghtyutils-0.52/test/LRUCache.py0000666000175000017500000000162210460506134015763 0ustar malexmalexfrom myghty.util import LRUCache
import string, unittest
import testbase

class item:
    def __init__(self, id):
        self.id = id
    def __str__(self):
        return "item id %d" % self.id

class LRUTest(testbase.MyghtyTest):
    def setUp(self):
        # capacity 10; each plain item counts as size 1
        self.cache = LRUCache(10)

    # NOTE(review): missing 'self' — looks like a module-level debug helper
    # that was indented into the class; confirm it is never called as a method
    def print_cache(l):
        for item in l:
            print item,
        print

    def testlru(self):
        l = self.cache
        # insert 12 items into a 10-slot cache: 1 and 2 get evicted
        for id in range(1,13):
            l[id] = item(id)
        self.assert_(not l.has_key(1))
        self.assert_(not l.has_key(2))
        for id in range(3,12):
            self.assert_(l.has_key(id))
        # touch 4 and 5 so they become recently used; inserting 13 must
        # then evict 3, the least recently used remaining element
        l[4]
        l[5]
        l[13] = item(13)
        self.assert_(not l.has_key(3))
        for id in (4,5,6,7,8,9,10,11,12, 13):
            self.assert_(l.has_key(id))

if __name__ == "__main__":
    unittest.main()
myghtyutils-0.52/test/SyncDict.py0000666000175000017500000000776410460506134016130 0ustar malexmalexfrom myghtyutils.util import SyncDict
import random, time, weakref, sys

# this script tests SyncDict for its thread safety,
# ability to always return a value even for a dictionary
# that loses data randomly, and
# insures that when used as a registry, only one
# instance
# of a particular key/value exists at any one time.

try:
    import thread
except:
    # NOTE(review): Python 2 string exception; works only on old interpreters
    raise "this test requires a thread-enabled python"

class item:
    def __init__(self, id):
        self.id = id
    def __str__(self):
        return "item id %d" % self.id

# keep running indicator
running = False

# one item is referenced at a time (to insure singleton pattern)
theitem = weakref.ref(item(0))

# creation func entrance detector to detect non-synchronized access
# to the create function
baton = None

def create(id):
    # factory handed to SyncDict.get(); the baton asserts single-threaded
    # entry, and the weakref asserts the previous instance is already dead
    global baton
    if baton is not None:
        raise "baton is not none !"
    baton = True
    try:
        global theitem
        if theitem() is not None:
            raise "create %d old item is still referenced" % id
        i = item(id)
        theitem = weakref.ref(i)
        global totalcreates
        totalcreates += 1
        return i
    finally:
        baton = None

def test(s, id, statusdict):
    # worker thread body: repeatedly fetch-or-create under the key 'test'
    print "create thread %d starting" % id
    statusdict[id] = True
    try:
        global running
        global totalgets
        try:
            while running:
                s.get('test', lambda: create(id))
                totalgets += 1
                time.sleep(random.random() * .00001)
        except:
            e = sys.exc_info()[0]
            running = False
            print e
    finally:
        print "create thread %d exiting" % id
        statusdict[id] = False

def runtest(s):
    statusdict = {}
    global totalcreates
    totalcreates = 0
    global totalremoves
    totalremoves = 0
    global totalgets
    totalgets = 0

    global running
    running = True
    for t in range(1, 10):
        thread.start_new_thread(test, (s, t, statusdict))

    # while workers run, periodically delete the key to force re-creation
    time.sleep(1)
    for x in range (0,10):
        if not running:
            break
        print "Removing item"
        totalremoves += 1
        try:
            del s['test']
        except KeyError:
            pass
        time.sleep(random.random() * .89)

    failed = not running
    running = False

    # wait for every worker thread to mark itself finished
    pause = True
    while pause:
        time.sleep(1)
        pause = False
        for v in statusdict.values():
            if v:
                pause = True
                break

    if failed:
        raise "test failed"

    print "total object creates %d" % totalcreates
    print "total object gets %d" % totalgets
    print "total object removes %d" % totalremoves

# normal dictionary test, where we will remove the value
# periodically.  the number of creates should be equal to
# the number of removes plus one.
print "\ntesting with normal dict"
runtest(SyncDict(thread.allocate_lock(), {}))
assert(totalremoves + 1 == totalcreates)

# the goofydict is designed to act like a weakvaluedictionary,
# where its values are dereferenced and disposed of
# in between a has_key() and a
# __getitem__() operation 50% of the time.
# the number of creates should be about half of what the
# number of gets is.
class goofydict(dict):
    def has_key(self, key):
        if dict.has_key(self, key):
            # simulate the value dying between has_key() and __getitem__()
            if random.random() > 0.5:
                del self[key]
            return True
        else:
            return False
print "\ntesting with goofy dict"
runtest(SyncDict(thread.allocate_lock(), goofydict()))
assert(float(totalcreates) / float(totalgets) < .52 and float(totalcreates) / float(totalgets) > .48)

# the weakvaluedictionary test involves newly created items
# that are instantly disposed since no strong reference exists to them.
# the number of creates should be equal to the number of gets.
print "\ntesting with weak dict"
runtest(SyncDict(thread.allocate_lock(), weakref.WeakValueDictionary()))
assert(totalcreates == totalgets)
myghtyutils-0.52/test/testbase.py0000666000175000017500000000312010460506134016202 0ustar malexmaleximport unittest, os, sys
import myghty.util as util

class MyghtyTest(unittest.TestCase):
    """Base TestCase that builds a scratch Myghty directory layout under
    ./testroot and puts its lib dir on sys.path."""

    def __init__(self, *args, **params):
        unittest.TestCase.__init__(self, *args, **params)
        # make ourselves a Myghty environment
        self.root = os.path.abspath(os.path.join(os.getcwd(), 'testroot'))
        # some templates
        self.htdocs = os.path.join(self.root, 'htdocs')
        # some more templates
        self.components = os.path.join(self.root, 'components')
        # data dir for cache, sessions, compiled
        self.cache = os.path.join(self.root, 'cache')
        # lib dir for some module components
        self.lib = os.path.join(self.root, 'lib')
        sys.path.insert(0, self.lib)
        for path in (self.htdocs, self.components, self.cache, self.lib):
            util.verify_directory(path)
        self.class_set_up()

    def class_set_up(self):
        # hook for subclasses: one-time setup, called from __init__
        pass

    def class_tear_down(self):
        # hook for subclasses: one-time teardown, called from __del__
        pass

    def __del__(self):
        # NOTE(review): relying on __del__ for teardown is unreliable — it may
        # run late or not at all; consider tearDown/addCleanup instead
        self.class_tear_down()

    def create_file(self, dir, name, contents):
        # write 'contents' to dir/name (truncating any existing file)
        file = os.path.join(dir, name)
        f = open(file, 'w')
        f.write(contents)
        f.close()

    def create_directory(self, dir, path):
        util.verify_directory(os.path.join(dir, path))

    def remove_file(self, dir, name):
        # remove dir/name if it exists; silently no-op otherwise
        if os.access(os.path.join(dir, name), os.F_OK):
            os.remove(os.path.join(dir, name))

def runTests(suite):
    # run the given suite with verbose per-test output
    runner = unittest.TextTestRunner(verbosity = 2, descriptions =1)
    runner.run(suite)