Axiom-0.6.0/0000755000175000017500000000000011304543322012503 5ustar exarkunexarkun
Axiom-0.6.0/axiom/0000755000175000017500000000000011304543322013620 5ustar exarkunexarkun
Axiom-0.6.0/axiom/benchmarks/0000755000175000017500000000000011304543322015735 5ustar exarkunexarkun
Axiom-0.6.0/axiom/benchmarks/benchmark_batchitemcreation.py0000644000175000017500000000101010513272702024000 0ustar exarkunexarkun
"""
Benchmark batch creation of a large number of simple Items in a transaction.
"""

from epsilon.scripts import benchmark

from axiom.store import Store
from axiom.item import Item
from axiom.attributes import integer, text


class AB(Item):
    a = integer()
    b = text()


def main():
    s = Store("TEMPORARY.axiom")

    benchmark.start()
    rows = [(x, unicode(x)) for x in xrange(10000)]
    s.transact(lambda: s.batchInsert(AB, (AB.a, AB.b), rows))
    benchmark.stop()


if __name__ == '__main__':
    main()
Axiom-0.6.0/axiom/benchmarks/benchmark_batchitemdeletion.py0000644000175000017500000000106510513272702024011 0ustar exarkunexarkun
"""
Benchmark batch deletion of a large number of simple Items in a transaction.
"""

from epsilon.scripts import benchmark

from axiom.store import Store
from axiom.item import Item
from axiom.attributes import integer, text


class AB(Item):
    a = integer()
    b = text()


def main():
    s = Store("TEMPORARY.axiom")
    rows = [(x, unicode(x)) for x in xrange(10000)]
    s.transact(lambda: s.batchInsert(AB, (AB.a, AB.b), rows))

    benchmark.start()
    s.transact(s.query(AB).deleteFromStore)
    benchmark.stop()


if __name__ == '__main__':
    main()
Axiom-0.6.0/axiom/benchmarks/benchmark_itemcreation.py0000644000175000017500000000077610437615621023026 0ustar exarkunexarkun
"""
Benchmark creation of a large number of simple Items in a transaction.
"""

from epsilon.scripts import benchmark

from axiom.store import Store
from axiom.item import Item
from axiom.attributes import integer, text


class AB(Item):
    a = integer()
    b = text()


def main():
    s = Store("TEMPORARY.axiom")

    def txn():
        for x in range(10000):
            AB(a=x, b=unicode(x), store=s)

    benchmark.start()
    s.transact(txn)
    benchmark.stop()


if __name__ == '__main__':
    main()
Axiom-0.6.0/axiom/benchmarks/benchmark_itemdeletion.py0000644000175000017500000000117310513272702023007 0ustar exarkunexarkun
"""
Benchmark deletion of a large number of simple Items in a transaction.
""" from epsilon.scripts import benchmark from axiom.store import Store from axiom.item import Item from axiom.attributes import integer, text class AB(Item): a = integer() b = text() def main(): s = Store("TEMPORARY.axiom") rows = [(x, unicode(x)) for x in xrange(10000)] s.transact(lambda: s.batchInsert(AB, (AB.a, AB.b), rows)) def deleteStuff(): for it in s.query(AB): it.deleteFromStore() benchmark.start() s.transact(deleteStuff) benchmark.stop() if __name__ == '__main__': main() Axiom-0.6.0/axiom/benchmarks/benchmark_tagnames.py0000644000175000017500000000136110437615621022131 0ustar exarkunexarkun """ Benchmark the tagNames method of L{axiom.tags.Catalog} """ import time, sys from epsilon.scripts import benchmark from axiom import store, item, attributes, tags N_TAGS = 20 N_COPIES = 5000 N_LOOPS = 1000 class TaggedObject(item.Item): name = attributes.text() def main(): s = store.Store("tags.axiom") c = tags.Catalog(store=s) o = TaggedObject(store=s) def tagObjects(tag, copies): for x in xrange(copies): c.tag(o, tag) for i in xrange(N_TAGS): s.transact(tagObjects, unicode(i), N_COPIES) def getTags(): for i in xrange(N_LOOPS): list(c.tagNames()) benchmark.start() s.transact(getTags) benchmark.stop() if __name__ == '__main__': main() Axiom-0.6.0/axiom/benchmarks/benchmark_tagsof.py0000644000175000017500000000153610437615621021621 0ustar exarkunexarkun """ Benchmark the tagsOf method of L{axiom.tags.Catalog} """ import time, sys from epsilon.scripts import benchmark from axiom import store, item, attributes, tags N = 30 class TaggedObject(item.Item): name = attributes.text() def main(): s = store.Store("tags.axiom") c = tags.Catalog(store=s) objects = [] def createObjects(): for x in xrange(N): objects.append(TaggedObject(store=s)) s.transact(createObjects) def tagObjects(): for o in objects: for x in xrange(N): c.tag(o, unicode(x)) s.transact(tagObjects) def getTags(): for i in xrange(N): for o in objects: for t in c.tagsOf(o): pass benchmark.start() s.transact(getTags) benchmark.stop() if __name__ == '__main__': main() Axiom-0.6.0/axiom/benchmarks/testbase.py0000644000175000017500000000014010272262634020123 0ustar exarkunexarkun from pysqlite2.dbapi2 import connect as opendb con = opendb("test.sqlite") cur = con.cursor() Axiom-0.6.0/axiom/benchmarks/testindex.py0000644000175000017500000000013410330527357020324 0ustar exarkunexarkun from testbase import cur cur.execute('create index foo_bar_idx on foo(bar)') cur.commit() Axiom-0.6.0/axiom/benchmarks/testinit.py0000644000175000017500000000033410272262634020161 0ustar exarkunexarkun from testbase import con, cur cur.execute("create table foo (bar int, baz varchar)") for x in range(500): cur.execute("insert into foo values (?, ?)", (x, "string-value-of-"+str(x))) con.commit() Axiom-0.6.0/axiom/benchmarks/testreader.py0000644000175000017500000000033310330527357020460 0ustar exarkunexarkun import itertools import time from testbase import cur for num in itertools.count(): cur.execute("select * from foo") foovals = cur.fetchall() print num, 'I fetched', len(foovals), 'values.', time.ctime() Axiom-0.6.0/axiom/benchmarks/testwriter.py0000644000175000017500000000051510272262634020533 0ustar exarkunexarkun import time import itertools from testbase import con, cur BATCH = 500 for num in itertools.count(): for x in range(BATCH): n = (num * BATCH) + x cur.execute("insert into foo values (?, ?)", (n, "string-value-of-"+str(n))) con.commit() print num, 'write pass complete', time.ctime() 
Axiom-0.6.0/axiom/examples/0000755000175000017500000000000011304543322015436 5ustar exarkunexarkunAxiom-0.6.0/axiom/examples/bucket.py0000644000175000017500000000232510272262634017276 0ustar exarkunexarkunfrom axiom import item, attributes class Bucket(item.Item): typeName = 'bucket' schemaVersion = 1 name = attributes.text() def getstuff(self): for food in self.store.query(FoodItem, FoodItem.bucket == self, sort=FoodItem.deliciousness.descending): food.extra.what() class FoodItem(item.Item): typeName = 'food' schemaVersion = 1 bucket = attributes.reference() extra = attributes.reference() deliciousness = attributes.integer(indexed=True) class Chicken(item.Item): typeName = 'chicken' schemaVersion = 1 epistemologicalBasisForCrossingTheRoad = attributes.text() def what(self): print 'chicken!' class Biscuit(item.Item): typeName = 'biscuit' schemaVersion = 1 fluffiness = attributes.integer() def what(self): print 'biscuits!' from axiom.store import Store s = Store() u = Bucket(name=u'whatever', store=s) c = Chicken(epistemologicalBasisForCrossingTheRoad=u'extropian', store=s) b = Biscuit(fluffiness=100, store=s) FoodItem(store=s, deliciousness=3, extra=c, bucket=u) FoodItem(store=s, deliciousness=4, extra=b, bucket=u) u.getstuff() Axiom-0.6.0/axiom/examples/library.py0000644000175000017500000000631110310655706017463 0ustar exarkunexarkun import random from axiom.item import Item from axiom.attributes import text, timestamp, reference, integer, AND, OR from axiom.store import Store from epsilon import extime _d = extime.Time.fromISO8601TimeAndDate _books = [ (u'Heart of Darkness', u'Joseph Conrad', u'0486264645', 80, _d('1990-07-01T00:00:00.000001')), (u'The Dark Tower, Book 7', u'Stephen King', u'1880418622', 864, _d('2004-11-21T00:00:00.000001')), (u'Guns, Germs, and Steel: The Fates of Human Societies', u'Jared Diamond', u'0393317552', 480, _d('1999-04-01T00:00:00.000001')), (u'The Lions of al-Rassan', u'Guy Gavriel Kay', u'0060733497', 528, _d('2005-06-28T00:00:00.000001')), ] _borrowers = [u'Anne', u'Bob', u'Carol', u'Dave'] class Borrower(Item): typeName = 'borrower' schemaVersion = 1 name = text(indexed=True) class Book(Item): typeName = 'book' schemaVersion = 1 title = text() author = text() isbn = text() pages = integer() datePublished = timestamp() lentTo = reference() library = reference() class LendingLibrary(Item): typeName = 'lending_library' schemaVersion = 1 name = text() def books(self): return self.store.query(Book, Book.library == self) def getBorrower(self, name): for b in self.store.query(Borrower, Borrower.name == name): return b b = Borrower(name=name, store=self.store) return b def initialize(self): for title, author, isbn, pages, published in _books: b = Book( title=title, author=author, isbn=isbn, pages=pages, datePublished=published, library=self, store=self.store) def displayBooks(self): for book in self.books(): print book.title, if book.lentTo is not None: print 'lent to', '['+book.lentTo.name+']' else: print 'in library' def shuffleLending(self): for book in self.books(): if book.lentTo is not None: print book.lentTo.name, 'returned', book.title book.lentTo = None for book in self.books(): if random.choice([True, False]): borrower = random.choice(_borrowers) print 'Lending', book.title, 'to', borrower book.lentTo = self.getBorrower(borrower) def main(s): for ll in s.query(LendingLibrary): print 'found existing library' break else: print 'creating new library' ll = LendingLibrary(store=s) ll.initialize() ll.displayBooks() print '***' ll.shuffleLending() print 
'---' ll.displayBooks() print '***' ll.shuffleLending() print '---' print s.count(Book, AND (Book.author == u'Stephen King', Book.title == u'The Lions of al-Rassan')) print s.count(Book, OR (Book.author == u'Stephen King', Book.title == u'The Lions of al-Rassan')) if __name__ == '__main__': s = Store('testdb') s.transact(main, s) s.close() Axiom-0.6.0/axiom/plugins/0000755000175000017500000000000011304543322015301 5ustar exarkunexarkunAxiom-0.6.0/axiom/plugins/__init__.py0000644000175000017500000000044611046607253017425 0ustar exarkunexarkun# Copyright (c) 2008 Divmod. See LICENSE for details. """ Package for plugins for interfaces in Axiom. """ from epsilon.hotfix import require require('twisted', 'plugin_package_paths') from twisted.plugin import pluginPackagePaths __path__.extend(pluginPackagePaths(__name__)) __all__ = [] Axiom-0.6.0/axiom/plugins/axiom_plugins.py0000644000175000017500000002253611127421367020550 0ustar exarkunexarkun# Copyright (c) 2008 Divmod. See LICENSE for details. """ Plugins provided by Axiom for Axiom. """ import getpass import code, os, traceback, sys try: import readline except ImportError: readline = None from zope.interface import directlyProvides from twisted.python import usage, filepath, log from twisted.python.reflect import qual from twisted.plugin import IPlugin from epsilon.hotfix import require require('twisted', 'filepath_copyTo') import axiom from axiom import store, attributes, userbase, dependency, errors from axiom.substore import SubStore from axiom.scripts import axiomatic from axiom.listversions import ListVersions from axiom import version from axiom.iaxiom import IVersion directlyProvides(version, IPlugin, IVersion) #placate pyflakes ListVersions class Upgrade(axiomatic.AxiomaticCommand): name = 'upgrade' description = 'Synchronously upgrade an Axiom store and substores' optParameters = [ ('count', 'n', '100', 'Number of upgrades to perform per transaction')] errorMessageFormat = 'Error upgrading item (with typeName=%s and storeID=%d) from version %d to %d.' def upgradeEverything(self, store): """ Upgrade all the items in C{store}. """ for dummy in store._upgradeManager.upgradeBatch(self.count): pass def upgradeStore(self, store): """ Recursively upgrade C{store}. """ self.upgradeEverything(store) for substore in store.query(SubStore): self.upgradeStore(substore.open()) def perform(self, store, count): """ Upgrade C{store} performing C{count} upgrades per transaction. Also, catch any exceptions and print out something useful. """ self.count = count try: self.upgradeStore(store) print 'Upgrade complete' except errors.ItemUpgradeError, e: print 'Upgrader error:' e.originalFailure.printTraceback(file=sys.stdout) print self.errorMessageFormat % ( e.oldType.typeName, e.storeID, e.oldType.schemaVersion, e.newType.schemaVersion) def postOptions(self): try: count = int(self['count']) except ValueError: raise usage.UsageError('count must be an integer') siteStore = self.parent.getStore() self.perform(siteStore, count) class AxiomConsole(code.InteractiveConsole): def runcode(self, code): """ Override L{code.InteractiveConsole.runcode} to run the code in a transaction unless the local C{autocommit} is currently set to a true value. """ if not self.locals.get('autocommit', None): return self.locals['db'].transact(code.InteractiveConsole.runcode, self, code) return code.InteractiveConsole.runcode(self, code) class Browse(axiomatic.AxiomaticCommand): synopsis = "[options]" name = 'browse' description = 'Interact with an Axiom store.' 
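    # A hypothetical session sketch (store name and output invented), using
    # the names bound by namespace() below -- 'db' is the opened Store and
    # 'autocommit' controls whether AxiomConsole wraps each statement in
    # db.transact:
    #
    #   $ axiomatic -d site.axiom browse
    #   [axiom, version ...]. Autocommit is off.
    #   >>> db.getItemByID(1)
    #   >>> autocommit = True   # run subsequent statements untransacted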
    optParameters = [
        ('history-file', 'h',
         '~/.axiomatic-browser-history',
         'Name of the file to which to save input history.'),
        ]

    optFlags = [
        ('debug', 'b', 'Open Store in debug mode.'),
        ]

    def postOptions(self):
        interp = AxiomConsole(self.namespace(), '')
        historyFile = os.path.expanduser(self['history-file'])
        if readline is not None and os.path.exists(historyFile):
            readline.read_history_file(historyFile)
        try:
            interp.interact("%s. Autocommit is off." % (str(axiom.version),))
        finally:
            if readline is not None:
                readline.write_history_file(historyFile)

    def namespace(self):
        """
        Return a dictionary representing the namespace which should be
        available to the user.
        """
        self._ns = {
            'db': self.store,
            'store': store,
            'autocommit': False,
            }
        return self._ns


class UserbaseMixin:
    def installOn(self, other):
        # XXX check installation on other, not store
        for ls in self.store.query(userbase.LoginSystem):
            raise usage.UsageError("UserBase already installed")
        else:
            ls = userbase.LoginSystem(store=self.store)
            dependency.installOn(ls, other)
            return ls


class Install(axiomatic.AxiomaticSubCommand, UserbaseMixin):
    def postOptions(self):
        self.installOn(self.store)


class Create(axiomatic.AxiomaticSubCommand, UserbaseMixin):
    synopsis = "<username> <domain> [password]"

    def parseArgs(self, username, domain, password=None):
        self['username'] = self.decodeCommandLine(username)
        self['domain'] = self.decodeCommandLine(domain)
        self['password'] = password

    def postOptions(self):
        msg = 'Enter new AXIOM password: '
        while not self['password']:
            password = getpass.getpass(msg)
            second = getpass.getpass('Repeat to verify: ')
            if password == second:
                self['password'] = password
            else:
                msg = 'Passwords do not match. Enter new AXIOM password: '

        self.addAccount(
            self.store, self['username'], self['domain'], self['password'])

    def addAccount(self, siteStore, username, domain, password):
        """
        Create a new account in the given store.

        @param siteStore: A site Store to which login credentials will be
        added.

        @param username: Local part of the username for the credentials to
        add.

        @param domain: Domain part of the username for the credentials to
        add.

        @param password: Password for the credentials to add.

        @rtype: L{LoginAccount}
        @return: The added account.
        """
        for ls in siteStore.query(userbase.LoginSystem):
            break
        else:
            ls = self.installOn(siteStore)
        try:
            acc = ls.addAccount(username, domain, password)
        except userbase.DuplicateUser:
            raise usage.UsageError("An account by that name already exists.")
        return acc


class Disable(axiomatic.AxiomaticSubCommand):
    synopsis = "<username> <domain>"

    def parseArgs(self, username, domain):
        self['username'] = self.decodeCommandLine(username)
        self['domain'] = self.decodeCommandLine(domain)

    def postOptions(self):
        for acc in self.store.query(userbase.LoginAccount,
                                    attributes.AND(userbase.LoginAccount.username == self['username'],
                                                   userbase.LoginAccount.domain == self['domain'])):
            if acc.disabled:
                raise usage.UsageError("That account is already disabled.")
            else:
                acc.disabled = True
                break
        else:
            raise usage.UsageError("No account by that name exists.")


class List(axiomatic.AxiomaticSubCommand):
    def postOptions(self):
        acc = None
        for acc in self.store.query(userbase.LoginMethod):
            if acc.domain is None:
                print acc.localpart,
            else:
                print acc.localpart + '@' + acc.domain,
            if acc.account.disabled:
                print '[DISABLED]'
            else:
                print
        if acc is None:
            print 'No accounts'


class UserBaseCommand(axiomatic.AxiomaticCommand):
    name = 'userbase'
    description = 'LoginSystem introspection and manipulation.'
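    # A hypothetical invocation sketch (store name and account details
    # invented); the subcommand names come from subCommands just below:
    #
    #   $ axiomatic -d site.axiom userbase install
    #   $ axiomatic -d site.axiom userbase create alice example.com
    #   $ axiomatic -d site.axiom userbase list
    #   alice@example.com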
subCommands = [ ('install', None, Install, "Install UserBase on an Axiom database"), ('create', None, Create, "Create a new user"), ('disable', None, Disable, "Disable an existing user"), ('list', None, List, "List users in an Axiom database"), ] def getStore(self): return self.parent.getStore() class Extract(axiomatic.AxiomaticCommand): name = 'extract-user' description = 'Remove an account from the login system, moving its associated database to the filesystem.' optParameters = [ ('address', 'a', None, 'localpart@domain-format identifier of the user store to extract.'), ('destination', 'd', None, 'Directory into which to extract the user store.')] def extractSubStore(self, localpart, domain, destinationPath): siteStore = self.parent.getStore() la = siteStore.findFirst( userbase.LoginMethod, attributes.AND(userbase.LoginMethod.localpart == localpart, userbase.LoginMethod.domain == domain)).account userbase.extractUserStore(la, destinationPath) def postOptions(self): localpart, domain = self.decodeCommandLine(self['address']).split('@', 1) destinationPath = filepath.FilePath( self.decodeCommandLine(self['destination'])).child(localpart + '@' + domain + '.axiom') self.extractSubStore(localpart, domain, destinationPath) class Insert(axiomatic.AxiomaticCommand): name = 'insert-user' description = 'Insert a user store, such as one extracted with "extract-user", into a site store and login system.' optParameters = [ ('userstore', 'u', None, 'Path to user store to be inserted.') ] def postOptions(self): userbase.insertUserStore(self.parent.getStore(), filepath.FilePath(self.decodeCommandLine(self['userstore']))) Axiom-0.6.0/axiom/scripts/0000755000175000017500000000000011304543322015307 5ustar exarkunexarkunAxiom-0.6.0/axiom/scripts/__init__.py0000644000175000017500000000000010272262634017415 0ustar exarkunexarkunAxiom-0.6.0/axiom/scripts/axiomatic.py0000644000175000017500000001344211217776571017664 0ustar exarkunexarkun# -*- test-case-name: axiomatic.test.test_axiomatic -*- from zope.interface import directlyProvides import os import sys import glob import errno import signal from twisted import plugin from twisted.python import usage from twisted.python.runtime import platform from twisted.scripts import twistd from axiom import iaxiom class AxiomaticSubCommandMixin(object): store = property(lambda self: self.parent.getStore()) def decodeCommandLine(self, cmdline): """Turn a byte string from the command line into a unicode string. """ codec = getattr(sys.stdin, 'encoding', None) or sys.getdefaultencoding() return unicode(cmdline, codec) class _metaASC(type): def __new__(cls, name, bases, attrs): newcls = type.__new__(cls, name, bases, attrs) if not (newcls.__name__ == 'AxiomaticCommand' and newcls.__module__ == _metaASC.__module__): directlyProvides(newcls, plugin.IPlugin, iaxiom.IAxiomaticCommand) return newcls class AxiomaticSubCommand(usage.Options, AxiomaticSubCommandMixin): pass class AxiomaticCommand(usage.Options, AxiomaticSubCommandMixin): __metaclass__ = _metaASC class PIDMixin: def _sendSignal(self, signal): if platform.isWinNT(): raise usage.UsageError("You can't send signals on Windows (XXX TODO)") dbdir = self.parent.getStoreDirectory() serverpid = int(file(os.path.join(dbdir, 'run', 'axiomatic.pid')).read()) os.kill(serverpid, signal) return serverpid def signalServer(self, signal): try: return self._sendSignal(signal) except (OSError, IOError), e: if e.errno in (errno.ENOENT, errno.ESRCH): raise usage.UsageError('There is no server running from the Axiom database %r.' 
% (self.parent.getStoreDirectory(),)) else: raise class Stop(usage.Options, PIDMixin): def postOptions(self): self.signalServer(signal.SIGINT) class Status(usage.Options, PIDMixin): def postOptions(self): dbdir = self.parent.getStoreDirectory() serverpid = self.signalServer(0) print 'A server is running from the Axiom database %r, PID %d.' % (dbdir, serverpid) class Start(twistd.ServerOptions): run = staticmethod(twistd.run) def subCommands(): raise AttributeError() subCommands = property(subCommands) def getArguments(self, store, args): run = store.dbdir.child("run") logs = run.child("logs") if "--logfile" not in args and "-l" not in args and "--nodaemon" not in args and "-n" not in args: if not logs.exists(): logs.makedirs() args.extend(["--logfile", logs.child("axiomatic.log").path]) if not platform.isWindows() and "--pidfile" not in args: args.extend(["--pidfile", run.child("axiomatic.pid").path]) args.extend(["axiomatic-start", "--dbdir", store.dbdir.path]) return args def parseOptions(self, args): if "--help" in args: self.opt_help() else: # If a reactor is being selected, it must be done before the store # is opened, since that may execute arbitrary application code # which may in turn install the default reactor. if "--reactor" in args: reactorIndex = args.index("--reactor") shortName = args[reactorIndex + 1] del args[reactorIndex:reactorIndex + 2] self.opt_reactor(shortName) sys.argv[1:] = self.getArguments(self.parent.getStore(), args) self.run() class Options(usage.Options): def subCommands(): def get(self): yield ('start', None, Start, 'Launch the given Axiom database') if not platform.isWinNT(): yield ('stop', None, Stop, 'Stop the server running from the given Axiom database') yield ('status', None, Status, 'Report whether a server is running from the given Axiom database') from axiom import plugins for plg in plugin.getPlugins(iaxiom.IAxiomaticCommand, plugins): try: yield (plg.name, None, plg, plg.description) except AttributeError: raise RuntimeError("Maldefined plugin: %r" % (plg,)) return get, subCommands = property(*subCommands()) optParameters = [ ('dbdir', 'd', None, 'Path containing axiom database to configure/create'), ] optFlags = [ ('debug', 'b', 'Enable Axiom-level debug logging')] store = None def usedb(self, potentialdb): yn = raw_input("Use database %r? 
(Y/n) " % (potentialdb,)) if yn.lower() in ('y', 'yes', ''): self['dbdir'] = potentialdb else: raise usage.UsageError('Select another database with the -d option, then.') def getStoreDirectory(self): if self['dbdir'] is None: possibilities = glob.glob('*.axiom') if len(possibilities) > 1: raise usage.UsageError( "Multiple databases found here, please select one with " "the -d option: %s" % (' '.join(possibilities),)) elif len(possibilities) == 1: self.usedb(possibilities[0]) else: self.usedb(self.subCommand + '.axiom') return self['dbdir'] def getStore(self): from axiom.store import Store if self.store is None: self.store = Store(self.getStoreDirectory(), debug=self['debug']) return self.store def postOptions(self): if self.store is not None: self.store.close() def main(argv=None): o = Options() try: o.parseOptions(argv) except usage.UsageError, e: raise SystemExit(str(e)) Axiom-0.6.0/axiom/scripts/pysql.py0000644000175000017500000000056510330527357017047 0ustar exarkunexarkun import sys import readline # Imported for its side-effects import traceback from pysqlite2.dbapi2 import connect from pprint import pprint con = connect(sys.argv[1]) cur = con.cursor() while True: try: cur.execute(raw_input("SQL> ")) results = cur.fetchall() if results: pprint(results) except: traceback.print_exc() Axiom-0.6.0/axiom/test/0000755000175000017500000000000011304543322014577 5ustar exarkunexarkunAxiom-0.6.0/axiom/test/historic/0000755000175000017500000000000011304543322016423 5ustar exarkunexarkunAxiom-0.6.0/axiom/test/historic/__init__.py0000644000175000017500000000005610410077657020547 0ustar exarkunexarkun# -*- test-case-name: axiom.test.historic -*- Axiom-0.6.0/axiom/test/historic/account1to2.axiom.tbz20000644000175000017500000000411210335225603022504 0ustar exarkunexarkunBZh91AY&SYq4 @ V!m5'SOQCF#& dd'ATfiPOPC@A22i&0#&44b FLh2!L4h2d2a@41M4dFd&AɀMF@4i&LL  ɦdш10 @hM24ѠdɓIL)桤MSQS6=COSM =A444hM46T,bVy$-2~@$ׁS֒-RޅBDTn)dBY0}X#IIUT(B"h@"HIzWU\]2vHfh΍Lj+2ZVx/$RGv3.y*ʳ&6^<=8LUTMʋ*](e-6Do qZ!W!.kZe5׽[?:mw6SC客lw.p_,)R3a>fz q>QB1>kKIk2YK c(@⇵ߜ&pۦU;7:q@) apO1:QxzhA5rEiEɱ_w.7>Wo;z$q0I) '}HbNm۲IVdaou0=3zDUH>׌67&+ρF'168\(Ҕ8Ԍ5y f&2IdͽU[oJw{WCaXaa~}I fFrY$$!dYJgCQt Rh,b0(.^ΪRi%XK,e3G8L8/$Gn9xxMmmʪJTN#}k+Ħ a%i\8y1Q0LSj8ܓR2l9㸢cA:hI!:E*FRF &14v_q,pڧdkץԨyZizUzRe*U{ZQ%H*yJH >8Axiom-0.6.0/axiom/test/historic/catalog1to2.axiom.tbz20000644000175000017500000000247510437615621022502 0ustar exarkunexarkunBZh91AY&SY?/J8@eΎk@iOTړCGS@2@h4h'4&OLA hfC44h4ڍ4I@@ 4h1 M0dib`ɀd0dd&HшѤMAd4C ӔdV$!A07N .=I%(aD$S&{LXqa#?<}ɞ@=2Xؠ|iP߼(2Y\J3x CEP!UABL4Z-kcb 4E.8,#*wp$j,~.Nc (+ ~љ)Ԩ(2BD!,OבݢpcUiX"!$:fkj@ -EpJWs@0iuM əeYRNV4D.:ҕVH [e]{Aw>:Ӏ,yOoE1˪T&VLI=wYkm֥-ӒDBc`Yr AB0\(0F*)oov @jMM։e LHQQq] *n!*PJC7omAB" DP N4!H a+H 8gAxiom-0.6.0/axiom/test/historic/loginMethod1to2.axiom.tbz20000644000175000017500000000515610354323260023331 0ustar exarkunexarkunBZh91AY&SY:B 4@ }MwU%[@$%M!i1'f  F4F 4TSę=GCji4 @ hd0FCFMa MF&CF 4 2$HЧMAz=)hF C@AM0z!z!4d1Szj=B &LCihi4hdёёhd4h2@ hRD14TS0$=OS4 Ѡ=M 4)uLw#%t3G5$`lXLЍ]WY3;E6p 1Cz !x*K+׃Z"yI$(6۫|X@5TTo猪6Ā "ZxyoMOdj&hBl'eB ;Jds&Y$,.Ft-A J[ɍOf=V(T}Qb4&I5(A;yn$rkmRļ0/صmj)JY ɰu"3 d!˔L  ?7wlsEsC,X]J4'b7THM-qH_<̭> yRG)T͂S@02%S wsؿ)flڵlf laI:_?+׾%,O*sI LV"- L,ߙ+ 8 qVI"! 
Y( bD: ۚAR,q|DFln/[zq8PIъeED0_8"BWM@L"WnFWoo>*:t`gdfizM~iu[M )0W,%'K&$Ș*='X >ø'\%4l3$'2((H:S*)UF V7,v]Wv(ŀ]nUS!WF!D l$еR!Crq;1Qt`p/iHk($q-&62&{3İ6x xtEç{oia8r]I?%gMdq80Y~:LNl%6)JiR3$X`ŌL䅁f.anlX"JOa2m1ǵGm6!K|^ d.óRF.l% 5@MS+OoOJߒIE({INI4Lw:.otQ7>eqr,zkA>lյؗtƂxmJzܙ {Lўwv,1k]z >^J|Y ICUoiȝRW;Rl[fW5Y*HIIrWČ}>1.5)ȡ&%I1BZ2VkY]LiKGSDɗm$JzNZU%5%$| f©4vrFPQ&/imhi2Ȗܺlݮi; U%$U/%jW6|q0zTbUUR&!GU8Zΰ1bs-G;rLɋuG_OWL~} zfYJpzv쒈@(RnP(jR"lNN6J!/RK)z DRG7^3ܑN$0iAxiom-0.6.0/axiom/test/historic/manhole1to2.axiom.tbz20000644000175000017500000000436211211011765022476 0ustar exarkunexarkunBZh91AY&SYm HP INV;IP tMOFSOB4= zL M 44C#zC@Ѡ1!hFO'cMFL4&Lɡ11=C&!L#O@L!L@ɐ1i0CF&0a1&Mhhh4`z%@&` C@ @ɐ1i0CF&0a1&Mhhh4`"B A5<)OHz ɠfzOS\"IytQ`hGbx@?-t&P TjeBƚD6 HbɅHm%W'byGgFe2" t!"A% 4Qd' 8)W/OIhYX'^ɲâҀkcm[EA0sClf -&J: UWHl  ILc61 !$b(v;O[6c+;D1pvTl2Rk_>H/TGBHG2[b Fn;JԌnNW/x~S~h;Gxi(Oϟ$`p"wUl*z.݄nwūVOmq6mwK:GIjV0sZ$?kB Z`03poyA/UU3"ȴqD9< G ,һPZL4J F'T @B"> 9AU|Ӱ4>HhWX_JcU.L( Ҕ ]8Յ$p\PB3}bW O|MDӋ'o|$<_u{9r "(x=גN:̕kU\:HJL"v1۲{MDX~M]%1(<!K#GJ\gqUZō{o,"gUH~}BNtZ>ח*i^1Ќn˜$rK8.σC jj}^xOw[Nb8 P ݂Xv:u,ꮖDOL&0%%Js@#DƒXԸ ,nX;ܳ''J%9DwYgC?I.i*V- yi2Fei4I =~ pҼ1nV"&Y9H4*4 @7-)4*VzPJ@xcL gRwfC-J}>8GB10/B%cKcH6.yHīvN۴]GVт3;B/.ݔ&lOT J gb݈7ע(bgP,$"p!KD2{!%— E6ό.R3!&YVfipv:wF 󺌁߀Mpp.4B9-EU2IUhRb[Dd(s_d0y*^(:mH+ \1K(QB,ʅA .= bGBP; :)qDDW䁐$Nc@4&%1C16  {Aw$S ~}Axiom-0.6.0/axiom/test/historic/parentHook2to3.axiom.tbz20000644000175000017500000000511011224737657023202 0ustar exarkunexarkunBZh91AY&SYo 2P݄YF3 Jqu!=SOT?J3GI mOFƠ4 j@4P13SڀS4ɩmM4h@h42zѣA4ddzC&120F 0 L@ 14i4ɂi41LdhbFC@4iL&LM !ш04d2d`@&CC4$HA2  xL'O)Fj aL44#h##&F1O?%3kAשL- ,~?Gg˽F.>\F(F'q7. <7nܹ`_В=WkJ 9{{O{ gy9aM=!4pG$ FX0NR>9Q#k`t-f:kᤕMRmg*9ZDAV6EeIhm O"Ia:t{ps3פ;I癏)h"R.!pW9*{&Ia`95M`+=qJ`lEW.xDȒ65%Jn|}y_or6.FJ"pSVtܙU7iq);qڿ_8;;ݍ?#&-r×;M{1?oKsz4z ,6'sH@AMr@&azxRh*F p H PaxY QC {Ie]0'O=y4`DL8 f$=tegًq!,H ыT(+=YM PZMr(!H3X)Aչ`7e(^8⋛rbv#mIZA V#r:®L%VQ3 #p`^YZ3k=Sl4a>N{OvA$l8:lGD2$.1SrEB"ڼ skLbZ ϼ&jٗGӼ-GlcEG [my,P c^:W9;)"%/F.ƵF+[v_]m[g]UsX>C]U9Ͳ5n.puJWD^gw>xr]iCo6Z#A o QYD #LMH{cYPus0:[2 'A0`I# 1I^aR-X:j%Df8xH T:Iq-oG,bܒfRj _ 2\F embI4a%q<A:%^gP0Fyb HCm R $ h<(Vjh%%hBX%"9P Kn%#:;PO0 YX#Q1- Fݠ+V3[R1j@Pį.=k4[ݠтWn(u`! F)UNʺ`;0E116Lk8eGhJќZLDC,V@wLGXpȆOqD=ѲLՔ<9͙$9dXF,f$'`JWd]Y `5B8.u}Jz[P{.1 ހ5k38rK^jE@,)YH+Ъ[.f3/Eü!V>mHߚ\>87_WCAh;9BUB2J ’Hh~Mf)3䔆jz4#i Mh쩩4Ӆ cl! m$Ma\I1ܑN$#[Axiom-0.6.0/axiom/test/historic/parentHook3to4.axiom.tbz20000644000175000017500000000357611224737657023222 0ustar exarkunexarkunBZh91AY&SY=5  `10ť/3fwg! 4mOJzi<4mɠ =FѦzii;R- wmU-NT6lC\4$W6r\@e #eĜ0nBqTLEբ')g coOZVF욱{Ȱڑ(p~@hB컘* K_2$HL 󔛑Kwmo`ER?m㽵b6l}*v[`ti3# B3+ %bխ8z2m^SA12Pz" @"Z&(|~e 3{55`b;L= 5JU9rݛbQJKȥUb$tDGCJ̛E\{3#Js 0 Q!d VkC=mm ]6' ,EM{kdq/ń^982`hx 2G "ݤy!v4Z|\J`P&˕"qkZ($|Mt"'r$އtBeU2[L"OZ?x7tE [I Ć V-guiZ]t0?y$ķ1J >il9g^jH4qOiL[<]) Q;wG? 
1Y¿S̱M/ 1jMෂ l]ΐ1zg=ۯX ;};]̔ X\\){mpv'y1V e W/J{uo,6{ [nNeTR h7 @E?(A|OZ`$ &{BZ BE8L tH O^ k66?Z▸2%3O*gy~w5aGYd)Q%/iTw :'1;X*Τ${5G.䮫fPU$0 0b LC*"!`E7iYe`6l]uo@V R,ʖcDSʬB,eJL72 g3@퐐v[vW:޾CV1Bf &IiLPHgj+ *J":yl)^R"c7Hv^d0hDRy/3^ ݜ;V r(\gIEFܫuH+!оGB (%_z E$%lwAeǗ AH2 Ҹ"@0Os%S&5TUPfbI5(% %(LB`afօg]BCP<Axiom-0.6.0/axiom/test/historic/processor1to2.axiom.tbz20000644000175000017500000000230110461453555023076 0ustar exarkunexarkunBZh91AY&SY1Ck0 %)o+(i6h2i L!zzjzF&&&C#C 4  L`& M4S1S4b  h Fh ORΙ`Yil@f" ?(PFMcC[y!$ZB Xoz:W6 }"_ u#%HvBD<4L ) w*0`ߡzufE!ؕ%U#R*hnܣFU ` sh$71w둍2;xVId9o۱;;6]0iWЀ$Еi4Tvjȡ"}vH=V٘͛"Rn]s P (tй)8O0p rl΋̌B+ۼWD|t_$Y$i"9"E:a1# * h$*QK& s))q%HSA f&'!hҀRUJD#WG53n ): QCau!|x{`еn7ǯ V҃)6( $bC7.p cAxiom-0.6.0/axiom/test/historic/scheduler1to2.axiom.tbz20000644000175000017500000000310011224737657023041 0ustar exarkunexarkunBZh91AY&SYӚ!5( `0 ťݓT@ \$i452`٩Q e4cM =Qh#!P0Mɦ424*dbii 2L0L40HO4hQ&24zAOQ&Li###?TALF 0CC`L&biEM mhfɑF P>("pDc>H=c2dH{u͏Ƞ-Kc HqI\@1XdX-f24XgjrI8tRd̑˗%' .DpB]+VCb" L6IG8CQ(xpDRPPAJF\*j) Crx\\']鎱7_ Z!bYY͟ v`;:LjjGO[=D(i&22ocJ&z$FtD=$ S![Xŭ{䷞dBkf>՗0IPLjY}%:t_ BMb@E1Z?R܄p_~Nj;v_]FjeGe ztl E{Pb}u϶k`BGe\1!mcE 94tg>7Nv6%;;zz}E. -"лcRjx373DcQSЏ)}]"Ĩ!b ;$L!Ul!"E"8;]Dd!bh$JW]ɛqXTL@ ڦ"d8E@4^봀du*DIERP LT&A@Ɗ%4Sh-gj:h<021.2 &[<w4Wl}84Fte$DCG&1Z5kU&v<ѿ"_*T !w}.49쐻#"-̘o0[YNXK ..@eQ-@!g@364تDBT@P$MEI EO)E`C2kH sSAxiom-0.6.0/axiom/test/historic/stub_account1to2.py0000644000175000017500000000076310410077657022214 0ustar exarkunexarkun from axiom.userbase import LoginSystem from axiom.test.test_userbase import GarbageProtocolHandler def createDatabase(s): ls = LoginSystem(store=s) ls.installOn(s) acc = ls.addAccount(u'test', u'example.com', 'asdf') ss = acc.avatars.open() gph = GarbageProtocolHandler(store=ss, garbage=7) gph.installOn(ss) # ls.addAccount(u'test2', u'example.com', 'ghjk') from axiom.test.historic.stubloader import saveStub if __name__ == '__main__': saveStub(createDatabase) Axiom-0.6.0/axiom/test/historic/stub_catalog1to2.py0000644000175000017500000000111510461453555022163 0ustar exarkunexarkun# -*- test-case-name: axiom.test.historic.test_catalog1to2 -*- from axiom.item import Item from axiom.attributes import text from axiom.tags import Catalog from axiom.test.historic.stubloader import saveStub class Dummy(Item): attribute = text(doc="dummy attribute") def createDatabase(s): """ Populate the given Store with a Catalog and some Tags. """ c = Catalog(store=s) c.tag(c, u"internal") c.tag(s, u"internal") i = Dummy(store=s) c.tag(i, u"external") c.tag(i, u"green") if __name__ == '__main__': saveStub(createDatabase, 6917) Axiom-0.6.0/axiom/test/historic/stub_loginMethod1to2.py0000644000175000017500000000100610410077657023020 0ustar exarkunexarkun from axiom.userbase import LoginSystem from axiom.test.test_userbase import GarbageProtocolHandler from axiom.test.historic.test_loginMethod1to2 import CREDENTIALS, GARBAGE_LEVEL def createDatabase(s): ls = LoginSystem(store=s) ls.installOn(s) acc = ls.addAccount(*CREDENTIALS) ss = acc.avatars.open() gph = GarbageProtocolHandler(store=ss, garbage=GARBAGE_LEVEL) gph.installOn(ss) from axiom.test.historic.stubloader import saveStub if __name__ == '__main__': saveStub(createDatabase) Axiom-0.6.0/axiom/test/historic/stub_manhole1to2.py0000644000175000017500000000062211211011765022161 0ustar exarkunexarkun# -*- test-case-name: axiom.test.historic.test_manhole1to2 -*- # Copyright 2008 Divmod, Inc. See LICENSE for details. 
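# Each stub_*.py module in this directory follows the same recipe: define
# createDatabase(store) to populate a Store with old-schema state, then call
# saveStub (defined in stubloader.py, below) to snapshot that Store as one of
# the .tbz2 tarballs above.  A sketch of the pattern, with a hypothetical
# ExampleItem standing in for a real legacy type and an invented revision
# number:
#
#   def createDatabase(store):
#       ExampleItem(store=store, attribute=u'legacy value')
#
#   if __name__ == '__main__':
#       saveStub(createDatabase, 12345)  # 12345: SVN revision of the schema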
from axiom.dependency import installOn
from axiom.batch import BatchManholePowerup
from axiom.test.historic.stubloader import saveStub

def createDatabase(store):
    installOn(BatchManholePowerup(store=store), store)

if __name__ == '__main__':
    saveStub(createDatabase, 16829)
Axiom-0.6.0/axiom/test/historic/stub_parentHook2to3.py0000644000175000017500000000125211224737657022675 0ustar exarkunexarkun
# -*- test-case-name: axiom.test.historic.test_parentHook2to3 -*-

"""
Generate a test stub for upgrading L{_SubSchedulerParentHook} from version 2
to 3, which removes the C{scheduledAt} attribute.
"""

from axiom.test.historic.stubloader import saveStub
from axiom.dependency import installOn
from axiom.scheduler import Scheduler, _SubSchedulerParentHook
from axiom.substore import SubStore

def createDatabase(store):
    scheduler = Scheduler(store=store)
    installOn(scheduler, store)
    installOn(
        _SubSchedulerParentHook(
            store=store, loginAccount=SubStore(store=store)),
        store)

if __name__ == '__main__':
    saveStub(createDatabase, 16800)
Axiom-0.6.0/axiom/test/historic/stub_parentHook3to4.py0000644000175000017500000000125111224737657022676 0ustar exarkunexarkun
# -*- test-case-name: axiom.test.historic.test_parentHook3to4 -*-

"""
Generate a test stub for upgrading L{_SubSchedulerParentHook} from version 3
to 4, which removes the C{scheduler} attribute.
"""

from axiom.test.historic.stubloader import saveStub
from axiom.dependency import installOn
from axiom.scheduler import Scheduler, _SubSchedulerParentHook
from axiom.substore import SubStore

def createDatabase(store):
    scheduler = Scheduler(store=store)
    installOn(scheduler, store)
    installOn(
        _SubSchedulerParentHook(
            store=store, loginAccount=SubStore(store=store)),
        store)

if __name__ == '__main__':
    saveStub(createDatabase, 17606)
Axiom-0.6.0/axiom/test/historic/stub_processor1to2.py0000644000175000017500000000115510461453555022574 0ustar exarkunexarkun
# -*- test-case-name: axiom.test.historic.test_processor1to2 -*-

from axiom.item import Item
from axiom.attributes import text
from axiom.batch import processor
from axiom.test.historic.stubloader import saveStub

class Dummy(Item):
    __module__ = 'axiom.test.historic.stub_processor1to2'
    typeName = 'axiom_test_historic_stub_processor1to2_dummy'

    attribute = text()

DummyProcessor = processor(Dummy)

def createDatabase(s):
    """
    Put a processor of some sort into a Store.
    """
    t = DummyProcessor(store=s)
    print t.typeName

if __name__ == '__main__':
    saveStub(createDatabase, 7973)
Axiom-0.6.0/axiom/test/historic/stub_scheduler1to2.py0000644000175000017500000000065011224737657022540 0ustar exarkunexarkun
# -*- test-case-name: axiom.test.historic.test_scheduler1to2 -*-

"""
Database creator for the test of the upgrade of Scheduler from version 1 to
version 2.
""" from axiom.test.historic.stubloader import saveStub from axiom.scheduler import Scheduler from axiom.dependency import installOn def createDatabase(store): installOn(Scheduler(store=store), store) if __name__ == '__main__': saveStub(createDatabase, 17606) Axiom-0.6.0/axiom/test/historic/stub_subStoreStartupService1to2.py0000644000175000017500000000272310453637107025267 0ustar exarkunexarkun# -*- test-case-name: axiom.test.historic.test_subStoreStartupService1to2 -*- from zope.interface import implements from twisted.application.service import IService from axiom.item import Item from axiom.attributes import boolean from axiom.substore import SubStore, SubStoreStartupService from axiom.test.historic.stubloader import saveStub class DummyService(Item): """ Service which does nothing but mark itself as run, if it's ever run. After the upgrader it should not be run. """ typeName = 'substore_service_upgrade_stub_service' everStarted = boolean(default=False) implements(IService) name = property(lambda : "sucky-service") running = property(lambda : False) def setName(self, name): pass def setServiceParent(self, parent): pass def disownServiceParent(self): pass def startService(self): self.everStarted = True def stopService(self): pass def privilegedStartService(self): pass def createDatabase(s): """ Create a store which contains a substore-service-starter item powered up for IService, and a substore, which contains a service that should not be started after the upgrader runs. """ ssi = SubStore.createNew(s, ["sub", "test"]) ss = ssi.open() ds = DummyService(store=ss) ss.powerUp(ds, IService) ssss = SubStoreStartupService(store=s).installOn(s) if __name__ == '__main__': saveStub(createDatabase, 7615) Axiom-0.6.0/axiom/test/historic/stub_subscheduler1to2.py0000644000175000017500000000066411224737657023257 0ustar exarkunexarkun# test-case-name: axiom.test.historic.test_subscheduler1to2 """ Database creator for the test for the upgrade of SubScheduler from version 1 to version 2. """ from axiom.test.historic.stubloader import saveStub from axiom.scheduler import SubScheduler from axiom.dependency import installOn def createDatabase(store): installOn(SubScheduler(store=store), store) if __name__ == '__main__': saveStub(createDatabase, 17606) Axiom-0.6.0/axiom/test/historic/stub_textlist.py0000644000175000017500000000077711211011765021723 0ustar exarkunexarkun# -*- test-case-name: axiom.test.historic.test_textlist -*- from axiom.item import Item from axiom.attributes import textlist from axiom.test.historic.stubloader import saveStub class Dummy(Item): typeName = 'axiom_textlist_dummy' schemaVersion = 1 attribute = textlist(doc="a textlist") def createDatabase(s): """ Populate the given Store with some Dummy items. """ Dummy(store=s, attribute=[u'foo', u'bar']) if __name__ == '__main__': saveStub(createDatabase, 11858) Axiom-0.6.0/axiom/test/historic/stubloader.py0000644000175000017500000000421310567112556021154 0ustar exarkunexarkun import os import sys import shutil import tarfile import inspect from twisted.trial import unittest from twisted.application.service import IService from axiom.store import Store def saveStub(funcobj, revision): """ Create a stub database and populate it using the given function. @param funcobj: A one-argument callable which will be invoked with an Axiom Store instance and should add to it the old state which will be used to test an upgrade. @param revision: An SVN revision of trunk at which it was possible it is possible for funcobj to create the necessary state. 
""" # You may notice certain files don't pass the second argument. They don't # work any more. Please feel free to update them with the revision number # they were created at. filename = inspect.getfile(funcobj) dbfn = os.path.join( os.path.dirname(filename), os.path.basename(filename).split("stub_")[1].split('.py')[0]+'.axiom') s = Store(dbfn) s.transact(funcobj, s) s.close() tarball = tarfile.open(dbfn+'.tbz2', 'w:bz2') tarball.add(os.path.basename(dbfn)) tarball.close() shutil.rmtree(dbfn) class StubbedTest(unittest.TestCase): def openLegacyStore(self): """ Extract the Store tarball associated with this test, open it, and return it. """ temp = self.mktemp() f = sys.modules[self.__module__].__file__ dfn = os.path.join( os.path.dirname(f), os.path.basename(f).split("test_")[1].split('.py')[0]+'.axiom') arcname = dfn + '.tbz2' tarball = tarfile.open(arcname, 'r:bz2') for member in tarball.getnames(): tarball.extract(member, temp) return Store(os.path.join(temp, os.path.basename(dfn))) def setUp(self): """ Prepare to test a stub by opening and then fully upgrading the legacy store. """ self.store = self.openLegacyStore() self.service = IService(self.store) self.service.startService() return self.store.whenFullyUpgraded() def tearDown(self): return self.service.stopService() Axiom-0.6.0/axiom/test/historic/subStoreStartupService1to2.axiom.tbz20000644000175000017500000000332510453637107025576 0ustar exarkunexarkunBZh91AY&SYX 3B(4@X4FMI&S4D140zd0a0&4=F05`d &&0 040`B b0LAFa0F&LALF 0CC`L&bi0 2a`a`L LM0 *Hɡ1'MzS4~`<&=2`ѲGzdO4)?]#?fj;V1HPvh߆<|qjTs-J-Paբ?-BI֢c999"9y6Trߵo7;1EB#&pR-#aQXEAި:J3xnToY뾽Gٶ&ATK`FVM=H$t=z3{>b'<)"*GMk? _ # 'L-HPGjh{ R1iD/lZ$`֩uyxl֖uLW"c o𯾮[f aNOu?:^O1*G#G,jo#[YצќrH"=3ޓ%ɺNw;~oiᶏ~GZ~'Z%F׽&YK^JjL|it4)JSHІiz8#Αk6BY"ΙSc wբMޏmWkc?y|C JVb\JgbXJv1*Qt4`0NTR12nmhfM- 8t$~⤬X)x>OF|#LQM3oB.72Ib.I&R*bKJ+1< 3SKŤ`щuwdPSK"B)5 \ խaq8g7DO#j&MIқvMc[xѷnӅwf":Wfv\oGwum{Zֶm󺓙n*;c[Rb%dRT$*PC^уbwֳ *8pJ,t1Pn$AkTuKL[XK"ni]s:aS#$"Dl. 2R()mM `:#E3-C=*&.i*G+b0u<Dʧb^!ƱY66<5x|o7R'Xb,pHipr;, ;IyIKnGXAI>i!!A$;|mAzmR.xs<,V!@"5{>%& Qzm6:]J%-z`LQy4@&<{Ip#(CHτ#8ٌ:~ FzcEDD4Bn޸x^k1YSm0i~T谔  Y!R"$TFq-8[mU ])pV4l ˋl@BRƃ>ïPdDg6T:HQْŰڼ^тFе=\.U(c$Bax2 m8v̌sQ/+T]YZGſmΐԒ:,^da /ƚ#eX·~;a [ Z>q]X F2+!jRArHiȅkD3  fw$S 0Axiom-0.6.0/axiom/test/upgrade_fixtures/0000755000175000017500000000000011304543322020157 5ustar exarkunexarkunAxiom-0.6.0/axiom/test/upgrade_fixtures/__init__.py0000644000175000017500000000001010763355106022270 0ustar exarkunexarkun""" """ Axiom-0.6.0/axiom/test/upgrade_fixtures/override_init_new.py0000644000175000017500000000154310771736515024266 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_upgrading.DuringUpgradeTests.test_overridenInitializerInUpgrader -*- from axiom.attributes import integer, inmemory from axiom.item import Item, normalize from axiom.upgrade import registerAttributeCopyingUpgrader class Simple(Item): # Don't import the old version, otherwise its schema will get loaded. This # is valid in the upgrade tests, but not at other times. -exarkun typeName = normalize( "axiom.test.upgrade_fixtures.override_init_old.Simple") schemaVersion = 2 dummy = integer() verify = inmemory() def __init__(self, **stuff): """ Override Item's __init__ to re-retrieve this Item from the store. 
""" Item.__init__(self, **stuff) self.verify = (self, self.store.getItemByID(self.storeID)) registerAttributeCopyingUpgrader(Simple, 1, 2) Axiom-0.6.0/axiom/test/upgrade_fixtures/override_init_old.py0000644000175000017500000000042010764345053024237 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_upgrading.DuringUpgradeTests.test_overridenInitializerInUpgrader -*- from axiom.attributes import integer from axiom.item import Item class Simple(Item): """ A simple item that doesn't do much. """ dummy = integer() Axiom-0.6.0/axiom/test/upgrade_fixtures/reentrant_new.py0000644000175000017500000000134110771736515023422 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_upgrading.DuringUpgradeTests.test_reentrantUpgraderFailure -*- from axiom.attributes import integer, reference from axiom.item import Item, normalize from axiom.upgrade import registerUpgrader class Simple(Item): # Don't import the old schema. -exarkun typeName = normalize( "axiom.test.upgrade_fixtures.reentrant_old.Simple") schemaVersion = 2 dummy = integer() selfReference = reference() def upgradeSimple1to2(old): # Force the upgrade. selfRef = old.store.getItemByID(old.storeID) return old.upgradeVersion( old.typeName, 1, 2, dummy=old.dummy, selfReference=selfRef) registerUpgrader(upgradeSimple1to2, Simple.typeName, 1, 2) Axiom-0.6.0/axiom/test/upgrade_fixtures/reentrant_old.py0000644000175000017500000000041210764345053023400 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_upgrading.DuringUpgradeTests.test_reentrantUpgraderFailure -*- from axiom.attributes import integer from axiom.item import Item class Simple(Item): """ A simple item that doesn't do much. """ dummy = integer() Axiom-0.6.0/axiom/test/upgrade_fixtures/replace_attribute_new.py0000644000175000017500000000257410771736515025127 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_upgrading.DuringUpgradeTests.test_referenceModifiedByForeignUpgrader -*- from axiom.attributes import reference, integer from axiom.item import Item, normalize from axiom.upgrade import registerUpgrader NEW_VALUE = 71 class Referrer(Item): """ An item which just refers to another kind of item which will be upgraded. """ # Don't import the old schema. -exarkun typeName = normalize( "axiom.test.upgrade_fixtures.replace_attribute_old.Referrer") referee = reference() class Referee(Item): """ An item the upgrader of which replaces itself on L{Referrer} with a new instance with a different value. """ # Don't import the old schema. -exarkun typeName = normalize( "axiom.test.upgrade_fixtures.replace_attribute_old.Referee") schemaVersion = 2 value = integer() def referee1to2(oldReferee): """ Find the L{Referrer} which refers to C{oldReferee} and replace its C{referee} attribute with a new, different L{Referee} item with a different C{value}. """ store = oldReferee.store [referrer] = list(store.query(Referrer, Referrer.referee == oldReferee)) referrer.referee = Referee(store=store, value=NEW_VALUE) return oldReferee.upgradeVersion( Referee.typeName, 1, 2, value=oldReferee.value) registerUpgrader(referee1to2, Referee.typeName, 1, 2) Axiom-0.6.0/axiom/test/upgrade_fixtures/replace_attribute_old.py0000644000175000017500000000101110771736515025075 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_upgrading.DuringUpgradeTests.test_referenceModifiedByForeignUpgrader -*- from axiom.attributes import reference, integer from axiom.item import Item OLD_VALUE = 69 class Referrer(Item): """ An item which just refers to another kind of item which will be upgraded. 
""" referee = reference() class Referee(Item): """ An item the upgrader of which replaces itself on L{Referrer} with a new instance with a different value. """ value = integer() Axiom-0.6.0/axiom/test/upgrade_fixtures/replace_delete_new.py0000644000175000017500000000262110771736515024357 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_upgrading.DuringUpgradeTests.test_cascadingDeletedReferenceModifiedByForeignUpgrader -*- from axiom.attributes import reference, integer from axiom.item import Item, normalize from axiom.upgrade import registerUpgrader NEW_VALUE = 71 class Referrer(Item): """ An item which just refers to another kind of item which will be upgraded. """ # Don't import the old schema. -exarkun typeName = normalize( "axiom.test.upgrade_fixtures.replace_delete_old.Referrer") referee = reference(whenDeleted=reference.CASCADE) class Referee(Item): """ An item the upgrader of which replaces itself on L{Referrer} with a new instance with a different value. """ # Don't import the old schema. -exarkun typeName = normalize( "axiom.test.upgrade_fixtures.replace_delete_old.Referee") schemaVersion = 2 value = integer() def referee1to2(oldReferee): """ Find the L{Referrer} which refers to C{oldReferee} and replace its C{referee} attribute with a new, different L{Referee} item with a different C{value}, and which deletes the original L{Referee}. """ store = oldReferee.store [referrer] = list(store.query(Referrer, Referrer.referee == oldReferee)) referrer.referee = Referee(store=store, value=NEW_VALUE) oldReferee.deleteFromStore() registerUpgrader(referee1to2, Referee.typeName, 1, 2) Axiom-0.6.0/axiom/test/upgrade_fixtures/replace_delete_old.py0000644000175000017500000000106610771736515024346 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_upgrading.DuringUpgradeTests.test_cascadingDeletedReferenceModifiedByForeignUpgrader -*- from axiom.attributes import reference, integer from axiom.item import Item OLD_VALUE = 69 class Referrer(Item): """ An item which just refers to another kind of item which will be upgraded. """ referee = reference(whenDeleted=reference.CASCADE) class Referee(Item): """ An item the upgrader of which replaces itself on L{Referrer} with a new instance with a different value. """ value = integer() Axiom-0.6.0/axiom/test/upgrade_fixtures/two_upgrades_new.py0000644000175000017500000000160010771736515024121 0ustar exarkunexarkun from axiom.attributes import integer, reference from axiom.item import Item, normalize from axiom.upgrade import registerUpgrader class Referrer(Item): # Don't import the old schema. -exarkun typeName = normalize( 'axiom.test.upgrade_fixtures.two_upgrades_old.Referrer') schemaVersion = 2 referee = reference() def upgradeReferrer1to2(old): return old.upgradeVersion( old.typeName, 1, 2, referee=old.referee) registerUpgrader(upgradeReferrer1to2, Referrer.typeName, 1, 2) class Referee(Item): # Don't import the old schema. 
-exarkun typeName = normalize( 'axiom.test.upgrade_fixtures.two_upgrades_old.Referee') schemaVersion = 2 dummy = integer() def upgradeReferee1to2(old): return old.upgradeVersion( old.typeName, 1, 2, dummy=old.dummy) registerUpgrader(upgradeReferee1to2, Referee.typeName, 1, 2) Axiom-0.6.0/axiom/test/upgrade_fixtures/two_upgrades_old.py0000644000175000017500000000025310763355106024103 0ustar exarkunexarkun from axiom.attributes import integer, reference from axiom.item import Item class Referrer(Item): referee = reference() class Referee(Item): dummy = integer() Axiom-0.6.0/axiom/test/__init__.py0000644000175000017500000000000010272262634016705 0ustar exarkunexarkunAxiom-0.6.0/axiom/test/brokenapp.py0000644000175000017500000000252710527416666017160 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_upgrading -*- from axiom.item import Item from axiom.attributes import text, integer, reference, inmemory from axiom.upgrade import registerUpgrader class UpgradersAreBrokenHere(Exception): """ The upgraders in this module are broken. They raise this exception. """ class ActivateHelper: activated = 0 def activate(self): self.activated += 1 class Adventurer(ActivateHelper, Item): typeName = 'test_app_player' schemaVersion = 2 name = text() activated = inmemory() class Sword(ActivateHelper, Item): typeName = 'test_app_sword' schemaVersion = 2 name = text() damagePerHit = integer() owner = reference() activated = inmemory() def upgradePlayerAndSword(oldplayer): newplayer = oldplayer.upgradeVersion('test_app_player', 1, 2) newplayer.name = oldplayer.name oldsword = oldplayer.sword newsword = oldsword.upgradeVersion('test_app_sword', 1, 2) newsword.name = oldsword.name newsword.damagePerHit = oldsword.hurtfulness * 2 newsword.owner = newplayer return newplayer, newsword def player1to2(oldplayer): raise UpgradersAreBrokenHere() def sword1to2(oldsword): raise UpgradersAreBrokenHere() registerUpgrader(sword1to2, 'test_app_sword', 1, 2) registerUpgrader(player1to2, 'test_app_player', 1, 2) Axiom-0.6.0/axiom/test/cursortest.py0000644000175000017500000001171211015054134017365 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_pysqlite2 -*- """ Test code for any cursor implementation which is to work with Axiom. This probably isn't complete. """ from axiom.errors import TimeoutError, TableAlreadyExists, SQLError class StubCursor(object): """ Stand in for an actual database-backed cursor. Used by tests to assert the right calls are made to execute and to make sure errors from execute are handled correctly. @ivar statements: A list of SQL strings which have been executed. @ivar connection: A reference to the L{StubConnection} which created this cursor. """ def __init__(self, connection): self.connection = connection self.statements = [] def execute(self, statement, args=()): """ Capture some SQL for later inspection. """ self.statements.append(statement) class StubConnection(object): """ Stand in for an actual database-backed connection. Used by tests to create L{StubCursors} to easily test behavior of code which interacts with cursors. @ivar cursors: A list of all cursors ever created with this connection. """ def __init__(self): self.cursors = [] def cursor(self): """ Create and return a new L{StubCursor}. """ self.cursors.append(StubCursor(self)) return self.cursors[-1] def timeout(self): """ Induce behavior indicative of a database-level transient failure which might lead to a timeout. 
""" raise NotImplementedError class ConnectionTestCaseMixin: # The number of seconds we will allow for timeouts in this test suite. TIMEOUT = 5.0 # The amount of time beyond the specified timeout we will allow Axiom to # waste sleeping. This number shouldn't be changed very often, if ever. # We're testing a particular performance feature which we should be able to # rely on. ALLOWED_SLOP = 0.2 def createAxiomConnection(self): raise NotImplementedError("Cannot create Axiom Connection instance.") def createStubConnection(self): raise NotImplementedError("Cannot create Axiom Connection instance.") def createRealConnection(self): """ Create a memory-backed database connection for integration testing. """ raise NotImplementedError("Real connection creation not implemented.") def test_identifyTableCreationError(self): """ When the same table is created twice, we should get a TableAlreadyExists exception. """ con = self.createRealConnection() cur = con.cursor() CREATE_TABLE = "create table foo (bar integer)" cur.execute(CREATE_TABLE) e = self.assertRaises(TableAlreadyExists, cur.execute, CREATE_TABLE) def test_identifyGenericError(self): """ When invalid SQL is issued, we should get a SQLError exception. """ con = self.createRealConnection() cur = con.cursor() INVALID_STATEMENT = "not an SQL string" e = self.assertRaises(SQLError, cur.execute, INVALID_STATEMENT) def test_cursor(self): """ Test that the cursor method can actually create a cursor object. """ stubConnection = self.createStubConnection() axiomConnection = self.createAxiomConnection(stubConnection) axiomCursor = axiomConnection.cursor() self.assertEquals(len(stubConnection.cursors), 1) statement = "SELECT foo FROM bar" axiomCursor.execute(statement) self.assertEquals(len(stubConnection.cursors[0].statements), 1) self.assertEquals(stubConnection.cursors[0].statements[0], statement) def test_timeoutExceeded(self): """ Test that the timeout we pass to the Connection is respected. """ clock = [0] def time(): return clock[0] def sleep(n): clock[0] += n stubConnection = self.createStubConnection() axiomConnection = self.createAxiomConnection(stubConnection, timeout=self.TIMEOUT) axiomCursor = axiomConnection.cursor() axiomCursor.time = time axiomCursor.sleep = sleep def execute(statement, args=()): if time() < self.TIMEOUT * 2: return stubConnection.timeout() return object() stubConnection.cursors[0].execute = execute statement = 'SELECT foo FROM bar' timeoutException = self.assertRaises( TimeoutError, axiomCursor.execute, statement) self.failUnless( self.TIMEOUT <= time() <= self.TIMEOUT + self.ALLOWED_SLOP, "Wallclock duration of execute() call out of bounds.") self.assertEquals(timeoutException.statement, statement) self.assertEquals(timeoutException.timeout, self.TIMEOUT) self.failUnless(isinstance( timeoutException.underlying, self.expectedUnderlyingExceptionClass)) Axiom-0.6.0/axiom/test/deleteswordapp.py0000644000175000017500000000057711046607253020213 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_upgrading -*- """ New version of L{axiom.test.oldapp} which upgrades swords by deleting them. 
""" from axiom.item import Item from axiom.attributes import text from axiom.upgrade import registerDeletionUpgrader class Sword(Item): typeName = 'test_app_sword' schemaVersion = 2 name = text() registerDeletionUpgrader(Sword, 1, 2) Axiom-0.6.0/axiom/test/itemtest.py0000644000175000017500000000023510330527357017017 0ustar exarkunexarkun from axiom import item, attributes class PlainItem(item.Item): typeName = 'axiom_test_plain_item' schemaVersion = 1 plain = attributes.text() Axiom-0.6.0/axiom/test/itemtestmain.py0000644000175000017500000000055711010066072017657 0ustar exarkunexarkunimport sys from axiom import store from twisted.python import filepath def main(storePath, itemID): assert 'axiom.test.itemtest' not in sys.modules, "Test is invalid." st = store.Store(filepath.FilePath(storePath)) item = st.getItemByID(itemID) print item.plain if __name__ == '__main__': main(storePath=sys.argv[1], itemID=int(sys.argv[2])) Axiom-0.6.0/axiom/test/morenewapp.py0000644000175000017500000000561410412631340017331 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_upgrading.SchemaUpgradeTest.testUpgradeWithMissingVersion -*- from axiom.item import Item from axiom.attributes import text, integer, reference, inmemory from axiom.upgrade import registerUpgrader class ActivateHelper: activated = 0 def activate(self): self.activated += 1 class Adventurer(ActivateHelper, Item): typeName = 'test_app_player' schemaVersion = 2 name = text() activated = inmemory() class InventoryEntry(ActivateHelper, Item): typeName = 'test_app_inv' schemaVersion = 1 owner = reference() owned = reference() activated = inmemory() class Sword(ActivateHelper, Item): typeName = 'test_app_sword' schemaVersion = 3 name = text() damagePerHit = integer() activated = inmemory() def owner(): def get(self): return self.store.findUnique(InventoryEntry, InventoryEntry.owned == self).owner return get, owner = property(*owner()) def sword2to3(oldsword): newsword = oldsword.upgradeVersion('test_app_sword', 2, 3) n = oldsword.store.getOldVersionOf('test_app_sword', 2) itrbl = oldsword.store.query(n) newsword.name = oldsword.name newsword.damagePerHit = oldsword.damagePerHit invent = InventoryEntry(store=newsword.store, owner=oldsword.owner, owned=newsword) return newsword registerUpgrader(sword2to3, 'test_app_sword', 2, 3) ####### DOUBLE-LEGACY UPGRADE SPECTACULAR !! ########### # declare legacy class. 
from axiom.item import declareLegacyItem declareLegacyItem(typeName = 'test_app_sword', schemaVersion = 2, attributes = dict(name=text(), damagePerHit=integer(), owner=reference(), activated=inmemory())) def upgradePlayerAndSword(oldplayer): newplayer = oldplayer.upgradeVersion('test_app_player', 1, 2) newplayer.name = oldplayer.name oldsword = oldplayer.sword newsword = oldsword.upgradeVersion('test_app_sword', 1, 2, name=oldsword.name, damagePerHit=oldsword.hurtfulness * 2, owner=newplayer) return newplayer, newsword def player1to2(oldplayer): newplayer, newsword = upgradePlayerAndSword(oldplayer) return newplayer def sword1to2(oldsword): oldPlayerType = oldsword.store.getOldVersionOf('test_app_player', 1) oldplayer = list(oldsword.store.query(oldPlayerType, oldPlayerType.sword == oldsword))[0] newplayer, newsword = upgradePlayerAndSword(oldplayer) return newsword registerUpgrader(sword1to2, 'test_app_sword', 1, 2) registerUpgrader(player1to2, 'test_app_player', 1, 2) Axiom-0.6.0/axiom/test/newapp.py0000644000175000017500000000277610332002151016445 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_upgrading -*- from axiom.item import Item from axiom.attributes import text, integer, reference, inmemory from axiom.upgrade import registerUpgrader class ActivateHelper: activated = 0 def activate(self): self.activated += 1 class Adventurer(ActivateHelper, Item): typeName = 'test_app_player' schemaVersion = 2 name = text() activated = inmemory() class Sword(ActivateHelper, Item): typeName = 'test_app_sword' schemaVersion = 2 name = text() damagePerHit = integer() owner = reference() activated = inmemory() def upgradePlayerAndSword(oldplayer): newplayer = oldplayer.upgradeVersion('test_app_player', 1, 2) newplayer.name = oldplayer.name oldsword = oldplayer.sword newsword = oldsword.upgradeVersion('test_app_sword', 1, 2) newsword.name = oldsword.name newsword.damagePerHit = oldsword.hurtfulness * 2 newsword.owner = newplayer return newplayer, newsword def player1to2(oldplayer): newplayer, newsword = upgradePlayerAndSword(oldplayer) return newplayer def sword1to2(oldsword): oldPlayerType = oldsword.store.getOldVersionOf('test_app_player', 1) oldplayer = list(oldsword.store.query(oldPlayerType, oldPlayerType.sword == oldsword))[0] newplayer, newsword = upgradePlayerAndSword(oldplayer) return newsword registerUpgrader(sword1to2, 'test_app_sword', 1, 2) registerUpgrader(player1to2, 'test_app_player', 1, 2) Axiom-0.6.0/axiom/test/newcirc.py0000644000175000017500000000172010453637107016614 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_upgrading.DeletionTest.testCircular -*- from axiom.item import Item from axiom.attributes import reference, integer class A(Item): typeName = 'test_circular_a' b = reference() class B(Item): typeName = 'test_circular_b' a = reference() n = integer() schemaVersion = 2 from axiom.upgrade import registerUpgrader def b1to2(oldb): # This upgrader isn't doing anything that actually makes sense; in a # realistic upgrader, you'd probably be changing A around, perhaps deleting # it to destroy old adjunct items and creating a new A. The point is, # s.findUnique(A).b should give back the 'b' that you are upgrading whether # it is run before or after the upgrade. 
oldb.a.deleteFromStore() newb = oldb.upgradeVersion('test_circular_b', 1, 2) newb.n = oldb.n newb.a = A(store=newb.store, b=newb) return newb registerUpgrader(b1to2, 'test_circular_b', 1, 2) Axiom-0.6.0/axiom/test/newobsolete.py0000644000175000017500000000130410453637107017506 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_upgrading.DeletionTest.testPowerups -*- from axiom.item import Item from axiom.attributes import integer class Obsolete(Item): """ This is a stub placeholder so that axiomInvalidateModule will invalidate the appropriate typeName; it's probably bad practice to declare recent versions of deleted portions of the schema, but that's not what this is testing. """ typeName = 'test_upgrading_obsolete' nothing = integer() schemaVersion = 2 from axiom.upgrade import registerUpgrader def obsolete1toNone(oldObsolete): oldObsolete.deleteFromStore() return None registerUpgrader(obsolete1toNone, 'test_upgrading_obsolete', 1, 2) Axiom-0.6.0/axiom/test/newpath.py0000644000175000017500000000053310452261533016624 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_upgrading.PathUpgrade.testUpgradePath -*- from axiom.attributes import path from axiom.item import Item from axiom.upgrade import registerAttributeCopyingUpgrader class Path(Item): schemaVersion = 2 typeName = 'test_upgrade_path' thePath = path() registerAttributeCopyingUpgrader(Path, 1, 2) Axiom-0.6.0/axiom/test/oldapp.py0000644000175000017500000000050410272262634016436 0ustar exarkunexarkun from axiom.item import Item from axiom.attributes import text, integer, reference class Player(Item): typeName = 'test_app_player' schemaVersion = 1 name = text() sword = reference() class Sword(Item): typeName = 'test_app_sword' schemaVersion = 1 name = text() hurtfulness = integer() Axiom-0.6.0/axiom/test/oldcirc.py0000644000175000017500000000047010453637107016602 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_upgrading.DeletionTest.testCircular -*- from axiom.item import Item from axiom.attributes import reference, integer class A(Item): typeName = 'test_circular_a' b = reference() class B(Item): typeName = 'test_circular_b' a = reference() n = integer() Axiom-0.6.0/axiom/test/oldobsolete.py0000644000175000017500000000050610453637107017476 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_upgrading.DeletionTest.testPowerups -*- from axiom.item import Item from axiom.attributes import integer class Obsolete(Item): """ This is an obsolete class that will be destroyed in the upcoming version. """ typeName = 'test_upgrading_obsolete' nothing = integer() Axiom-0.6.0/axiom/test/oldpath.py0000644000175000017500000000036110452261533016610 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_upgrading.PathUpgrade.testUpgradePath -*- from axiom.attributes import path from axiom.item import Item class Path(Item): schemaVersion = 1 typeName = 'test_upgrade_path' thePath = path() Axiom-0.6.0/axiom/test/onestepapp.py0000644000175000017500000000563710577603644017361 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_upgrading.SwordUpgradeTest.test_upgradeSkipVersion -*- """ This is a newer version of the module found in oldapp.py, used in the upgrading tests. It upgrades from oldapp in one step, rather than requiring an intermediary step as morenewapp.py does. 
""" from axiom.item import Item from axiom.attributes import text, integer, reference, inmemory from axiom.upgrade import registerUpgrader class ActivateHelper: activated = 0 def activate(self): self.activated += 1 class Adventurer(ActivateHelper, Item): typeName = 'test_app_player' schemaVersion = 2 name = text() activated = inmemory() class InventoryEntry(ActivateHelper, Item): typeName = 'test_app_inv' schemaVersion = 1 owner = reference() owned = reference() activated = inmemory() class Sword(ActivateHelper, Item): typeName = 'test_app_sword' schemaVersion = 3 name = text() damagePerHit = integer() activated = inmemory() def owner(): def get(self): return self.store.findUnique(InventoryEntry, InventoryEntry.owned == self).owner return get, owner = property(*owner()) def sword2to3(oldsword): raise RuntimeError("The database does not contain any swords of version 2," " so you should be able to skip this version.") registerUpgrader(sword2to3, 'test_app_sword', 2, 3) ####### DOUBLE-LEGACY UPGRADE SPECTACULAR !! ########### # declare legacy class. from axiom.item import declareLegacyItem declareLegacyItem(typeName = 'test_app_sword', schemaVersion = 2, attributes = dict(name=text(), damagePerHit=integer(), owner=reference(), activated=inmemory())) def upgradePlayerAndSword(oldplayer): newplayer = oldplayer.upgradeVersion('test_app_player', 1, 2) newplayer.name = oldplayer.name oldsword = oldplayer.sword newsword = oldsword.upgradeVersion('test_app_sword', 1, 3, name=oldsword.name, damagePerHit=oldsword.hurtfulness * 2) invent = InventoryEntry(store=newsword.store, owner=newplayer, owned=newsword) return newplayer, newsword def player1to2(oldplayer): newplayer, newsword = upgradePlayerAndSword(oldplayer) return newplayer def sword1to3(oldsword): oldPlayerType = oldsword.store.getOldVersionOf('test_app_player', 1) oldplayer = list(oldsword.store.query(oldPlayerType, oldPlayerType.sword == oldsword))[0] newplayer, newsword = upgradePlayerAndSword(oldplayer) return newsword registerUpgrader(sword1to3, 'test_app_sword', 1, 3) registerUpgrader(player1to2, 'test_app_player', 1, 2) Axiom-0.6.0/axiom/test/openthenload.py0000644000175000017500000000127211010066072017627 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_xatop.ProcessConcurrencyTestCase -*- # this file is in support of the named test case import sys from axiom.store import Store from twisted.python import filepath # Open the store so that we get the bad version of the schema s = Store(filepath.FilePath(sys.argv[1])) # Alert our parent that we did that sys.stdout.write("1") sys.stdout.flush() # Grab the storeID we are supposed to be reading sids = sys.stdin.readline() sid = int(sids) # load the item we were told to - this should force a schema reload s.getItemByID(sid) # let our parent process know that we loaded it successfully sys.stdout.write("2") sys.stdout.flush() # then terminate cleanly Axiom-0.6.0/axiom/test/path_postcopy.py0000644000175000017500000000112710642535177020063 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_upgrading.PathUpgrade.test_postCopy -*- from axiom.attributes import path from axiom.item import Item from axiom.upgrade import registerAttributeCopyingUpgrader class Path(Item): """ Trivial Item class for testing upgrading. """ schemaVersion = 2 typeName = 'test_upgrade_path' thePath = path() def fixPath(it): """ An example postcopy function, for fixing up an item after its attributes have been copied. 
""" it.thePath = it.thePath.child("foo") registerAttributeCopyingUpgrader(Path, 1, 2, postCopy=fixPath) Axiom-0.6.0/axiom/test/reactorimporthelper.py0000644000175000017500000000072411217776571021267 0ustar exarkunexarkun# Copyright 2009 Divmod, Inc. See LICENSE file for details """ Helper for axiomatic reactor-selection unit tests. """ # The main point of this file: import the default reactor. from twisted.internet import reactor # Define an Item, too, so that it can go into a Store and trigger an import of # this module at schema-check (ie, store opening) time. from axiom.item import Item from axiom.attributes import integer class SomeItem(Item): attribute = integer() Axiom-0.6.0/axiom/test/test_attributes.py0000644000175000017500000003157311037453061020412 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_attributes -*- import random from decimal import Decimal from epsilon.extime import Time from twisted.trial.unittest import TestCase from twisted.python.reflect import qual from axiom.store import Store from axiom.item import Item, normalize, Placeholder from axiom.attributes import Comparable, SQLAttribute, integer, timestamp, textlist from axiom.attributes import ieee754_double, point1decimal, money class Number(Item): typeName = 'test_number' schemaVersion = 1 value = ieee754_double() class IEEE754DoubleTest(TestCase): def testRoundTrip(self): s = Store() Number(store=s, value=7.1) n = s.findFirst(Number) self.assertEquals(n.value, 7.1) def testFPSumsAreBrokenSoDontUseThem(self): s = Store() for x in range(10): Number(store=s, value=0.1) self.assertNotEquals(s.query(Number).getColumn("value").sum(), 1.0) # This isn't really a unit test. It's documentation. self.assertEquals(s.query(Number).getColumn("value").sum(), 0.99999999999999989) class DecimalDoodad(Item): integral = point1decimal(default=0, allowNone=False) otherMoney = money(allowNone=True) extraintegral = integer() money = money(default=0) class FixedPointDecimalTest(TestCase): def testSum(self): s = Store() for x in range(10): DecimalDoodad(store=s, money=Decimal("0.10")) self.assertEquals(s.query(DecimalDoodad).getColumn("money").sum(), 1) def testRoundTrip(self): s = Store() DecimalDoodad(store=s, integral=19947, money=Decimal("4.3"), otherMoney=Decimal("-17.94")) self.assertEquals(s.findFirst(DecimalDoodad).integral, 19947) self.assertEquals(s.findFirst(DecimalDoodad).money, Decimal("4.3")) self.assertEquals(s.findFirst(DecimalDoodad).otherMoney, Decimal("-17.9400")) def testComparisons(self): s = Store() DecimalDoodad(store=s, money=Decimal("19947.000000"), otherMoney=19947) self.assertEquals( s.query(DecimalDoodad, DecimalDoodad.money == DecimalDoodad.otherMoney).count(), 1) self.assertEquals( s.query(DecimalDoodad, DecimalDoodad.money != DecimalDoodad.otherMoney).count(), 0) self.assertEquals( s.query(DecimalDoodad, DecimalDoodad.money == 19947).count(), 1) self.assertEquals( s.query(DecimalDoodad, DecimalDoodad.money == Decimal("19947")).count(), 1) def testDisallowedComparisons(self): # These tests should go away; it's (mostly) possible to support # comparison of different precisions: # sqlite> select 1/3; # 0 # sqlite> select 3/1; # 3 # sqlite> select 3/2; # 1 s = Store() DecimalDoodad(store=s, integral=1, money=1) self.assertRaises(TypeError, lambda : s.query( DecimalDoodad, DecimalDoodad.integral == DecimalDoodad.money)) self.assertRaises(TypeError, lambda : s.query( DecimalDoodad, DecimalDoodad.integral == DecimalDoodad.extraintegral)) class SpecialStoreIDAttributeTest(TestCase): def 
testStringStoreIDsDontWork(self): s = Store() sid = Number(store=s, value=1.0).storeID self.assertRaises(TypeError, s.getItemByID, str(sid)) self.assertRaises(TypeError, s.getItemByID, float(sid)) self.assertRaises(TypeError, s.getItemByID, unicode(sid)) class SortedItem(Item): typeName = 'test_sorted_thing' schemaVersion = 1 goingUp = integer() goingDown = integer() theSame = integer() class SortingTest(TestCase): def testCompoundSort(self): s = Store() L = [] r10 = range(10) random.shuffle(r10) L.append(SortedItem(store=s, goingUp=0, goingDown=1000, theSame=8)) for x in r10: L.append(SortedItem(store=s, goingUp=10+x, goingDown=10-x, theSame=7)) for colnms in [['goingUp'], ['goingUp', 'storeID'], ['goingUp', 'theSame'], ['theSame', 'goingUp'], ['theSame', 'storeID']]: LN = L[:] LN.sort(key=lambda si: tuple([getattr(si, colnm) for colnm in colnms])) ascsort = [getattr(SortedItem, colnm).ascending for colnm in colnms] descsort = [getattr(SortedItem, colnm).descending for colnm in colnms] self.assertEquals(LN, list(s.query(SortedItem, sort=ascsort))) LN.reverse() self.assertEquals(LN, list(s.query(SortedItem, sort=descsort))) class FunkyItem(Item): name = unicode() class BadAttributeTest(TestCase): def test_badAttribute(self): """ L{Item} should not allow setting undeclared attributes. """ s = Store() err = self.failUnlessRaises(AttributeError, FunkyItem, store=s, name=u"foo") self.assertEquals(str(err), "'FunkyItem' can't set attribute 'name'") class WhiteboxComparableTest(TestCase): def test_likeRejectsIllegalOperations(self): """ Test that invoking the underlying method which provides the interface to the LIKE operator raises a TypeError if it is invoked with too few arguments. """ self.assertRaises(TypeError, Comparable()._like, 'XYZ') someRandomDate = Time.fromISO8601TimeAndDate("1980-05-29") class DatedThing(Item): date = timestamp(default=someRandomDate) class CreationDatedThing(Item): creationDate = timestamp(defaultFactory=lambda : Time()) class StructuredDefaultTestCase(TestCase): def testTimestampDefault(self): s = Store() sid = DatedThing(store=s).storeID self.assertEquals(s.getItemByID(sid).date, someRandomDate) def testTimestampNow(self): s = Store() sid = CreationDatedThing(store=s).storeID self.failUnless( (Time().asDatetime() - s.getItemByID(sid).creationDate.asDatetime()).seconds < 10) class TaggedListyThing(Item): strlist = textlist() class StringListTestCase(TestCase): def tryRoundtrip(self, value): """ Attempt to roundtrip a value through a database store and load, to ensure the representation is not lossy. """ s = Store() tlt = TaggedListyThing(store=s, strlist=value) self.assertEquals(tlt.strlist, value) # Force it out of the cache, so it gets reloaded from the store del tlt tlt = s.findUnique(TaggedListyThing) self.assertEquals(tlt.strlist, value) def test_simpleListOfStrings(self): """ Test that a simple list can be stored and retrieved successfully. """ SOME_VALUE = [u'abc', u'def, ghi', u'jkl'] self.tryRoundtrip(SOME_VALUE) def test_emptyList(self): """ Test that an empty list can be stored and retrieved successfully. """ self.tryRoundtrip([]) def test_oldRepresentation(self): """ Test that the new code can still correctly load the old representation which could not handle an empty list. 
""" oldCases = [ (u'foo', [u'foo']), (u'', [u'']), (u'\x1f', [u'', u'']), (u'foo\x1fbar', [u'foo', u'bar']), ] for dbval, pyval in oldCases: self.assertEqual(TaggedListyThing.strlist.outfilter(dbval, None), pyval) class SQLAttributeDummyClass(Item): """ Dummy class which L{SQLAttributeTestCase} will poke at to assert various behaviors. """ dummyAttribute = SQLAttribute() class FullImplementationDummyClass(Item): """ Dummy class which L{SQLAttributeTestCase} will poke at to assert various behaviors - SQLAttribute is really an abstract base class, so this uses a concrete attribute (integer) for its assertions. """ dummyAttribute = integer() class SQLAttributeTestCase(TestCase): """ Tests for behaviors of the L{axiom.attributes.SQLAttribute} class. """ def test_attributeName(self): """ Test that an L{SQLAttribute} knows its own local name. """ self.assertEquals( SQLAttributeDummyClass.dummyAttribute.attrname, 'dummyAttribute') def test_fullyQualifiedName(self): """ Test that the L{SQLAttribute.fullyQualifiedName} method correctly returns the fully qualified Python name of the attribute: that is, the fully qualified Python name of the type it is defined on (plus a dot) plus the name of the attribute. """ self.assertEquals( SQLAttributeDummyClass.dummyAttribute.fullyQualifiedName(), 'axiom.test.test_attributes.SQLAttributeDummyClass.dummyAttribute') def test_fullyQualifiedStoreID(self): """ Test that the L{IColumn} implementation on the storeID emits the correct fullyQualifiedName as well. This is necessary because storeID is unfortunately implemented differently than other columns, due to its presence on Item. """ self.assertEquals( SQLAttributeDummyClass.storeID.fullyQualifiedName(), 'axiom.test.test_attributes.SQLAttributeDummyClass.storeID') def test_fullyQualifiedPlaceholder(self): """ Verify that the L{IColumn.fullyQualifiedName} implementation on placeholder attributes returns a usable string, but one which is recognizable as an invalid Python identifier. """ ph = Placeholder(SQLAttributeDummyClass) self.assertEquals( 'axiom.test.test_attributes.SQLAttributeDummyClass' '.dummyAttribute.' % (ph._placeholderCount,), ph.dummyAttribute.fullyQualifiedName()) def test_accessor(self): """ Test that the __get__ of SQLAttribute does the obvious thing, and returns its value when given an instance. """ dummy = FullImplementationDummyClass(dummyAttribute=1234) self.assertEquals( FullImplementationDummyClass.dummyAttribute.__get__(dummy), 1234) self.assertEquals(dummy.dummyAttribute, 1234) def test_storeIDAccessor(self): """ Test that the __get__ of the IColumn implementation for storeID works the same as that for normal attributes. """ s = Store() dummy = FullImplementationDummyClass(store=s) self.assertIdentical(s.getItemByID(dummy.storeID), dummy) def test_placeholderAccessor(self): """ Test that the __get__ of SQLAttribute does the obvious thing, and returns its value when given an instance. """ dummy = FullImplementationDummyClass(dummyAttribute=1234) self.assertEquals( Placeholder(FullImplementationDummyClass ).dummyAttribute.__get__(dummy), 1234) self.assertEquals(dummy.dummyAttribute, 1234) def test_typeAttribute(self): """ Test that the C{type} attribute of an L{SQLAttribute} references the class on which the attribute is defined. """ self.assertIdentical( SQLAttributeDummyClass, SQLAttributeDummyClass.dummyAttribute.type) def test_getShortColumnName(self): """ Test that L{Store.getShortColumnName} returns something pretty close to the name of the attribute. 
XXX Testing this really well would require being able to parse a good chunk of SQL. I don't know how to do that yet. -exarkun """ s = Store() self.assertIn( 'dummyAttribute', s.getShortColumnName(SQLAttributeDummyClass.dummyAttribute)) def test_getColumnName(self): """ Test that L{Store.getColumnName} returns something made up of the attribute's type's typeName and the attribute's name. """ s = Store() self.assertIn( 'dummyAttribute', s.getColumnName(SQLAttributeDummyClass.dummyAttribute)) self.assertIn( normalize(qual(SQLAttributeDummyClass)), s.getColumnName(SQLAttributeDummyClass.dummyAttribute)) Axiom-0.6.0/axiom/test/test_axiomatic.py0000644000175000017500000003522311217776571020214 0ustar exarkunexarkun# Copyright 2006-2009 Divmod, Inc. See LICENSE file for details """ Tests for L{axiom.scripts.axiomatic}. """ import sys, os, signal, StringIO from zope.interface import implements from twisted.python.log import msg from twisted.python.filepath import FilePath from twisted.python.procutils import which from twisted.python.runtime import platform from twisted.trial.unittest import SkipTest, TestCase from twisted.plugin import IPlugin from twisted.internet import reactor from twisted.internet.task import deferLater from twisted.internet.protocol import ProcessProtocol from twisted.internet.defer import Deferred from twisted.internet.error import ProcessTerminated from twisted.application.service import IService, IServiceCollection from axiom.store import Store from axiom.item import Item from axiom.attributes import boolean from axiom.scripts import axiomatic from axiom.listversions import SystemVersion from axiom.iaxiom import IAxiomaticCommand from twisted.plugins.axiom_plugins import AxiomaticStart from axiom.test.reactorimporthelper import SomeItem class RecorderService(Item): """ Minimal L{IService} implementation which remembers if it was ever started. This is used by tests to make sure services get started when they should be. """ implements(IService) started = boolean( doc=""" A flag which is initially false and set to true once C{startService} is called. """, default=False) name = "recorder" def setServiceParent(self, parent): """ Do the standard Axiom thing to make sure this service becomes a child of the top-level store service. """ IServiceCollection(parent).addService(self) def startService(self): """ Remember that this method was called. """ self.started = True def stopService(self): """ Ignore this event. """ class StartTests(TestCase): """ Test the axiomatic start sub-command. """ def setUp(self): """ Work around Twisted #3178 by tricking trial into thinking something asynchronous is happening. """ return deferLater(reactor, 0, lambda: None) def _getRunDir(self, dbdir): return dbdir.child("run") def _getLogDir(self, dbdir): return self._getRunDir(dbdir).child("logs") def test_getArguments(self): """ L{Start.getArguments} adds a I{--pidfile} argument if one is not present and a I{--logfile} argument if one is not present and daemonization is enabled and adds a I{--dbdir} argument pointing at the store it is passed. """ dbdir = FilePath(self.mktemp()) store = Store(dbdir) run = self._getRunDir(dbdir) logs = self._getLogDir(dbdir) start = axiomatic.Start() logfileArg = ["--logfile", logs.child("axiomatic.log").path] # twistd on Windows doesn't support PID files, so on Windows, # getArguments should *not* add --pidfile. 
if platform.isWindows(): pidfileArg = [] else: pidfileArg = ["--pidfile", run.child("axiomatic.pid").path] restArg = ["axiomatic-start", "--dbdir", dbdir.path] self.assertEqual( start.getArguments(store, []), logfileArg + pidfileArg + restArg) self.assertEqual( start.getArguments(store, ["--logfile", "foo"]), ["--logfile", "foo"] + pidfileArg + restArg) self.assertEqual( start.getArguments(store, ["-l", "foo"]), ["-l", "foo"] + pidfileArg + restArg) self.assertEqual( start.getArguments(store, ["--nodaemon"]), ["--nodaemon"] + pidfileArg + restArg) self.assertEqual( start.getArguments(store, ["-n"]), ["-n"] + pidfileArg + restArg) self.assertEqual( start.getArguments(store, ["--pidfile", "foo"]), ["--pidfile", "foo"] + logfileArg + restArg) def test_logDirectoryCreated(self): """ If L{Start.getArguments} adds a I{--logfile} argument, it creates the necessary directory. """ dbdir = FilePath(self.mktemp()) store = Store(dbdir) start = axiomatic.Start() start.getArguments(store, ["-l", "foo"]) self.assertFalse(self._getLogDir(dbdir).exists()) start.getArguments(store, []) self.assertTrue(self._getLogDir(dbdir).exists()) def test_parseOptions(self): """ L{Start.parseOptions} adds axiomatic-suitable defaults for any unspecified parameters and then calls L{twistd.run} with the modified argument list. """ argv = [] def fakeRun(): argv.extend(sys.argv) options = axiomatic.Options() options['dbdir'] = dbdir = self.mktemp() start = axiomatic.Start() start.parent = options start.run = fakeRun original = sys.argv[:] try: start.parseOptions(["-l", "foo", "--pidfile", "bar"]) finally: sys.argv[:] = original self.assertEqual( argv, [sys.argv[0], "-l", "foo", "--pidfile", "bar", "axiomatic-start", "--dbdir", os.path.abspath(dbdir)]) def test_parseOptionsHelp(self): """ L{Start.parseOptions} writes usage information to stdout if C{"--help"} is in the argument list it is passed and L{twistd.run} is not called. """ start = axiomatic.Start() start.run = None original = sys.stdout sys.stdout = stdout = StringIO.StringIO() try: self.assertRaises(SystemExit, start.parseOptions, ["--help"]) finally: sys.stdout = original # Some random options that should be present. This is a bad test # because we don't control what C{opt_help} actually does and we don't # even really care as long as it's the same as what I{twistd --help} # does. We could try running them both and comparing, but then we'd # still want to do some sanity check against one of them in case we end # up getting the twistd version incorrectly somehow... -exarkun self.assertIn("--reactor", stdout.getvalue()) if not platform.isWindows(): # This isn't an option on Windows, so it shouldn't be there. self.assertIn("--uid", stdout.getvalue()) # Also, we don't want to see twistd plugins here. self.assertNotIn("axiomatic-start", stdout.getvalue()) def test_checkSystemVersion(self): """ The L{IService} returned by L{AxiomaticStart.makeService} calls L{checkSystemVersion} with its store when it is started. This is done for I{axiomatic start} rather than somewhere in the implementation of L{Store} so that it happens only once per server startup. The overhead of doing it whenever a store is opened is non-trivial. 
""" dbdir = self.mktemp() store = Store(dbdir) service = AxiomaticStart.makeService({'dbdir': dbdir, 'debug': False}) self.assertEqual(store.query(SystemVersion).count(), 0) service.startService() self.assertEqual(store.query(SystemVersion).count(), 1) return service.stopService() def test_axiomOptions(self): """ L{AxiomaticStart.options} takes database location and debug setting parameters. """ options = AxiomaticStart.options() options.parseOptions([]) self.assertEqual(options['dbdir'], None) self.assertFalse(options['debug']) options.parseOptions(["--dbdir", "foo", "--debug"]) self.assertEqual(options['dbdir'], 'foo') self.assertTrue(options['debug']) def test_makeService(self): """ L{AxiomaticStart.makeService} returns the L{IService} powerup of the L{Store} at the directory in the options object it is passed. """ dbdir = FilePath(self.mktemp()) store = Store(dbdir) recorder = RecorderService(store=store) self.assertFalse(recorder.started) store.powerUp(recorder, IService) store.close() service = AxiomaticStart.makeService({"dbdir": dbdir, "debug": False}) service.startService() service.stopService() store = Store(dbdir) self.assertTrue(store.getItemByID(recorder.storeID).started) def test_reactorSelection(self): """ L{AxiomaticStart} optionally takes the name of a reactor and installs it instead of the default reactor. """ # Since this process is already hopelessly distant from the state in # which I{axiomatic start} operates, it would make no sense to try a # functional test of this behavior in this process. Since the # behavior being tested involves lots of subtle interactions between # lots of different pieces of code (the reactor might get installed # at the end of a ten-deep chain of imports going through as many # different projects), it also makes no sense to try to make this a # unit test. So, start a child process and try to use the alternate # reactor functionality there. here = FilePath(__file__) # Try to find it relative to the source of this test. bin = here.parent().parent().parent().child("bin") axiomatic = bin.child("axiomatic") if axiomatic.exists(): # Great, use that one. axiomatic = axiomatic.path else: # Try to find it on the path, instead. axiomatics = which("axiomatic") if axiomatics: # Great, it was on the path. axiomatic = axiomatics[0] else: # Nope, not there, give up. raise SkipTest( "Could not find axiomatic script on path or at %s" % ( axiomatic.path,)) # Create a store for the child process to use and put an item in it. # This will force an import of the module that defines that item's # class when the child process starts. The module imports the default # reactor at the top-level, making this the worst-case for the reactor # selection code. storePath = self.mktemp() store = Store(storePath) SomeItem(store=store) store.close() # Install select reactor because it available on all platforms, and # it is still an error to try to install the select reactor even if # the already installed reactor was the select reactor. argv = [ sys.executable, axiomatic, "-d", storePath, "start", "--reactor", "select", "-n"] expected = [ "reactor class: twisted.internet.selectreactor.SelectReactor.", "reactor class: "] proto, complete = AxiomaticStartProcessProtocol.protocolAndDeferred(expected) # Make sure the version of Axiom under test is found by the child # process. 
import axiom, epsilon environ = os.environ.copy() environ['PYTHONPATH'] = os.pathsep.join([ FilePath(epsilon.__file__).parent().parent().path, FilePath(axiom.__file__).parent().parent().path, environ['PYTHONPATH']]) reactor.spawnProcess(proto, sys.executable, argv, env=environ) return complete class AxiomaticStartProcessProtocol(ProcessProtocol): """ L{AxiomaticStartProcessProtocol} watches an I{axiomatic start} process and fires a L{Deferred} when it sees either successful reactor installation or process termination. @ivar _success: A flag which is C{False} until the expected text is found in the child's stdout and C{True} thereafter. @ivar _output: A C{str} giving all of the stdout from the child received thus far. """ _success = False _output = "" def protocolAndDeferred(cls, expected): """ Create and return an L{AxiomaticStartProcessProtocol} and a L{Deferred}. The L{Deferred} will fire when the protocol receives the given string on standard out or when the process ends, whichever comes first. """ proto = cls() proto._complete = Deferred() proto._expected = expected return proto, proto._complete protocolAndDeferred = classmethod(protocolAndDeferred) def errReceived(self, bytes): """ Report the given unexpected stderr data. """ msg("Received stderr from axiomatic: %r" % (bytes,)) def outReceived(self, bytes): """ Add the given bytes to the output buffer and check to see if the reactor has been installed successfully, firing the completion L{Deferred} if so. """ msg("Received stdout from axiomatic: %r" % (bytes,)) self._output += bytes if not self._success: for line in self._output.splitlines(): for expectedLine in self._expected: if expectedLine in line: msg("Received expected output") self._success = True self.transport.signalProcess("TERM") def processEnded(self, reason): """ Check that the process exited in the way expected and that the required text has been found in its output and fire the result L{Deferred} with either a value or a failure. """ self._complete, result = None, self._complete if self._success: if platform.isWindows() or ( # Windows can't tell that we SIGTERM'd it, so sorry. reason.check(ProcessTerminated) and reason.value.signal == signal.SIGTERM): result.callback(None) return # Something went wrong. result.errback(reason) class TestMisc(TestCase): """ Test things not directly involving running axiomatic commands. """ def test_axiomaticCommandProvides(self): """ Test that AxiomaticCommand itself does not provide IAxiomaticCommand or IPlugin, but subclasses do. 
""" self.failIf(IAxiomaticCommand.providedBy(axiomatic.AxiomaticCommand), 'IAxiomaticCommand provided') self.failIf(IPlugin.providedBy(axiomatic.AxiomaticCommand), 'IPlugin provided') class _TestSubClass(axiomatic.AxiomaticCommand): pass self.failUnless(IAxiomaticCommand.providedBy(_TestSubClass), 'IAxiomaticCommand not provided') self.failUnless(IPlugin.providedBy(_TestSubClass), 'IPlugin not provided') Axiom-0.6.0/axiom/test/test_batch.py0000644000175000017500000004761111224737657017323 0ustar exarkunexarkun from twisted.trial import unittest from twisted.python import failure, filepath from twisted.application import service from axiom import iaxiom, store, item, attributes, batch, substore class TestWorkUnit(item.Item): information = attributes.integer() def __repr__(self): return '' % (self.information,) class ExtraUnit(item.Item): unformashun = attributes.text() class WorkListener(item.Item): comply = attributes.integer(doc=""" This exists solely to satisfy the requirement that Items have at least one persistent attribute. """) listener = attributes.inmemory(doc=""" A callable which will be invoked by processItem. This will be provided by the test method and will assert that the appropriate items are received, in the appropriate order. """) def processItem(self, item): self.listener(item) class BatchTestCase(unittest.TestCase): def setUp(self): self.procType = batch.processor(TestWorkUnit) self.store = store.Store() self.scheduler = iaxiom.IScheduler(self.store) def testItemTypeCreation(self): """ Test that processors for a different Item types can be created, that they are valid Item types themselves, and that repeated calls return the same object when appropriate. """ procB = batch.processor(TestWorkUnit) self.assertIdentical(self.procType, procB) procC = batch.processor(ExtraUnit) self.failIfIdentical(procB, procC) self.failIfEqual(procB.typeName, procC.typeName) def testInstantiation(self): """ Test that a batch processor can be instantiated and added to a database, and that it can be retrieved in the usual ways. """ proc = self.procType(store=self.store) self.assertIdentical(self.store.findUnique(self.procType), proc) def testListenerlessProcessor(self): """ Test that a batch processor can be stepped even if it has no listeners, and that it correctly reports it has no work to do. """ proc = self.procType(store=self.store) self.failIf(proc.step(), "expected no more work to be reported, some was") TestWorkUnit(store=self.store, information=0) self.failIf(proc.step(), "expected no more work to be reported, some was") def testListeners(self): """ Test that items can register or unregister their interest in a processor's batch of items. """ proc = self.procType(store=self.store) listenerA = WorkListener(store=self.store) listenerB = WorkListener(store=self.store) self.assertEquals(list(proc.getReliableListeners()), []) proc.addReliableListener(listenerA) self.assertEquals(list(proc.getReliableListeners()), [listenerA]) proc.addReliableListener(listenerB) expected = [listenerA, listenerB] listeners = list(proc.getReliableListeners()) self.assertEquals(sorted(expected), sorted(listeners)) proc.removeReliableListener(listenerA) self.assertEquals(list(proc.getReliableListeners()), [listenerB]) proc.removeReliableListener(listenerB) self.assertEquals(list(proc.getReliableListeners()), []) def testBasicProgress(self): """ Test that when a processor is created and given a chance to run, it completes some work. 
""" processedItems = [] def listener(item): processedItems.append(item.information) proc = self.procType(store=self.store) listener = WorkListener(store=self.store, listener=listener) proc.addReliableListener(listener) self.assertEquals(processedItems, []) self.failIf(proc.step(), "expected no work to be reported, some was") self.assertEquals(processedItems, []) for i in range(3): TestWorkUnit(store=self.store, information=i) ExtraUnit(store=self.store, unformashun=unicode(-i)) self.failUnless(proc.step(), "expected more work to be reported, none was") self.assertEquals(processedItems, [0]) self.failUnless(proc.step(), "expected more work to be reported, none was") self.assertEquals(processedItems, [0, 1]) self.failIf(proc.step(), "expected no more work to be reported, some was") self.assertEquals(processedItems, [0, 1, 2]) self.failIf(proc.step(), "expected no more work to be reported, some was") self.assertEquals(processedItems, [0, 1, 2]) def testProgressAgainstExisting(self): """ Test that when a processor is created when work units exist already, it works backwards to notify its listener of all those existing work units. Also test that work units created after the processor are also handled. """ processedItems = [] def listener(item): processedItems.append(item.information) proc = self.procType(store=self.store) listener = WorkListener(store=self.store, listener=listener) for i in range(3): TestWorkUnit(store=self.store, information=i) proc.addReliableListener(listener) self.assertEquals(processedItems, []) self.failUnless(proc.step(), "expected more work to be reported, none was") self.assertEquals(processedItems, [2]) self.failUnless(proc.step(), "expected more work to be reported, none was") self.assertEquals(processedItems, [2, 1]) self.failIf(proc.step(), "expected no more work to be reported, some was") self.assertEquals(processedItems, [2, 1, 0]) self.failIf(proc.step(), "expected no more work to be reported, some was") self.assertEquals(processedItems, [2, 1, 0]) for i in range(3, 6): TestWorkUnit(store=self.store, information=i) self.failUnless(proc.step(), "expected more work to be reported, none was") self.assertEquals(processedItems, [2, 1, 0, 3]) self.failUnless(proc.step(), "expected more work to be reported, none was") self.assertEquals(processedItems, [2, 1, 0, 3, 4]) self.failIf(proc.step(), "expected no more work to be reported, some was") self.assertEquals(processedItems, [2, 1, 0, 3, 4, 5]) self.failIf(proc.step(), "expected no more work to be reported, some was") self.assertEquals(processedItems, [2, 1, 0, 3, 4, 5]) def testBrokenListener(self): """ Test that if a listener's processItem method raises an exception, processing continues beyond that item and that an error marker is created for that item. """ errmsg = "This reliable listener is not very reliable!" processedItems = [] def listener(item): if item.information == 1: raise RuntimeError(errmsg) processedItems.append(item.information) proc = self.procType(store=self.store) listener = WorkListener(store=self.store, listener=listener) proc.addReliableListener(listener) # Make some work, step the processor, and fake the error handling # behavior the Scheduler actually provides. 
for i in range(3): TestWorkUnit(store=self.store, information=i) try: proc.step() except batch._ProcessingFailure: proc.timedEventErrorHandler( (u"Oh crap, I do not have a TimedEvent, " "I sure hope that never becomes a problem."), failure.Failure()) self.assertEquals(processedItems, [0, 2]) errors = list(proc.getFailedItems()) self.assertEquals(len(errors), 1) self.assertEquals(errors[0][0], listener) self.assertEquals(errors[0][1].information, 1) loggedErrors = self.flushLoggedErrors(RuntimeError) self.assertEquals(len(loggedErrors), 1) self.assertEquals(loggedErrors[0].getErrorMessage(), errmsg) def testMultipleListeners(self): """ Test that a single batch processor with multiple listeners added at different times delivers each item to each listener. """ processedItemsA = [] def listenerA(item): processedItemsA.append(item.information) processedItemsB = [] def listenerB(item): processedItemsB.append(item.information) proc = self.procType(store=self.store) for i in range(2): TestWorkUnit(store=self.store, information=i) firstListener = WorkListener(store=self.store, listener=listenerA) proc.addReliableListener(firstListener) for i in range(2, 4): TestWorkUnit(store=self.store, information=i) secondListener = WorkListener(store=self.store, listener=listenerB) proc.addReliableListener(secondListener) for i in range(4, 6): TestWorkUnit(store=self.store, information=i) for i in range(100): if not proc.step(): break else: self.fail("Processing loop took too long") self.assertEquals( processedItemsA, [2, 3, 4, 5, 1, 0]) self.assertEquals( processedItemsB, [4, 5, 3, 2, 1, 0]) def testRepeatedAddListener(self): """ Test that adding the same listener repeatedly has the same effect as adding it once. """ proc = self.procType(store=self.store) listener = WorkListener(store=self.store) proc.addReliableListener(listener) proc.addReliableListener(listener) self.assertEquals(list(proc.getReliableListeners()), [listener]) def testSuperfluousItemAddition(self): """ Test the addItem method for work which would have been done already, and so for which addItem should therefore be a no-op. """ processedItems = [] def listener(item): processedItems.append(item.information) proc = self.procType(store=self.store) listener = WorkListener(store=self.store, listener=listener) # Create a couple items so there will be backwards work to do. one = TestWorkUnit(store=self.store, information=0) two = TestWorkUnit(store=self.store, information=1) rellist = proc.addReliableListener(listener) # Create a couple more items so there will be some forwards work to do. three = TestWorkUnit(store=self.store, information=2) four = TestWorkUnit(store=self.store, information=3) # There are only two regions at this point - work behind and work # ahead; no work has been done yet, so there's no region in between. # Add items behind and ahead of the point; these should not result in # any explicit tracking items, since they would have been processed in # due course anyway. rellist.addItem(two) rellist.addItem(three) for i in range(100): if not proc.step(): break else: self.fail("Processing loop took too long") self.assertEquals(processedItems, [2, 3, 1, 0]) def testReprocessItemAddition(self): """ Test the addItem method for work which is within the bounds of work already done, and so which would not have been processed without the addItem call. 
""" processedItems = [] def listener(item): processedItems.append(item.information) proc = self.procType(store=self.store) listener = WorkListener(store=self.store, listener=listener) rellist = proc.addReliableListener(listener) one = TestWorkUnit(store=self.store, information=0) two = TestWorkUnit(store=self.store, information=1) three = TestWorkUnit(store=self.store, information=2) for i in range(100): if not proc.step(): break else: self.fail("Processing loop took too long") self.assertEquals(processedItems, range(3)) # Now that we have processed some items, re-add one of those items to # be re-processed and make sure it actually does get passed to the # listener again. processedItems = [] rellist.addItem(two) for i in xrange(100): if not proc.step(): break else: self.fail("Processing loop took too long") self.assertEquals(processedItems, [1]) def test_processorStartsUnscheduled(self): """ Test that when a processor is first created, it is not scheduled to perform any work. """ proc = self.procType(store=self.store) self.assertIdentical(proc.scheduled, None) self.assertEquals( list(self.scheduler.scheduledTimes(proc)), []) def test_itemAddedIgnoredWithoutListeners(self): """ Test that if C{itemAdded} is called while the processor is idle but there are no listeners, the processor does not schedule itself to do any work. """ proc = self.procType(store=self.store) proc.itemAdded() self.assertEqual(proc.scheduled, None) self.assertEquals( list(self.scheduler.scheduledTimes(proc)), []) def test_itemAddedSchedulesProcessor(self): """ Test that if C{itemAdded} is called while the processor is idle and there are listeners, the processor does schedules itself to do some work. """ proc = self.procType(store=self.store) listener = WorkListener(store=self.store) proc.addReliableListener(listener) # Get rid of the scheduler state that addReliableListener call just # created. proc.scheduled = None self.scheduler.unscheduleAll(proc) proc.itemAdded() self.failIfEqual(proc.scheduled, None) self.assertEquals( list(self.scheduler.scheduledTimes(proc)), [proc.scheduled]) def test_addReliableListenerSchedulesProcessor(self): """ Test that if C{addReliableListener} is called while the processor is idle, the processor schedules itself to do some work. """ proc = self.procType(store=self.store) listener = WorkListener(store=self.store) proc.addReliableListener(listener) self.failIfEqual(proc.scheduled, None) self.assertEquals( list(self.scheduler.scheduledTimes(proc)), [proc.scheduled]) def test_itemAddedWhileScheduled(self): """ Test that if C{itemAdded} is called when the processor is already scheduled to run, the processor remains scheduled to run at the same time. """ proc = self.procType(store=self.store) listener = WorkListener(store=self.store) proc.addReliableListener(listener) when = proc.scheduled proc.itemAdded() self.assertEquals(proc.scheduled, when) self.assertEquals( list(self.scheduler.scheduledTimes(proc)), [proc.scheduled]) def test_addReliableListenerWhileScheduled(self): """ Test that if C{addReliableListener} is called when the processor is already scheduled to run, the processor remains scheduled to run at the same time. 
""" proc = self.procType(store=self.store) listenerA = WorkListener(store=self.store) proc.addReliableListener(listenerA) when = proc.scheduled listenerB = WorkListener(store=self.store) proc.addReliableListener(listenerB) self.assertEquals(proc.scheduled, when) self.assertEquals( list(self.scheduler.scheduledTimes(proc)), [proc.scheduled]) def test_processorIdlesWhenCaughtUp(self): """ Test that the C{run} method of the processor returns C{None} when it has done all the work it needs to do, thus unscheduling the processor. """ proc = self.procType(store=self.store) self.assertIdentical(proc.run(), None) class BatchCallTestItem(item.Item): called = attributes.boolean(default=False) def callIt(self): self.called = True class BrokenException(Exception): """ Exception always raised by L{BrokenReliableListener.processItem}. """ class BatchWorkItem(item.Item): """ Item class which will be delivered as work units for testing error handling around reliable listeners. """ value = attributes.text(default=u"unprocessed") BatchWorkSource = batch.processor(BatchWorkItem) class BrokenReliableListener(item.Item): """ A listener for batch work which always raises an exception from its processItem method. Used to test that errors from processItem are properly handled. """ anAttribute = attributes.integer() def processItem(self, item): raise BrokenException("Broken Reliable Listener is working as expected.") class WorkingReliableListener(item.Item): """ A listener for batch work which actually works. Used to test that even if a broken reliable listener is around, working ones continue to receive new items to process. """ anAttribute = attributes.integer() def processItem(self, item): item.value = u"processed" class RemoteTestCase(unittest.TestCase): def testBatchService(self): """ Make sure SubStores can be adapted to L{iaxiom.IBatchService}. """ dbdir = filepath.FilePath(self.mktemp()) s = store.Store(dbdir) ss = substore.SubStore.createNew(s, 'substore') bs = iaxiom.IBatchService(ss) self.failUnless(iaxiom.IBatchService.providedBy(bs)) def testProcessLifetime(self): """ Test that the batch system process can be started and stopped. """ dbdir = filepath.FilePath(self.mktemp()) s = store.Store(dbdir) svc = batch.BatchProcessingControllerService(s) svc.startService() return svc.stopService() def testCalling(self): """ Test invoking a method on an item in the batch process. """ dbdir = filepath.FilePath(self.mktemp()) s = store.Store(dbdir) ss = substore.SubStore.createNew(s, 'substore') service.IService(s).startService() d = iaxiom.IBatchService(ss).call(BatchCallTestItem(store=ss.open()).callIt) def called(ign): self.failUnless(ss.open().findUnique(BatchCallTestItem).called, "Was not called") return service.IService(s).stopService() return d.addCallback(called) def testProcessingServiceStepsOverErrors(self): """ If any processor raises an unexpected exception, the work unit which was being processed should be marked as having had an error and processing should move on to the next item. Make sure that this actually happens when L{BatchProcessingService} is handling those errors. 
""" BATCH_WORK_UNITS = 3 dbdir = filepath.FilePath(self.mktemp()) st = store.Store(dbdir) source = BatchWorkSource(store=st) for i in range(BATCH_WORK_UNITS): BatchWorkItem(store=st) source.addReliableListener(BrokenReliableListener(store=st), iaxiom.REMOTE) source.addReliableListener(WorkingReliableListener(store=st), iaxiom.REMOTE) svc = batch.BatchProcessingService(st, iaxiom.REMOTE) task = svc.step() # Loop 6 (BATCH_WORK_UNITS * 2) times - three items times two # listeners, it should not take any more than six iterations to # completely process all work. for i in xrange(BATCH_WORK_UNITS * 2): task.next() self.assertEquals( len(self.flushLoggedErrors(BrokenException)), BATCH_WORK_UNITS) self.assertEquals( st.query(BatchWorkItem, BatchWorkItem.value == u"processed").count(), BATCH_WORK_UNITS) Axiom-0.6.0/axiom/test/test_count.py0000644000175000017500000000443510330527357017356 0ustar exarkunexarkunfrom twisted.trial.unittest import TestCase from axiom.store import Store from axiom.item import Item from axiom.attributes import integer, AND, OR class ThingsWithIntegers(Item): schemaVersion = 1 typeName = 'axiom_test_thing_with_integers' a = integer() b = integer() class NotARealThing(Item): schemaVersion = 1 typeName = 'axiom_test_never_created_item' irrelevantAttribute = integer() def __init__(self, **kw): raise NotImplementedError("You cannot create things that are not real!") class TestCountQuery(TestCase): RANGE = 10 MIDDLE = 5 def assertCountEqualsQuery(self, item, cond = None): self.assertEquals(self.store.count(item, cond), len(list(self.store.query(item, cond))), 'count and len(list(query)) not equals: %r,%r'%(item, cond)) def setUp(self): self.store = Store() def populate(): for i in xrange(self.RANGE): for j in xrange(self.RANGE): ThingsWithIntegers(store = self.store, a = i, b = j) self.store.transact(populate) def testBasicCount(self): self.assertCountEqualsQuery(ThingsWithIntegers) def testSimpleConditionCount(self): self.assertCountEqualsQuery(ThingsWithIntegers, ThingsWithIntegers.a > self.MIDDLE) def testTwoFieldsConditionCount(self): self.assertCountEqualsQuery(ThingsWithIntegers, ThingsWithIntegers.a == ThingsWithIntegers.b) def testANDConditionCount(self): self.assertCountEqualsQuery(ThingsWithIntegers, AND(ThingsWithIntegers.a > self.MIDDLE, ThingsWithIntegers.b < self.MIDDLE)) def testORConditionCount(self): self.assertCountEqualsQuery(ThingsWithIntegers, OR(ThingsWithIntegers.a > self.MIDDLE, ThingsWithIntegers.b < self.MIDDLE)) def testEmptyResult(self): self.assertCountEqualsQuery(ThingsWithIntegers, ThingsWithIntegers.a == self.RANGE + 3) def testNonExistentTable(self): self.assertCountEqualsQuery(NotARealThing, NotARealThing.irrelevantAttribute == self.RANGE + 3) Axiom-0.6.0/axiom/test/test_crossstore.py0000644000175000017500000000344411010066072020417 0ustar exarkunexarkun from axiom.store import Store from axiom.substore import SubStore from axiom.item import Item from axiom.attributes import integer from twisted.trial.unittest import TestCase from twisted.python import filepath class ExplosiveItem(Item): nothing = integer() def yourHeadAsplode(self): 1 / 0 class CrossStoreTest(TestCase): def setUp(self): self.spath = filepath.FilePath(self.mktemp() + ".axiom") self.store = Store(self.spath) self.substoreitem = SubStore.createNew(self.store, ["sub.axiom"]) self.substore = self.substoreitem.open() # Not available yet. 
self.substore.attachToParent() class TestCrossStoreTransactions(CrossStoreTest): def testCrossStoreTransactionality(self): def createTwoSubStoreThings(): ExplosiveItem(store=self.store) ei = ExplosiveItem(store=self.substore) ei.yourHeadAsplode() self.failUnlessRaises(ZeroDivisionError, self.store.transact, createTwoSubStoreThings) self.failUnlessEqual( self.store.query(ExplosiveItem).count(), 0) self.failUnlessEqual( self.substore.query(ExplosiveItem).count(), 0) class TestCrossStoreInsert(CrossStoreTest): def testCrossStoreInsert(self): def populate(s, n): for i in xrange(n): ExplosiveItem(store=s) self.store.transact(populate, self.store, 2) self.store.transact(populate, self.substore, 3) self.failUnlessEqual( self.store.query(ExplosiveItem).count(), 2) self.failUnlessEqual( self.substore.query(ExplosiveItem).count(), 3) Axiom-0.6.0/axiom/test/test_dependency.py0000644000175000017500000005053511050455516020341 0ustar exarkunexarkun# Copyright 2008 Divmod, Inc. See LICENSE file for details. from zope.interface import Interface, implements from twisted.trial import unittest from axiom import dependency from axiom.store import Store from axiom.substore import SubStore from axiom.item import Item from axiom.errors import UnsatisfiedRequirement from axiom.attributes import text, integer, reference, inmemory class IElectricityGrid(Interface): """ An interface representing something likely to be present in the site store. As opposed to the other examples below, present in a hypothetical kitchen, it is something managed for lots of different people. """ def draw(watts): """ Draw some number of watts from this power grid. @return: a constant, one of L{REAL_POWER} or L{FAKE_POWER}. """ FAKE_POWER = 'fake power' REAL_POWER = 'real power' class NullGrid(object): """ This is a null electricity grid. It is provided as a default grid in the case where a site store is not present. """ implements(IElectricityGrid) def __init__(self, siteStore): """ Create a null grid with a reference to the site store. """ self.siteStore = siteStore def draw(self, watts): """ Draw some watts from the null power grid. For simplicity of examples below, this works. Not in real life, though. In a more realistic example, this might do something temporary to work around the site misconfiguration, and warn an administrator that someone was getting power out of thin air. Or, depending on the application, we might raise an exception to prevent this operation from succeeding. """ return FAKE_POWER class RealGrid(Item): """ A power grid for the power utility; this is an item which should be installed on a site store. """ implements(IElectricityGrid) powerupInterfaces = (IElectricityGrid,) totalWattage = integer(default=10000000, doc=""" Total wattage of the entire electricity grid. (This is currently a dummy attribute.) """) def draw(self, watts): """ Draw some real power from the real power grid. This is the way that the site should probably be working. """ return REAL_POWER def noGrid(siteStore): """ No power grid was available. Raise an exception. """ raise RuntimeError("No power grid available.") class IronLung(Item): """ This item is super serious business! It has to draw real power from the real power grid; it won't be satisfied with fake power; too risky for its life-critical operation. So it doesn't specify a placeholder default grid. @ivar grid: a read-only reference to an L{IElectricityGrid} provider, resolved via the site store this L{IronLung} is in.
""" wattsPerPump = integer(default=100, allowNone=False, doc=""" The number of watts to draw from L{self.grid} when L{IronLung.pump} is called. """) grid = dependency.requiresFromSite(IElectricityGrid) def pump(self): """ Attempting to pump the iron lung by talking to the power grid. """ return self.grid.draw(self.wattsPerPump) class SpecifiedBadDefaults(Item): """ Depends on a power grid, but specifies defaults for that dependency that should never be invoked. This item can't retrieve a grid. @ivar grid: Retrieving this attribute should never work. It should raise L{RuntimeError}. """ dummy = integer(doc=""" Dummy attribute required by axiom for Item class definition. """) grid = dependency.requiresFromSite(IElectricityGrid, noGrid, noGrid) def pump(self): """ Attempting to pump the iron lung by talking to the power grid. """ return self.grid.draw(100) class Kitchen(Item): name = text() class PowerStrip(Item): """ A simulated collection of power points. This is where L{IAppliance} providers get their power from. @ivar grid: A read-only reference to an L{IElectricityGrid} provider. This may be a powerup provided by the site store or a L{NullGrid} if no powerup is installed. """ voltage = integer() grid = dependency.requiresFromSite(IElectricityGrid, NullGrid, NullGrid) def setForUSElectricity(self): if not self.voltage: self.voltage = 110 else: raise RuntimeError("Oops! power strip already set up") def draw(self, watts): """ Draw the given amount of power from this strip's electricity grid. @param watts: The number of watts to draw. @type watts: L{int} """ return self.grid.draw(watts) class PowerPlant(Item): """ This is an item which supplies the grid with power. It lives in the site store. @ivar grid: a read-only reference to an L{IElectricityGrid} powerup on the site store, or a L{NullGrid} if none is installed. If this item is present in a user store, retrieving this will raise a L{RuntimeError}. """ wattage = integer(default=1000, allowNone=False, doc=""" The amount of power the grid will be supplied with. Currently a dummy attribute required by axiom for item definition. """) grid = dependency.requiresFromSite(IElectricityGrid, noGrid, NullGrid) class IAppliance(Interface): pass class IBreadConsumer(Interface): pass class Breadbox(Item): slices = integer(default=100) def dispenseBread(self, amt): self.slices -= amt class Toaster(Item): implements(IBreadConsumer) powerupInterfaces = (IAppliance, IBreadConsumer) powerStrip = dependency.dependsOn(PowerStrip, lambda ps: ps.setForUSElectricity(), doc="the power source for this toaster") description = text() breadFactory = dependency.dependsOn( Breadbox, doc="the thing we get bread input from", whenDeleted=reference.CASCADE) callback = inmemory() def activate(self): self.callback = None def installed(self): if self.callback is not None: self.callback("installed") def uninstalled(self): if self.callback is not None: self.callback("uninstalled") def toast(self): self.powerStrip.draw(100) self.breadFactory.dispenseBread(2) def powerstripSetup(ps): ps.setForUSElectricity() class Blender(Item): powerStrip = dependency.dependsOn(PowerStrip, powerstripSetup) description = text() def __getPowerupInterfaces__(self, powerups): yield (IAppliance, 0) class IceCrusher(Item): blender = dependency.dependsOn(Blender) class Blender2(Item): powerStrip = reference() class DependencyTest(unittest.TestCase): def setUp(self): self.store = Store() def test_dependsOn(self): """ Ensure that classes with dependsOn attributes set up the dependency map properly. 
""" foo = Blender(store=self.store) depBlob = dependency._globalDependencyMap.get(Blender, None)[0] self.assertEqual(depBlob[0], PowerStrip) self.assertEqual(depBlob[1], powerstripSetup) self.assertEqual(depBlob[2], Blender.__dict__['powerStrip']) def test_classDependsOn(self): """ Ensure that classDependsOn sets up the dependency map properly. """ dependency.classDependsOn(Blender2, PowerStrip, powerstripSetup, Blender2.__dict__['powerStrip']) depBlob = dependency._globalDependencyMap.get(Blender2, None)[0] self.assertEqual(depBlob[0], PowerStrip) self.assertEqual(depBlob[1], powerstripSetup) self.assertEqual(depBlob[2], Blender2.__dict__['powerStrip']) def test_basicInstall(self): """ If a Toaster gets installed in a Kitchen, make sure that the required dependencies get instantiated and installed too. """ foo = Kitchen(store=self.store) e = Toaster(store=self.store) self.assertEquals(e.powerStrip, None) dependency.installOn(e, foo) e.toast() ps = self.store.findUnique(PowerStrip, default=None) bb = self.store.findUnique(Breadbox, default=None) self.failIfIdentical(ps, None) self.failIfIdentical(bb, None) self.assertEquals(e.powerStrip, ps) self.assertEquals(ps.voltage, 110) self.assertEquals(e.breadFactory, bb) self.assertEquals(set(dependency.installedRequirements(e, foo)), set([ps, bb])) self.assertEquals(list(dependency.installedDependents(ps, foo)), [e]) def test_basicUninstall(self): """ Ensure that uninstallation removes the adapter from the former install target and all orphaned dependencies. """ foo = Kitchen(store=self.store) e = Toaster(store=self.store) dependency.installOn(e, foo) dependency.uninstallFrom(e, foo) self.assertEqual(dependency.installedOn(e), None) self.assertEqual(dependency.installedOn(e.powerStrip), None) def test_wrongUninstall(self): """ Ensure that attempting to uninstall an item that something else depends on fails. """ foo = Kitchen(store=self.store) e = Toaster(store=self.store) dependency.installOn(e, foo) ps = self.store.findUnique(PowerStrip) self.failUnlessRaises(dependency.DependencyError, dependency.uninstallFrom, ps, foo) def test_properOrphaning(self): """ If two installed items both depend on a third, it should be removed as soon as both installed items are removed, but no sooner. """ foo = Kitchen(store=self.store) e = Toaster(store=self.store) dependency.installOn(e, foo) ps = self.store.findUnique(PowerStrip) bb = self.store.findUnique(Breadbox) f = Blender(store=self.store) dependency.installOn(f, foo) self.assertEquals(list(self.store.query(PowerStrip)), [ps]) #XXX does ordering matter? self.assertEquals(set(dependency.installedDependents(ps, foo)), set([e, f])) self.assertEquals(set(dependency.installedRequirements(e, foo)), set([bb, ps])) self.assertEquals(list(dependency.installedRequirements(f, foo)), [ps]) dependency.uninstallFrom(e, foo) self.assertEquals(dependency.installedOn(ps), foo) dependency.uninstallFrom(f, foo) self.assertEquals(dependency.installedOn(ps), None) def test_installedUniqueRequirements(self): """ Ensure that installedUniqueRequirements lists only powerups depended on by exactly one installed powerup. 
""" foo = Kitchen(store=self.store) e = Toaster(store=self.store) dependency.installOn(e, foo) ps = self.store.findUnique(PowerStrip) bb = self.store.findUnique(Breadbox) f = Blender(store=self.store) dependency.installOn(f, foo) self.assertEquals(list(dependency.installedUniqueRequirements(e, foo)), [bb]) def test_customizerCalledOnce(self): """ The item customizer defined for a dependsOn attribute should only be called if an item is created implicitly to satisfy the dependency. """ foo = Kitchen(store=self.store) ps = PowerStrip(store=self.store) dependency.installOn(ps, foo) ps.voltage = 115 e = Toaster(store=self.store) dependency.installOn(e, foo) self.assertEqual(ps.voltage, 115) def test_explicitInstall(self): """ If an item is explicitly installed, it should not be implicitly uninstalled. Also, dependsOn attributes should be filled in properly even if a dependent item is not installed automatically. """ foo = Kitchen(store=self.store) ps = PowerStrip(store=self.store) dependency.installOn(ps, foo) e = Toaster(store=self.store) dependency.installOn(e, foo) self.assertEqual(e.powerStrip, ps) dependency.uninstallFrom(e, foo) self.assertEquals(dependency.installedOn(ps), foo) def test_doubleInstall(self): """ Make sure that installing two instances of a class on the same target fails, if something depends on that class, and succeeds otherwise. """ foo = Kitchen(store=self.store) e = Toaster(store=self.store) dependency.installOn(e, foo) ps = PowerStrip(store=self.store) self.failUnlessRaises(dependency.DependencyError, dependency.installOn, ps, foo) e2 = Toaster(store=self.store) dependency.installOn(e2, foo) def test_recursiveInstall(self): """ Installing an item should install all of its dependencies, and all of its dependencies, and so forth. """ foo = Kitchen(store=self.store) ic = IceCrusher(store=self.store) dependency.installOn(ic, foo) blender = self.store.findUnique(Blender) ps = self.store.findUnique(PowerStrip) self.assertEquals(dependency.installedOn(blender), foo) self.assertEquals(dependency.installedOn(ps), foo) self.assertEquals(list(dependency.installedRequirements(ic, foo)), [blender]) def test_recursiveUninstall(self): """ Removal of items should recursively remove orphaned dependencies. """ foo = Kitchen(store=self.store) ic = IceCrusher(store=self.store) dependency.installOn(ic, foo) blender = self.store.findUnique(Blender) ps = self.store.findUnique(PowerStrip) dependency.uninstallFrom(ic, foo) self.failIf(dependency.installedOn(blender)) self.failIf(dependency.installedOn(ps)) self.failIf(dependency.installedOn(ic)) def test_wrongDependsOn(self): """ dependsOn should raise an error if used outside a class definition. """ self.assertRaises(TypeError, dependency.dependsOn, Toaster) def test_referenceArgsPassthrough(self): """ dependsOn should accept (most of) attributes.reference's args. """ self.failUnless("power source" in Toaster.powerStrip.doc) self.assertEquals(Toaster.breadFactory.whenDeleted, reference.CASCADE) def test_powerupInterfaces(self): """ Make sure interfaces are powered up and down properly. 
""" foo = Kitchen(store=self.store) e = Toaster(store=self.store) f = Blender(store=self.store) dependency.installOn(e, foo) dependency.installOn(f, foo) self.assertEquals(IAppliance(foo), e) self.assertEquals(IBreadConsumer(foo), e) dependency.uninstallFrom(e, foo) self.assertEquals(IAppliance(foo), f) dependency.uninstallFrom(f, foo) self.assertRaises(TypeError, IAppliance, foo) def test_callbacks(self): """ 'installed' and 'uninstalled' callbacks should fire on install/uninstall. """ foo = Kitchen(store=self.store) e = Toaster(store=self.store) self.installCallbackCalled = False e.callback = lambda _: setattr(self, 'installCallbackCalled', True) dependency.installOn(e, foo) self.failUnless(self.installCallbackCalled) self.uninstallCallbackCalled = False e.callback = lambda _: setattr(self, 'uninstallCallbackCalled', True) dependency.uninstallFrom(e, foo) self.failUnless(self.uninstallCallbackCalled) def test_onlyInstallPowerups(self): """ Make sure onlyInstallPowerups doesn't load dependencies or prohibit multiple calls. """ foo = Kitchen(store=self.store) e = Toaster(store=self.store) f = Toaster(store=self.store) dependency.onlyInstallPowerups(e, foo) dependency.onlyInstallPowerups(f, foo) self.assertEquals(list(foo.powerupsFor(IBreadConsumer)), [e, f]) self.assertEquals(list(self.store.query( dependency._DependencyConnector)), []) class RequireFromSiteTests(unittest.TestCase): """ L{axiom.dependency.requiresFromSite} should allow items in either a user or site store to depend on powerups in the site store. """ def setUp(self): """ Create a L{Store} to be used as the site store for these tests. """ self.store = Store() def test_requiresFromSite(self): """ The value of a L{axiom.dependency.requiresFromSite} descriptor ought to be the powerup on the site for the instance it describes. """ dependency.installOn(RealGrid(store=self.store), self.store) substore = SubStore.createNew(self.store, ['sub']).open() self.assertEquals(PowerStrip(store=substore).draw(1), REAL_POWER) def test_requiresFromSiteDefault(self): """ The value of a L{axiom.dependency.requiresFromSite} descriptor on an item in a user store ought to be the result of invoking its default factory parameter. """ substore = SubStore.createNew(self.store, ['sub']).open() ps = PowerStrip(store=substore) self.assertEquals(ps.draw(1), FAKE_POWER) self.assertEquals(ps.grid.siteStore, self.store) def test_requiresFromSiteInSiteStore(self): """ L{axiom.dependency.requiresFromSite} should use the C{siteDefaultFactory} rather than the C{defaultFactory} to satisfy the dependency for items stored in a site store. It should use this default whether or not any item which could satisfy the requirement is installed on the site store. This behavior is important because some powerup interfaces are provided for site and user stores with radically different behaviors; for example, the substore implementation of L{IScheduler} depends on the site implementation of L{IScheduler}; if a user's substore were opened accidentally as a site store (i.e. with no parent) then the failure of the scheduler API should be obvious and immediate so that it can compensate; it should not result in an infinite recursion as the scheduler is looking for its parent. Items which wish to be stored in a site store and also depend on items in the site store can specifically adapt to the appropriate interface in the C{siteDefaultFactory} supplied to L{dependency.requiresFromSite}. 
""" plant = PowerPlant(store=self.store) self.assertEquals(plant.grid.siteStore, self.store) self.assertEquals(plant.grid.draw(100), FAKE_POWER) dependency.installOn(RealGrid(store=self.store), self.store) self.assertEquals(plant.grid.siteStore, self.store) self.assertEquals(plant.grid.draw(100), FAKE_POWER) def test_requiresFromSiteNoDefault(self): """ The default function shouldn't be needed or invoked if its value isn't going to be used. """ dependency.installOn(RealGrid(store=self.store), self.store) substore = SubStore.createNew(self.store, ['sub']).open() self.assertEquals(SpecifiedBadDefaults(store=substore).pump(), REAL_POWER) def test_requiresFromSiteUnspecifiedException(self): """ If a default factory function isn't supplied, an L{UnsatisfiedRequirement}, which should be a subtype of L{AttributeError}, should be raised when the descriptor is retrieved. """ lung = IronLung(store=self.store) siteLung = IronLung( store=SubStore.createNew(self.store, ['sub']).open()) self.assertRaises(UnsatisfiedRequirement, lambda : lung.grid) self.assertRaises(UnsatisfiedRequirement, lambda : siteLung.grid) default = object() self.assertIdentical(getattr(lung, 'grid', default), default) Axiom-0.6.0/axiom/test/test_files.py0000644000175000017500000000432411010066072017311 0ustar exarkunexarkun import os from twisted.trial import unittest from twisted.python import filepath from axiom.store import Store from axiom.item import Item from axiom.attributes import path class PathTesterItem(Item): schemaVersion = 1 typeName = 'test_path_thing' relpath = path() abspath = path(relative=False) class InStoreFilesTest(unittest.TestCase): """ Tests for files managed by the store. """ def _testFile(self, s): """ Shared part of file creation tests. """ f = s.newFile('test', 'whatever.txt') f.write('crap') def cb(fpath): self.assertEquals(fpath.open().read(), 'crap') return f.close().addCallback(cb) def test_createFile(self): """ Ensure that file creation works for on-disk stores. """ s = Store(filepath.FilePath(self.mktemp())) return self._testFile(s) def test_createFileInMemory(self): """ Ensure that file creation works for in-memory stores as well. """ s = Store(filesdir=filepath.FilePath(self.mktemp())) return self._testFile(s) def test_createFileInMemoryAtString(self): """ The 'filesdir' parameter should accept a string as well, for now. """ s = Store(filesdir=self.mktemp()) return self._testFile(s) def test_noFiledir(self): """ File creation should raise an error if the store has no file directory. """ s = Store() self.assertRaises(RuntimeError, s.newFile, "test", "whatever.txt") class PathAttributesTest(unittest.TestCase): def testRelocatingPaths(self): spath = self.mktemp() npath = self.mktemp() s = Store(spath) rel = s.newFile("test", "123") TEST_STR = "test 123" def cb(fpath): fpath.open("w").write(TEST_STR) PathTesterItem(store=s, relpath=fpath) s.close() os.rename(spath, npath) s2 = Store(npath) pti = list(s2.query(PathTesterItem))[0] self.assertEquals(pti.relpath.open().read(), TEST_STR) return rel.close().addCallback(cb) Axiom-0.6.0/axiom/test/test_inheritance.py0000644000175000017500000000142010272262634020505 0ustar exarkunexarkun # This module is really a placeholder: inheritance between database classes is # unsupported in XAtop right now. We are just making sure that it is # aggressively unsupported. 
from twisted.trial import unittest

from axiom.item import Item, NoInheritance
from axiom.attributes import integer

class InheritanceUnsupported(unittest.TestCase):

    def testNoInheritance(self):
        class XA(Item):
            schemaVersion = 1
            typeName = 'inheritance_test_xa'
            a = integer()

        try:
            class XB(XA):
                schemaVersion = 1
                typeName = 'inheritance_test_xb'
                b = integer()
        except NoInheritance:
            pass
        else:
            self.fail("Expected NoInheritance but none occurred")
Axiom-0.6.0/axiom/test/test_item.py0000644000175000017500000004274311104101637017155 0ustar exarkunexarkun
import sys, os

from twisted.trial import unittest
from twisted.trial.unittest import TestCase
from twisted.internet import error, protocol, defer, reactor
from twisted.protocols import policies
from twisted.python import log, filepath

from axiom import store, item
from axiom.store import Store
from axiom.item import Item, declareLegacyItem
from axiom.errors import ChangeRejected
from axiom.test import itemtest, itemtestmain
from axiom.attributes import integer, text, inmemory


class ProcessFailed(Exception):
    pass


class ProcessOutputCollector(protocol.ProcessProtocol, policies.TimeoutMixin):
    TIMEOUT = 60

    def __init__(self, onCompletion):
        self.output = []
        self.error = []
        self.onCompletion = onCompletion
        self.onCompletion.addCallback(self.processOutput)

    def processOutput(self, output):
        return output

    def timeoutConnection(self):
        self.transport.signalProcess('KILL')

    def connectionMade(self):
        self.setTimeout(self.TIMEOUT)

    def outReceived(self, bytes):
        self.resetTimeout()
        self.output.append(bytes)

    def errReceived(self, bytes):
        self.resetTimeout()
        self.error.append(bytes)

    def processEnded(self, reason):
        self.setTimeout(None)
        if reason.check(error.ProcessTerminated):
            self.onCompletion.errback(ProcessFailed(self, reason))
        elif self.error:
            self.onCompletion.errback(ProcessFailed(self, None))
        else:
            self.onCompletion.callback(self.output)


class NoAttrsItem(item.Item):
    typeName = 'noattrsitem'
    schemaVersion = 1


class TransactedMethodItem(item.Item):
    """
    Helper class for testing the L{axiom.item.transacted} decorator.
    """
    value = text()
    calledWith = inmemory()

    def method(self, a, b, c):
        self.value = u"changed"
        self.calledWith = [a, b, c]
        raise Exception("TransactedMethodItem.method test exception")
    method.attribute = 'value'
    method = item.transacted(method)


class StoredNoticingItem(item.Item):
    """
    Test item which just remembers whether or not its C{stored} method has
    been called.
    """
    storedCount = integer(doc="""
    The number of times C{stored} has been called on this item.
    """, default=0)

    activatedCount = integer(doc="""
    The number of times C{stored} has been called on this item after its
    C{activate} callback had already run.
    """, default=0)

    activated = inmemory(doc="""
    A value set in the C{activate} callback and nowhere else.  Used to
    determine the ordering of C{activate} and C{stored} calls.
    """)

    def activate(self):
        self.activated = True

    def stored(self):
        self.storedCount += 1
        self.activatedCount += getattr(self, 'activated', 0)


class ItemWithDefault(item.Item):
    """
    Item with an attribute having a default value.
    """
    value = integer(default=10)


class ItemTestCase(unittest.TestCase):
    """
    Tests for L{Item}.
    """

    def test_repr(self):
        """
        L{Item.__repr__} should return a C{str} giving the name of the
        subclass and the names and values of all the item's attributes.
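        For example, C{repr(ItemWithDefault(value=123))} should mention the
        class name, C{value=123} and, for an item not yet in a store,
        C{storeID=None}.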
""" reprString = repr(ItemWithDefault(value=123)) self.assertIn('value=123', reprString) self.assertIn('storeID=None', reprString) self.assertIn('ItemWithDefault', reprString) store = Store() item = ItemWithDefault(store=store, value=321) reprString = repr(item) self.assertIn('value=321', reprString) self.assertIn('storeID=%d' % (item.storeID,), reprString) self.assertIn('ItemWithDefault', reprString) def test_partiallyInitializedRepr(self): """ L{Item.__repr__} should return a C{str} giving some information, even if called before L{Item.__init__} has run completely. """ item = ItemWithDefault.__new__(ItemWithDefault) reprString = repr(item) self.assertIn('ItemWithDefault', reprString) def test_itemClassOrdering(self): """ Test that L{Item} subclasses (not instances) sort by the Item's typeName. """ A = TransactedMethodItem B = NoAttrsItem self.failUnless(A < B) self.failUnless(B >= A) self.failIf(A >= B) self.failIf(B <= A) self.failUnless(A != B) self.failUnless(B != A) self.failIf(A == B) self.failIf(B == A) def test_legacyItemComparison(self): """ Legacy items with different versions must not compare equal. """ legacy1 = declareLegacyItem('test_type', 1, {}) legacy2 = declareLegacyItem('test_type', 2, {}) self.assertNotEqual(legacy1, legacy2) self.assertEqual(legacy1, legacy1) self.assertEqual(legacy2, legacy2) def testCreateItem(self): st = store.Store() self.assertRaises(item.CantInstantiateItem, item.Item, store=st) def testCreateItemWithDefault(self): """ Test that attributes with default values can be set to None properly. """ st = store.Store() it = ItemWithDefault() it.value = None self.assertEqual(it.value, None) def test_storedCallbackAfterActivateCallback(self): """ Test that L{Item.stored} is only called after L{Item.activate} has been called. """ st = store.Store() i = StoredNoticingItem(store=st) self.assertEquals(i.activatedCount, 1) def test_storedCallbackOnAttributeSet(self): """ Test that L{Item.stored} is called when an item is actually added to a store and not before. """ st = store.Store() i = StoredNoticingItem() self.assertEquals(i.storedCount, 0) i.store = st self.assertEquals(i.storedCount, 1) def test_storedCallbackOnItemCreation(self): """ Test that L{Item.stored} is called when an item is created with a store. """ st = store.Store() i = StoredNoticingItem(store=st) self.assertEquals(i.storedCount, 1) def test_storedCallbackNotOnLoad(self): """ Test that pulling an item out of a store does not invoke its stored callback again. """ st = store.Store() storeID = StoredNoticingItem(store=st).storeID self.assertEquals(st.getItemByID(storeID).storedCount, 1) def testTransactedTransacts(self): """ Test that a method wrapped in C{axiom.item.transacted} is automatically run in a transaction. """ s = store.Store() i = TransactedMethodItem(store=s, value=u"unchanged") exc = self.assertRaises(Exception, i.method, 'a', 'b', 'c') self.assertEquals(exc.args, ("TransactedMethodItem.method test exception",)) self.assertEquals(i.value, u"unchanged") def testTransactedPassedArguments(self): """ Test that position and keyword arguments are passed through L{axiom.item.transacted}-wrapped methods correctly. 
""" s = store.Store() i = TransactedMethodItem(store=s) exc = self.assertRaises(Exception, i.method, 'a', b='b', c='c') self.assertEquals(exc.args, ("TransactedMethodItem.method test exception",)) self.assertEquals(i.calledWith, ['a', 'b', 'c']) def testTransactedPreservesAttributes(self): """ Test that the original function attributes are available on a L{axiom.item.transacted}-wrapped function. """ self.assertEquals(TransactedMethodItem.method.attribute, 'value') def testPersistentValues(self): st = store.Store() pi = itemtest.PlainItem(store=st, plain=u'hello') self.assertEqual(pi.persistentValues(), {'plain': u'hello'}) def testPersistentValuesWithoutValue(self): st = store.Store() pi = itemtest.PlainItem(store=st) self.assertEqual(pi.persistentValues(), {'plain': None}) def testCreateItemWithNoAttrs(self): st = store.Store() self.assertRaises(store.NoEmptyItems, NoAttrsItem, store=st) def testCreatePlainItem(self): st = store.Store() s = itemtest.PlainItem(store=st) def testLoadLoadedPlainItem(self): """ Test that loading an Item out of the store by its Store ID when a Python object representing that Item already exists in memory returns the same object as the one which already exists. """ st = store.Store() item = itemtest.PlainItem(store=st) self.assertIdentical(item, st.getItemByID(item.storeID)) def testLoadUnimportedPlainItem(self): """ Test that an Item in the database can be loaded out of the database, even if the module defining its Python class has not been imported, as long as its class definition has not moved since it was added to the database. """ storePath = filepath.FilePath(self.mktemp()) st = store.Store(storePath) itemID = itemtest.PlainItem(store=st, plain=u'Hello, world!!!').storeID st.close() e = os.environ.copy() # Kind of a heuristic - hmmm e['PYTHONPATH'] = os.pathsep.join(sys.path) # os.pathsep.join([dir for dir in sys.path if not dir.startswith(sys.prefix)]) d = defer.Deferred() p = ProcessOutputCollector(d) try: reactor.spawnProcess(p, sys.executable, ["python", '-Wignore', itemtestmain.__file__.rstrip('co'), storePath.path, str(itemID)], e) except NotImplementedError: raise unittest.SkipTest("Implement processes here") def cbOutput(output): self.assertEquals(''.join(output).strip(), 'Hello, world!!!') def ebBlah(err): log.err(err) self.fail(''.join(err.value.args[0].error)) return d.addCallbacks(cbOutput, ebBlah) def testDeleteCreatePair(self): # Test coverage for a bug which was present in Axiom: deleting # the newest item in a database and then creating a new item # re-used the deleted item's oid causing all manner of # ridiculuousness. st = store.Store() i = itemtest.PlainItem(store=st) oldStoreID = i.storeID i.deleteFromStore() j = itemtest.PlainItem(store=st) self.failIfEqual(oldStoreID, j.storeID) def testDeleteThenLoad(self): st = store.Store() i = itemtest.PlainItem(store=st) oldStoreID = i.storeID self.assertEquals(st.getItemByID(oldStoreID, default=1234), i) i.deleteFromStore() self.assertEquals(st.getItemByID(oldStoreID+100, default=1234), 1234) self.assertEquals(st.getItemByID(oldStoreID, default=1234), 1234) def test_duplicateDefinition(self): """ When the same typeName is defined as an item class multiple times in memory, the second definition fails with a L{RuntimeError}. 
""" class X(Item): dummy = integer() try: class X(Item): dummy = integer() except RuntimeError: pass else: self.fail("Duplicate definition should have failed.") def test_nonConflictingRedefinition(self): """ If the python item class associated with a typeName is garbage collected, a new python item class can re-use that type name. """ class X(Item): dummy = integer() del X class X(Item): dummy = integer() class TestItem(Item): """ Boring, behaviorless Item subclass used when we just need an item someplace. """ attribute = integer() class BrokenCommittedItem(Item): """ Item class which changes database state in its committed method. Don't write items like this, they're broken. """ attribute = integer() _committed = inmemory() def committed(self): Item.committed(self) if getattr(self, '_committed', None) is not None: self._committed(self) class CheckpointTestCase(TestCase): """ Tests for Item checkpointing. """ def setUp(self): self.checkpointed = [] def checkpoint(item): self.checkpointed.append(item) self.originalCheckpoint = TestItem.checkpoint.im_func TestItem.checkpoint = checkpoint def tearDown(self): TestItem.checkpoint = self.originalCheckpoint def _autocommitBrokenCommittedMethodTest(self, method): store = Store() item = BrokenCommittedItem(store=store) item._committed = method self.assertRaises(ChangeRejected, setattr, item, 'attribute', 0) def _transactionBrokenCommittedMethodTest(self, method): store = Store() item = BrokenCommittedItem(store=store) item._committed = method def txn(): item.attribute = 0 self.assertRaises(ChangeRejected, store.transact, txn) def test_autocommitBrokenCommittedMethodMutate(self): """ Test changing a persistent attribute in the committed (even if the original change was made in autocommit mode) callback raises L{ChangeRejected}. """ def mutate(self): self.attribute = 0 return self._autocommitBrokenCommittedMethodTest(mutate) def test_transactionBrokenCommittedMethodMutate(self): """ Test changing a persistent attribute in the committed callback raises L{ChangeRejected}. """ def mutate(item): item.attribute = 0 return self._transactionBrokenCommittedMethodTest(mutate) def test_autocommitBrokenCommittedMethodDelete(self): """ Test deleting an item in the committed (even if the original change was made in autocommit mode) callback raises L{ChangeRejected}. """ def delete(item): item.deleteFromStore() return self._autocommitBrokenCommittedMethodTest(delete) def test_transactionBrokenCommittedMethodDelete(self): """ Test changing a persistent attribute in the committed callback raises L{ChangeRejected}. """ def delete(item): item.deleteFromStore() return self._transactionBrokenCommittedMethodTest(delete) def test_autocommitBrokenCommittedMethodCreate(self): """ Test that creating a new item in a committed (even if the original change was made in autocommit mode) callback raises L{ChangeRejected} """ def create(item): TestItem(store=item.store) return self._autocommitBrokenCommittedMethodTest(create) def test_transactionBrokenCommittedMethodCreate(self): """ Test that creating a new item in a committed callback raises L{ChangeRejected}. """ def create(item): TestItem(store=item.store) return self._transactionBrokenCommittedMethodTest(create) def test_autocommitCheckpoint(self): """ Test that an Item is checkpointed when it is created outside of a transaction. 
""" store = Store() item = TestItem(store=store) self.assertEquals(self.checkpointed, [item]) def test_transactionCheckpoint(self): """ Test that an Item is checkpointed when the transaction it is created within is committed. """ store = Store() def txn(): item = TestItem(store=store) self.assertEquals(self.checkpointed, []) return item item = store.transact(txn) self.assertEquals(self.checkpointed, [item]) def test_queryCheckpoint(self): """ Test that a newly created Item is checkpointed before a query is executed. """ store = Store() def txn(): item = TestItem(store=store) list(store.query(TestItem)) self.assertEquals(self.checkpointed, [item]) store.transact(txn) def test_autocommitTouchCheckpoint(self): """ Test that an existing Item is checkpointed if it has an attribute changed on it. """ store = Store() item = TestItem(store=store) # Get rid of the entry that's there from creation self.checkpointed = [] item.attribute = 0 self.assertEquals(self.checkpointed, [item]) def test_transactionTouchCheckpoint(self): """ Test that in a transaction an existing Item is checkpointed if it has touch called on it and the store it is in is checkpointed. """ store = Store() item = TestItem(store=store) # Get rid of the entry that's there from creation self.checkpointed = [] def txn(): item.touch() store.checkpoint() self.assertEquals(self.checkpointed, [item]) store.transact(txn) def test_twoQueriesOneCheckpoint(self): """ Test that if two queries are performed in a transaction, a touched item only has checkpoint called on it before the first. """ store = Store() item = TestItem(store=store) # Get rid of the entry that's there from creation self.checkpointed = [] def txn(): item.touch() list(store.query(TestItem)) self.assertEquals(self.checkpointed, [item]) list(store.query(TestItem)) self.assertEquals(self.checkpointed, [item]) store.transact(txn) Axiom-0.6.0/axiom/test/test_listversions.py0000644000175000017500000000725711117560627021000 0ustar exarkunexarkun# Copyright 2008 Divmod, Inc. # See LICENSE file for details """ Tests for Axiom store version history. """ import sys, StringIO from twisted.trial import unittest from twisted.python.versions import Version from axiom.store import Store from axiom import version as axiom_version from axiom.listversions import (getSystemVersions, SystemVersion, checkSystemVersion) from axiom.scripts.axiomatic import Options as AxiomaticOptions from axiom.test.util import CommandStubMixin from axiom.plugins.axiom_plugins import ListVersions class SystemVersionTests(unittest.TestCase, CommandStubMixin): """ Tests for recording the versions of software used to open a store throughout its lifetime. """ def setUp(self): """ Create an on-disk store. """ self.dbdir = self.mktemp() self.store = Store(self.dbdir) def _reopenStore(self): """ Close the store and reopen it. """ self.store.close() self.store = Store(self.dbdir) def test_getSystemVersions(self): """ L{getSystemVersions} returns all the version plugins it finds. """ someVersions = [Version("foo", 1, 2, 3), Version("baz", 0, 0, 1)] def getSomeVersions(iface, package): return someVersions self.assertEqual(getSystemVersions(getSomeVersions), someVersions) def test_checkSystemVersion(self): """ Calling checkSystemVersion: 1. Doesn't duplicate the system version when called with the same software package versions. 2. Creates a new system version when one of the software package versions has changed. 3. Notices and creates a new system version when the system config has reverted to a previous state. 
""" versions = [Version("foo", 1, 2, 3)] checkSystemVersion(self.store, versions) checkSystemVersion(self.store, versions) query_results = list(self.store.query(SystemVersion)) self.assertEquals(len(query_results), 1) # Adjust a version number and try again. v = versions[0] versions[0] = Version(v.package, v.major, v.minor + 1, v.micro) checkSystemVersion(self.store, versions) query_results = list(self.store.query(SystemVersion)) self.assertEquals(len(query_results), 2) # Revert the version number and try again. versions[0] = v checkSystemVersion(self.store, versions) query_results = list(self.store.query(SystemVersion)) self.assertEquals(len(query_results), 3) # Reopening the store does not duplicate the version. self._reopenStore() query_results = list(self.store.query(SystemVersion)) self.assertEquals(len(query_results), 3) def test_commandLine(self): """ L{ListVersions} will list versions of code used in this store when invoked as an axiomatic subcommand. """ checkSystemVersion(self.store) out = StringIO.StringIO() self.patch(sys, 'stdout', out) lv = ListVersions() lv.parent = self lv.parseOptions([]) result = out.getvalue() self.assertSubstring("axiom: " + axiom_version.short(), result) def test_axiomaticSubcommand(self): """ L{ListVersions} is available as a subcommand of I{axiomatic}. """ subCommands = AxiomaticOptions().subCommands [options] = [cmd[2] for cmd in subCommands if cmd[0] == 'list-version'] self.assertIdentical(options, ListVersions) Axiom-0.6.0/axiom/test/test_mixin.py0000644000175000017500000000170310330443652017340 0ustar exarkunexarkun from twisted.trial.unittest import TestCase from axiom.item import Item from axiom.attributes import integer from axiom.slotmachine import hyper as super __metaclass__ = type class X: xm = 0 def m(self): self.xm += 1 return self.xm class Y(X): ym = 0 def m(self): ret = super(Y, self).m() self.ym += 1 ret += 1 return ret class Z(X): zm = 0 def m(self): ret = super(Z, self).m() ret += 1 self.zm += 1 return ret class XYZ(Y, Z): pass class ItemXYZ(Item, XYZ): typeName = 'item_xyz' schemaVersion = 1 xm = integer(default=0) ym = integer(default=0) zm = integer(default=0) class TestBorrowedMixins(TestCase): def testSanity(self): xyz = XYZ() val = xyz.m() self.assertEquals(val, 3) def testItemSanity(self): xyz = ItemXYZ() val = xyz.m() self.assertEquals(val, 3) Axiom-0.6.0/axiom/test/test_paginate.py0000644000175000017500000001502410542065307020007 0ustar exarkunexarkun# Copyright 2006 Divmod, Inc. See LICENSE file for details """ This module contains tests for the L{axiom.store.ItemQuery.paginate} method. """ from twisted.trial.unittest import TestCase from axiom.store import Store from axiom.item import Item from axiom.attributes import integer, compoundIndex from axiom.test.util import QueryCounter class SingleColumnSortHelper(Item): mainColumn = integer(indexed=True) other = integer() compoundIndex(mainColumn, other) class MultiColumnSortHelper(Item): columnOne = integer() columnTwo = integer() compoundIndex(columnOne, columnTwo) class CrossTransactionIteration(TestCase): def test_separateTransactions(self): """ Verify that 'paginate' is iterable in separate transactions. 
""" s = Store() b1 = SingleColumnSortHelper(store=s, mainColumn=1) b2 = SingleColumnSortHelper(store=s, mainColumn=2) b3 = SingleColumnSortHelper(store=s, mainColumn=3) itr = s.transact(lambda : iter(s.query(SingleColumnSortHelper).paginate())) self.assertIdentical(s.transact(itr.next), b1) self.assertEquals(s.transact(lambda : (itr.next(), itr.next())), (b2, b3)) self.assertRaises(StopIteration, lambda : s.transact(itr.next)) def test_moreItemsNotMoreWork(self): """ Verify that each step of a paginate does not become more work as items are added. """ s = Store() self._checkEfficiency(s.query(SingleColumnSortHelper)) def test_moreItemsNotMoreWorkSorted(self): """ Verify that each step of a paginate does not become more work as more items are added even if a sort is given. """ s = Store() self._checkEfficiency(s.query(SingleColumnSortHelper, sort=SingleColumnSortHelper.mainColumn.ascending)) def test_moreItemsNotMoreWorkRestricted(self): s = Store() self._checkEfficiency(s.query(SingleColumnSortHelper, SingleColumnSortHelper.other == 6, sort=SingleColumnSortHelper.mainColumn.ascending)) def _checkEfficiency(self, qry): s = qry.store mnum = [0] def more(): mnum[0] += 1 SingleColumnSortHelper(store=s, mainColumn=mnum[0], other=6) for i in range(5): more() qc = QueryCounter(s) # Sanity check: calling paginate() shouldn't do _any_ DB work. L = [] m = qc.measure( # Let's also keep the page-size to 1, forcing the implementation to # get exactly 1 item each time. (Otherwise the first N items will # take a fixed amount of work, the next 10, and so on, but each # subsequent item will take 0, breaking our attempt to measure # below) lambda : L.append(qry.paginate(pagesize=1))) self.assertEquals(m, 0) y = L.pop() g = iter(y) # startup costs a little more, so ignore that # s.debug = True what = qc.measure(g.next) # 1 oneunit = qc.measure(g.next) # 2 otherunit = qc.measure(g.next) self.assertEquals(otherunit, oneunit) # 3 # Now, make some more data for i in range(3): more() # and make sure that doesn't increase the amount of work self.assertEquals(qc.measure(g.next), oneunit) # 4 self.assertEquals(qc.measure(g.next), oneunit) # 5 self.assertEquals(qc.measure(g.next), oneunit) # 6 # one more sanity check - we're at the end. self.assertEquals(g.next().mainColumn, 7) self.assertEquals(g.next().mainColumn, 8) self.assertEquals(list(g), []) def test_storeIDTiebreaker(self): """ Verify that items whose sort column are identical are all returned and deterministically ordered. """ s = Store() x = [SingleColumnSortHelper(store=s, mainColumn=1234) for nothing in range(10)] first = SingleColumnSortHelper(store=s, mainColumn=1233) last = SingleColumnSortHelper(store=s, mainColumn=1235) # This is sensitive to page size, so let's test it at lots of places # where edge-cases are likely to develop in the implementation. for pagesize in range(1, 30) + [1000]: # The ordering here in the asserts might look a little weird - that we # ascend by storeID in both cases regardless of the order of the sort, # but it's intentional. The storeID is merely to be a tiebreaker to # provide a stable sort. You could be sorting by any number of # compound columns, 'ascending' for your particular column might mean # something odd or contradictory to 'ascending' for storeID's # 'ascending'. If you want guaranteed stability on storeID, do that. 
self.assertEqual( list(s.query( SingleColumnSortHelper, sort=SingleColumnSortHelper.mainColumn.descending ).paginate(pagesize=pagesize)), [last] + x + [first]) self.assertEqual( list(s.query( SingleColumnSortHelper, sort=SingleColumnSortHelper.mainColumn.ascending ).paginate(pagesize=pagesize)), [first] + x + [last]) def test_moreThanOneColumnSort(self): """ Verify that paginate works with queries that have complex sort expressions. Note: it doesn't. """ s = Store() x = MultiColumnSortHelper(store=s, columnOne=1, columnTwo=9) y1 = MultiColumnSortHelper(store=s, columnOne=2, columnTwo=1) y2 = MultiColumnSortHelper(store=s, columnOne=2, columnTwo=2) y3 = MultiColumnSortHelper(store=s, columnOne=2, columnTwo=3) y4 = MultiColumnSortHelper(store=s, columnOne=2, columnTwo=4) z = MultiColumnSortHelper(store=s, columnOne=3, columnTwo=5) self.assertEquals(list( s.query(MultiColumnSortHelper, sort=[MultiColumnSortHelper.columnOne.ascending, MultiColumnSortHelper.columnTwo.ascending] ).paginate(pagesize=1)), [x, y1, y2, y3, y4, z]) test_moreThanOneColumnSort.todo = ( "There's no use-case for this yet, but it would be a consistent " "extension of the API.") Axiom-0.6.0/axiom/test/test_powerup.py0000644000175000017500000002122311073411205017706 0ustar exarkunexarkun from twisted.trial import unittest from axiom.item import Item from axiom.store import Store from axiom.iaxiom import IPowerupIndirector from axiom.attributes import integer, inmemory, reference from zope.interface import Interface, implements, Attribute class IValueHaver(Interface): value = Attribute(""" An integer that you can add to other integers. """) class IScalingFactor(Interface): scale = Attribute(""" An integer that a sum can be multiplied by. """) class ISumProducer(Interface): def doSum(): """ Produce a sum. """ class SumContributor(Item): schemaVersion = 1 typeName = 'test_sum_contributor' value = integer() class MinusThree(object): implements(IValueHaver) def __init__(self, otherValueHaver): self.value = otherValueHaver.value - 3 class SubtractThree(Item): schemaVersion = 1 typeName = 'test_powerup_indirection_subtractthree' valueHaver = reference() implements(IPowerupIndirector) def indirect(self, iface): assert iface is IValueHaver, repr(iface) return MinusThree(self.valueHaver) class PlusTwo(Item): """ Example powerup with installation information. """ implements(IValueHaver) powerupInterfaces = (IValueHaver,) value = integer(default=2) class PlusOneTimesFour(Item): """ Example powerup with dynamic installation information. 
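    Each pair yielded by C{__getPowerupInterfaces__} below is presumably
    C{(interface, priority)}: this item is installed for L{IScalingFactor}
    at priority 1 and for L{IValueHaver} at priority 3.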
""" implements(IScalingFactor, IValueHaver) scale = integer(default=1) value = integer(default=4) def __getPowerupInterfaces__(self, powerup): yield (IScalingFactor, 1) yield (IValueHaver, 3) class Summer(Item): schemaVersion = 1 typeName = 'test_sum_doer' sumTimes = integer() sumTotal = integer() def __init__(self, **kw): super(Summer, self).__init__(**kw) self.sumTotal = 0 self.sumTimes = 0 def doSum(self): total = 0 for haver in self.store.powerupsFor(IValueHaver): value = haver.value self.sumTotal += value total += value self.sumTimes += 1 for factor in self.store.powerupsFor(IScalingFactor): value = factor.scale self.sumTotal *= value total *= value return total class BrokenPowerup(Item): stuff = integer() def __getPowerupInterfaces__(self, pifs): return 'not a list of pairs' class PowerUpTest(unittest.TestCase): def testBasicPowerups(self): # tests an interaction between __conform__ and other stuff s = Store() mm = Summer(store=s) s.powerUp(mm, ISumProducer) s.powerUp(SumContributor(store=s, value=1), IValueHaver) s.powerUp(SumContributor(store=s, value=2), IValueHaver) s.powerUp(SumContributor(store=s, value=3), IValueHaver) self.assertEquals(mm.doSum(), 6) s.close() def testPowerupIdentity(self): s = Store() mm = Summer(store=s) s.powerUp(mm, ISumProducer) sc3 = SumContributor(store=s, value=3) s.powerUp(SumContributor(store=s, value=1), IValueHaver) s.powerUp(SumContributor(store=s, value=2), IValueHaver) s.powerUp(sc3, IValueHaver) s.powerUp(sc3, IValueHaver) self.assertEquals(mm.doSum(), 6) s.close() def test_automaticPowerupInstall(self): """ Powerups with 'powerupInterfaces' attributes can be installed on those interfaces without the caller needing to refer to them directly. """ s = Store() mm = Summer(store=s) s.powerUp(mm, ISumProducer) p = PlusTwo(store=s) s.powerUp(p) self.assertEquals(mm.doSum(), 2) def test_dynamicAutomaticPowerupInstall(self): """ Powerups with '__getPowerupInterfaces__' methods can be installed on the interfaces in the iterable that method returns. """ s = Store() mm = Summer(store=s) s.powerUp(mm, ISumProducer) p = PlusOneTimesFour(store=s) s.powerUp(p) self.assertEquals(mm.doSum(), 4) def test_dynamicAutomaticPowerupFailure(self): """ Powerups with '__getPowerupInterfaces__' methods that don't return iterables of pairs report an informative error message when powered up. """ s = Store() mm = Summer(store=s) s.powerUp(mm, ISumProducer) p = BrokenPowerup(store=s) err = self.assertRaises(ValueError, s.powerUp, p) self.assertEquals(str(err), 'return value from %r.__getPowerupInterfaces__' ' not an iterable of 2-tuples' % (p,)) def test_automaticPowerDown(self): """ Powerups with 'powerupInterfaces' attributes can be powered down automatically on the interfaces they specify. """ s = Store() p = PlusTwo(store=s) s.powerUp(p) s.powerDown(p) self.assertEquals(len(list(s.powerupsFor(IValueHaver))), 0) def test_automaticDynamicPowerDown(self): """ Powerups with '__getPowerupInterfaces__' methods can be powered down automatically on the interfaces they specify. """ s = Store() p = PlusOneTimesFour(store=s) s.powerUp(p) s.powerDown(p) self.assertEquals(len(list(s.powerupsFor(IValueHaver))), 0) self.assertEquals(len(list(s.powerupsFor(IScalingFactor))), 0) def testIndirectedPowerups(self): """ Powerups which implement L{IPowerupIndirector} should not be returned directly, the values that they return from indirect() should be returned directly. 
""" s = Store() mm = Summer(store=s) s.powerUp( SubtractThree( store=s, valueHaver=SumContributor(store=s, value=5)), IValueHaver) self.assertEquals(mm.doSum(), 2) s.close() def testNoIndirectedIndirection(self): """ Because it is a special interface in the powerup system, you can't have powerups for IPowerupIndirector; there's no sensible thing that could mean other than an infinite loop. Let's make sure that both looking for IPowerupIndirector and attempting to install a powerup for it will fail appropriately. """ s = Store() s3 = SubtractThree(store=s) self.assertRaises(TypeError, s.powerUp, s3, IPowerupIndirector) self.assertEqual(list(s.powerupsFor(IPowerupIndirector)), []) from twisted.application.service import IService, Service class SillyService(Item, Service): typeName = 'test_silly_service' schemaVersion = 1 started = integer(default=0) stopped = integer(default=0) running = integer(default=0) parent = inmemory() def startService(self): self.started += 1 self.running = 1 def stopService(self): assert self.running self.running = 0 self.stopped += 1 class SpecialCaseTest(unittest.TestCase): def testStoreServicePowerup(self): s = Store() ss = SillyService(store=s) s.powerUp(ss, IService) IService(s).startService() IService(s).stopService() self.assertEquals(ss.started, 1) self.assertEquals(ss.stopped, 1) self.assertEquals(ss.running, 0) def testItemServicePowerup(self): s = Store() sm = Summer(store=s) ss = SillyService(store=s) sm.powerUp(ss, IService) IService(sm).startService() IService(sm).stopService() self.assertEquals(ss.started, 1) self.assertEquals(ss.stopped, 1) self.assertEquals(ss.running, 0) class InMemoryPowerupTests(unittest.TestCase): """ Tests for the behavior of powerups which are not database-resident. """ def test_powerupsFor(self): """ L{Item.powerupsFor} returns a list the first element of which is the object previously passed to L{Item.inMemoryPowerUp}. """ powerup = object() item = SumContributor(store=Store()) item.inMemoryPowerUp(powerup, ISumProducer) self.assertEqual(list(item.powerupsFor(ISumProducer)), [powerup]) def test_inMemoryPriority(self): """ Adapting an L{Item} to an interface results in the in-memory powerup on that item for that interface even if there are database-resident powerups on that item for that interface. """ powerup = object() item = SumContributor(store=Store()) item.inMemoryPowerUp(powerup, ISumProducer) item.powerUp(item, ISumProducer) self.assertIdentical(ISumProducer(item), powerup) Axiom-0.6.0/axiom/test/test_pysqlite2.py0000644000175000017500000000163410665061677020171 0ustar exarkunexarkun""" Test cases for PySQLite2-specific parts of the backend. """ from pysqlite2.dbapi2 import OperationalError from twisted.trial.unittest import TestCase from axiom._pysqlite2 import Connection from axiom.test.cursortest import ConnectionTestCaseMixin, StubConnection class PySQLite2StubConnection(StubConnection): def timeout(self): raise OperationalError('database is locked') class ConnectionTestCase(ConnectionTestCaseMixin, TestCase): expectedUnderlyingExceptionClass = OperationalError def createStubConnection(self, *a, **kw): return PySQLite2StubConnection(*a, **kw) def createAxiomConnection(self, underlyingConnection, *a, **kw): return Connection(underlyingConnection, *a, **kw) def createRealConnection(self): """ Create a memory-backed connection for integration testing. 
""" return Connection.fromDatabaseName(":memory:") Axiom-0.6.0/axiom/test/test_query.py0000644000175000017500000017012711127007037017366 0ustar exarkunexarkun import operator, random from twisted.trial.unittest import TestCase, SkipTest from axiom.iaxiom import IComparison, IColumn from axiom.store import Store, ItemQuery, MultipleItemQuery from axiom.item import Item, Placeholder from axiom.test.util import QueryCounter from axiom import errors from axiom.attributes import ( reference, text, bytes, integer, AND, OR, TableOrderComparisonWrapper) class A(Item): schemaVersion = 1 typeName = 'a' reftoc = reference() type = text(indexed=True) class B(Item): schemaVersion = 1 typeName = 'b' cref = reference() name = text(indexed=True) class C(Item): schemaVersion = 1 typeName = 'c' name = text(indexed=True) class D(Item): schemaVersion = 1 typeName = 'd' id = bytes() one = bytes() two = bytes() three = bytes() four = text() class E(Item): schemaVersion = 1 typeName = 'e' name = text() transaction = text() amount = integer() class ThingWithCharacterAndByteStrings(Item): schemaVersion = 1 typeName = 'ThingWithCharacterAndByteStrings' characterString = text(caseSensitive=True) caseInsensitiveCharString = text(caseSensitive=False) byteString = bytes() class BasicQuery(TestCase): def test_rightHandStoreIDComparison(self): """ Test that a StoreID column on the right-hand side of an equality test results in a TwoAttributeComparison object rather than an AttributeValueComparison or anything else that would be wrong. """ s = Store() comparison = (A.reftoc == B.storeID) self.assertEquals( comparison.getQuery(s), '(%s.[reftoc] = %s.oid)' % ( A.getTableName(s), B.getTableName(s))) self.assertEquals(comparison.getArgs(s), []) def test_leftHandStoreIDComparison(self): """ Test that a StoreID column on the left-hand side of an equality test results in a TwoAttributeComparison object rather than an AttributeValueComparison or anything else that would be wrong. """ s = Store() comparison = (B.storeID == A.reftoc) self.assertEquals( comparison.getQuery(s), '(%s.oid = %s.[reftoc])' % ( B.getTableName(s), A.getTableName(s))) self.assertEquals(comparison.getArgs(s), []) def test_simplestQuery(self): """ Test that an ItemQuery with no comparison, sorting, or limit generates the right SQL for that operation. """ s = Store() query = ItemQuery(s, A) sql, args = query._sqlAndArgs('SELECT', '*') self.assertEquals( sql, 'SELECT * FROM %s' % (A.getTableName(s),)) self.assertEquals(args, []) def test_simpleIntegerComparison(self): """ Test that an ItemQuery with a single attribute comparison on an integer attribute generates SQL with the right WHERE clause. """ s = Store() query = ItemQuery(s, E, E.amount == 0) sql, args = query._sqlAndArgs('SELECT', '*') self.assertEquals( sql, 'SELECT * FROM %s WHERE (%s.[amount] = ?)' % ( E.getTableName(s), E.getTableName(s))) self.assertEquals(args, [0]) def test_simpleReferenceComparison(self): """ Test that an ItemQuery with a single attribute comparison on a reference attribute generates SQL with the right WHERE clause. 
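        The expected shape, with the concrete table name filled in by
        C{A.getTableName(s)}, is::

            SELECT * FROM <table> WHERE (<table>.[reftoc] = <table>.oid)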
""" s = Store() query = ItemQuery(s, A, A.reftoc == A.storeID) sql, args = query._sqlAndArgs('SELECT', '*') self.assertEquals( sql, 'SELECT * FROM %s WHERE (%s.[reftoc] = %s.oid)' % ( A.getTableName(s), A.getTableName(s), A.getTableName(s))) self.assertEquals(args, []) def test_reversedReferenceComparison(self): """ Test that an ItemQuery with a single attribute comparison on a reference attribute with the storeID part on the left-hand side generates SQL with the right WHERE clause. """ s = Store() query = ItemQuery(s, A, A.storeID == A.reftoc) sql, args = query._sqlAndArgs('SELECT', '*') self.assertEquals( sql, 'SELECT * FROM %s WHERE (%s.oid = %s.[reftoc])' % ( A.getTableName(s), A.getTableName(s), A.getTableName(s))) self.assertEquals(args, []) def test_unionComparison(self): """ Test that an ItemQuery with two comparisons joined with an L{AND} generates the right WHERE clause. """ s = Store() query = ItemQuery(s, A, AND(A.reftoc == B.storeID, B.cref == C.storeID)) sql, args = query._sqlAndArgs('SELECT', '*') self.assertEquals( sql, 'SELECT * FROM %s, %s, %s ' 'WHERE ((%s.[reftoc] = %s.oid) AND ' '(%s.[cref] = %s.oid))' % ( A.getTableName(s), B.getTableName(s), C.getTableName(s), A.getTableName(s), B.getTableName(s), B.getTableName(s), C.getTableName(s))) self.assertEquals(args, []) def testBasicQuery(self): s = Store() def entesten(): c1 = C(store=s, name=u'yes') c2 = C(store=s, name=u'no') A(store=s, reftoc=c1, type=u'testc') A(store=s, reftoc=c2, type=u'testc') A(store=s, reftoc=c1, type=u'testx') yesb = B(store=s, cref=c1, name=u'correct') B(store=s, cref=c2, name=u'not correct') s.checkpoint() q = list(s.query(B, AND(AND(C.name == u'yes', A.type == u'testc'), AND(C.storeID == B.cref, A.reftoc == C.storeID)), )) self.assertEquals(q, [yesb]) s.transact(entesten) s.close() def testStringQueries(self): s = Store() def createAndStuff(): text1 = u'Hello, \u1234 world.' text2 = u'ThIs sTrInG iS nOt cAsE sEnSiTIvE. 
\u4567' bytes1 = '\x00, punk' x = ThingWithCharacterAndByteStrings( store=s, characterString=text1, caseInsensitiveCharString=text2, byteString=bytes1) x.checkpoint() q = list( s.query(ThingWithCharacterAndByteStrings, ThingWithCharacterAndByteStrings.characterString == text1.lower(), )) self.failIf(q, q) q = list( s.query(ThingWithCharacterAndByteStrings, ThingWithCharacterAndByteStrings.characterString == text1.upper(), )) self.failIf(q, q) q = list( s.query(ThingWithCharacterAndByteStrings, ThingWithCharacterAndByteStrings.characterString == text1, )) self.assertEquals(q, [x]) q = list( s.query(ThingWithCharacterAndByteStrings, ThingWithCharacterAndByteStrings.caseInsensitiveCharString == text2, )) self.assertEquals(q, [x]) q = list( s.query(ThingWithCharacterAndByteStrings, ThingWithCharacterAndByteStrings.caseInsensitiveCharString == text2.lower(), )) self.assertEquals(q, [x]) q = list( s.query(ThingWithCharacterAndByteStrings, ThingWithCharacterAndByteStrings.caseInsensitiveCharString == text2.upper(), )) self.assertEquals(q, [x]) q = list( s.query(ThingWithCharacterAndByteStrings, ThingWithCharacterAndByteStrings.byteString == bytes1, )) self.assertEquals(q, [x]) q = list( s.query(ThingWithCharacterAndByteStrings, ThingWithCharacterAndByteStrings.byteString == bytes1.upper(), )) self.failIf(q, q) s.transact(createAndStuff) s.close() def testAggregateQueries(self): s = Store() def entesten(): self.assertEquals(s.query(E).count(), 0) self.assertEquals(s.query(E).getColumn("amount").sum(), 0) e1 = E(store=s, name=u'widgets', amount=37) e2 = E(store=s, name=u'widgets', amount=63) e3 = E(store=s, name=u'quatloos', amount=99, transaction=u'yes') s.checkpoint() q = s.count(E, E.name == u'widgets') self.failUnlessEqual(q, 2) q = s.sum(E.amount, E.name == u'widgets') self.failUnlessEqual(q, 100) s.transact(entesten) s.close() def testAttributeQueries(self): s = Store() def entesten(): E(store=s, name=u'b', amount=456) E(store=s, name=u'a', amount=123) E(store=s, name=u'c', amount=789) self.assertEquals(list(s.query(E, sort=E.name.ascending).getColumn("amount")), [123, 456, 789]) s.transact(entesten) s.close() def testAttributeQueryCount(self): s = Store() def entesten(): E(store=s, name=u'a', amount=123) E(store=s, name=u'b', amount=456) E(store=s, name=u'c') # no amount given self.assertEquals(s.query(E).getColumn("amount").count(), 2) s.transact(entesten) s.close() def testAttributeQueryDistinct(self): s = Store() def entesten(): E(store=s, name=u'a', amount=123) E(store=s, name=u'b', amount=789) E(store=s, name=u'a', amount=456) self.assertEquals(list(s.query(E, sort=E.name.ascending).getColumn("name").distinct()), [u"a", u"b"]) s.transact(entesten) s.close() def test_distinctQuerySQLiteBug(self): """ Test for an SQLite bug. SQLite versions 3.5.8 and 3.5.9 have a bug that causes incorrect query results under certain circumstances: - A column with an index - SELECT DISTINCT query... - ... with an IS NOT NULL comparison on that column. Upstream ticket: http://www.sqlite.org/cvstrac/tktview?tn=3236 """ s = Store() def entesten(): C(store=s, name=u'a') C(store=s, name=u'b') q = s.query(C, C.name != None).getColumn('name').distinct() self.assertEqual(sorted(q), [u'a', u'b']) s.transact(entesten) s.close() def test_itemQueryDistinct(self): """ Verify that a join which would produce duplicate rows will not produce duplicate item results. 
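        For example, three B items referring to the same C should yield
        that C exactly once from
        C{s.query(C, B.cref == C.storeID).distinct()}.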
""" s = Store() def entesten(): theC = C(store=s, name=u'it') B(store=s, cref=theC) B(store=s, cref=theC) B(store=s, cref=theC) self.assertEquals(list(s.query(C, B.cref == C.storeID).distinct()), [theC]) s.transact(entesten) s.close() def test_itemQueryDistinctCount(self): """ Like L{test_itemQueryDistinct} but for the C{count} method of a distinct item query. """ s = Store() def entesten(): for n, name in (2, u'it'), (3, u'indefinite nominative'): theC = C(store=s, name=name) for i in range(n): B(store=s, cref=theC) self.assertEquals( s.query(C, B.cref == C.storeID).distinct().count(), 2) s.transact(entesten) s.close() def test_itemQueryLimitAttribute(self): """ L{ItemQuery} implements LI{IQuery} and should provide a 'limit' attribute depending on how it was created. """ s = Store() q = s.query(C, limit=3) self.assertEqual(q.limit, 3) def test_itemQueryClone(self): """ L{ItemQuery.cloneQuery} should return a new L{ItemQuery} which is equivalent, but not identical, to the original query. """ s = Store() q1 = s.query(C, limit=3) q2 = q1.cloneQuery() self.assertEqual(q1.store, q2.store) self.assertEqual(q2.limit, q1.limit) self.assertNotIdentical(q1, q2) def test_itemQueryLimitSetToNone(self): """ L{ItemQuery.cloneQuery} should set the limit attribute back to None when None is passed. (e.g. it should not use None as the default value. HAX!) """ s = Store() q1 = s.query(C, limit=3) q2 = q1.cloneQuery(limit=None) self.assertEqual(q1.store, q2.store) self.assertEqual(q2.limit, None) def test_itemQueryCloneLimit(self): """ L{ItemQuery} implements L{IQuery} and should provide a 'cloneQuery' method which can accept a 'limit' parameter to change its limit. """ s = Store() q1 = s.query(C, limit=3) q2 = q1.cloneQuery(5) self.assertEqual(q1.store, q2.store) self.assertEqual(q2.limit, 5) def test_itemQueryStoreAttribute(self): """ L{ItemQuery} implements L{IQuery} and should provide a 'store' attribute that points to the store it will yield items from. """ s = Store() q = s.query(C) self.assertEqual(q.store, s) def test_itemQueryDistinctLimitAttribute(self): """ L{_DistinctQuery} implements L{IQuery} and should provide a 'limit' attribute depending on how it was created. """ s = Store() q = s.query(C, limit=3).distinct() self.assertEqual(q.limit, 3) def test_itemQueryDistinctClone(self): """ L{_DistinctQuery.cloneQuery} should return a new L{ItemQuery} which is equivalent, but not identical, to the original query. """ s = Store() q1 = s.query(C, limit=3).distinct() q2 = q1.cloneQuery() self.assertEqual(q1.store, q2.store) self.assertEqual(q2.limit, q1.limit) def test_itemQueryDistinctCloneLimit(self): """ L{_DistinctQuery} implements L{IQuery} and should provide a 'cloneQuery' method which can accept a 'limit' parameter to change its limit. """ s = Store() q1 = s.query(C, limit=3).distinct() q2 = q1.cloneQuery(5) self.assertEqual(q1.store, q2.store) self.assertEqual(q2.limit, 5) def test_itemQueryDistinctStoreAttribute(self): """ L{_DistinctQuery} implements L{IQuery} and should provide a 'store' attribute that points to the store it will yield items from. 
""" s = Store() q = s.query(C).distinct() self.assertEqual(q.store, s) def testAttributeQueryMinMax(self): s = Store() def entesten(): E(store=s, amount=-4) E(store=s, amount=10) E(store=s, amount=99) E(store=s, amount=456) self.assertEquals(s.query(E).getColumn("amount").min(), -4) self.assertEquals(s.query(E).getColumn("amount").max(), 456) self.assertRaises(ValueError, s.query(D).getColumn("id").max) self.assertRaises(ValueError, s.query(D).getColumn("id").min) self.assertEquals(s.query(D).getColumn("id").min(default=41), 41) self.assertEquals(s.query(D).getColumn("id").max(default=42), 42) s.transact(entesten) s.close() def test_attributeQueryLimitAttribute(self): """ L{AttributeQuery} implements LI{IQuery} and should provide a 'limit' attribute depending on how it was created. """ s = Store() q = s.query(C, limit=3).getColumn('name') self.assertEqual(q.limit, 3) def test_attributeQueryClone(self): """ L{AttributeQuery.cloneQuery} should return a new L{AttributeQuery} which is equivalent, but not identical, to the original query. """ s = Store() q1 = s.query(C, limit=3).getColumn('name') q2 = q1.cloneQuery() self.assertEqual(q1.store, q2.store) self.assertEqual(q2.limit, q1.limit) def test_attributeQueryCloneLimit(self): """ L{AttributeQuery} implements L{IQuery} and should provide a 'cloneQuery' method which can accept a 'limit' parameter to change its limit. """ s = Store() q1 = s.query(C, limit=3).getColumn('name') q2 = q1.cloneQuery(5) self.assertEqual(q1.store, q2.store) self.assertEqual(q2.limit, 5) self.assertEqual(q1.attribute, q2.attribute) self.assertEqual(q1.raw, q2.raw) def test_attributeQueryStoreAttribute(self): """ L{AttributeQuery} implements L{IQuery} and should provide a 'store' attribute that points to the store it will yield attributes from. """ s = Store() q = s.query(C).getColumn('name') self.assertEqual(q.store, s) class MultipleQuery(TestCase): """ Test cases for queries that yield multiple item types. """ def test_basicJoin(self): """ Verify that querying for multiple Item classes gives us the right number and type of Items, in a plausible order. """ s = Store() def entesten(): for i in range(3): c = C(store=s, name=u"C.%s" % i) B(store=s, name=u"B.%s" % (2-i), cref=c) query = s.query( (B, C), B.cref == C.storeID, sort=C.name.ascending) self.assertEquals(query.count(), 3) result = iter(query).next() self.assertEquals(len(result), 2) b, c = result self.assertTrue(isinstance(b, B)) self.assertTrue(isinstance(c, C)) self.assertEquals(b.name, u"B.2") self.assertEquals(c.name, u"C.0") s.transact(entesten) s.close() def test_count(self): """ Verify that count() gives the right result in the presence of offset and limit. """ s = Store() def entesten(): for i in range(3): c = C(store=s, name=u"C.%s" % i) B(store=s, name=u"B.%s" % (2-i), cref=c) B(store=s, name=u"B2.%s" % (2-i), cref=c) query = s.query( (B, C), B.cref == C.storeID) totalCombinations = 6 self.assertEquals(query.count(), totalCombinations) for offset in range(totalCombinations): for limit in range(totalCombinations + 1): query = s.query( (B, C), B.cref == C.storeID, offset=offset, limit=limit ) expectedCount = min((totalCombinations-offset), limit) actualCount = query.count() self.assertEquals(actualCount, expectedCount, "Got %s results with offset %s, limit %s" % ( actualCount, offset, limit)) s.transact(entesten) s.close() def test_distinct(self): """ Verify that distinct gives the right answers for a multiple item queries. 
""" s = Store() def entesten(): for i in range(3): c = C(store=s, name=u"C.%s" % i) b = B(store=s, name=u"B.%s" % i, cref=c) a = A(store=s, type=u"A.%s" % i, reftoc=b) a = A(store=s, type=u"A.%s" % i, reftoc=b) query = s.query( (B, C), AND(B.cref == C.storeID, A.reftoc == B.storeID), sort = C.name.ascending ) self.assertEquals(query.count(), 6) distinct = query.distinct() self.assertEquals(distinct.count(), 3) for i, (b, c) in enumerate(query.distinct()): self.assertEquals(b.name, u"B.%s" % i) self.assertEquals(c.name, u"C.%s" % i) s.transact(entesten) s.close() def test_tree(self): """ Verify that queries using the same Item class more than once behave correctly. """ s = Store() def entesten(): pops = B(store=s, name=u"Pops") dad = B(store=s, name=u"Dad", cref=pops) bro = B(store=s, name=u"Bro", cref=dad) sis = B(store=s, name=u"Sis", cref=dad) Gen1 = Placeholder(B) Gen2 = Placeholder(B) Gen3 = Placeholder(B) query = s.query( (Gen1, Gen2, Gen3), AND(Gen3.cref == Gen2.storeID, Gen2.cref == Gen1.storeID), sort=Gen3.name.ascending ) self.assertEquals(query.count(), 2) self.assertEquals(tuple(b.name for b in iter(query).next()), (u"Pops", u"Dad", u"Bro")) s.transact(entesten) s.close() def test_oneTuple(self): """ Verify that tuples of length one don't do anything crazy. """ s = Store() def entesten(): for i in range(3): C(store=s, name=u"C.%s" % i) query = s.query( (C,), sort=C.name.ascending) self.assertEquals(query.count(), 3) results = iter(query) for i in range(3): result = results.next() self.assertEquals(len(result), 1) c, = result self.assertTrue(isinstance(c, C), "%s is not a C" % c) self.assertEquals(c.name, u"C.%s" % i, i) s.transact(entesten) s.close() def test_emptyTuple(self): """ Verify that empty tuples don't give SQL crashes. """ s = Store() def entesten(): self.assertRaises(ValueError, s.query, ()) s.transact(entesten) s.close() def test_limitAttribute(self): """ L{MultipleItemQuery} implements LI{IQuery} and should provide a 'limit' attribute depending on how it was created. """ s = Store() q = s.query((B, C), limit=3) self.assertEqual(q.limit, 3) def test_clone(self): """ L{MultipleItemQuery.cloneQuery} should return a new L{MultipleItemQuery} which is equivalent, but not identical, to the original query. """ s = Store() q1 = s.query((B, C), limit=3) q2 = q1.cloneQuery() self.assertEqual(q1.store, q2.store) self.assertEqual(q2.limit, q1.limit) def test_cloneLimit(self): """ L{MultipleItemQuery} implements L{IQuery} and should provide a 'cloneQuery' method which can accept a 'limit' parameter to change its limit. """ s = Store() q1 = s.query((B, C), limit=3) q2 = q1.cloneQuery(5) self.assertEqual(q1.store, q2.store) self.assertEqual(q2.limit, 5) def test_storeAttribute(self): """ L{MultipleItemQuery} implements L{IQuery} and should provide a 'store' attribute that points to the store it will yield attributes from. """ s = Store() q = s.query((B, C)) self.assertEqual(q.store, s) def test_limitAttributeDistinct(self): """ L{_MultipleItemDistinctQuery} implements LI{IQuery} and should provide a 'limit' attribute depending on how it was created. """ s = Store() q = s.query((B, C), limit=3) self.assertEqual(q.limit, 3) def test_cloneDistinct(self): """ L{_MultipleItemDistinctQuery.cloneQuery} should return a new L{MultipleItemQuery} which is equivalent, but not identical, to the original query. 
""" s = Store() q1 = s.query((B, C), limit=3).distinct() q2 = q1.cloneQuery() self.assertEqual(q1.store, q2.store) self.assertEqual(q2.limit, q1.limit) def test_cloneLimitDistinct(self): """ L{_MultipleItemDistinctQuery} implements L{IQuery} and should provide a 'cloneQuery' method which can accept a 'limit' parameter to change its limit. """ s = Store() q1 = s.query((B, C), limit=3).distinct() q2 = q1.cloneQuery(5) self.assertEqual(q1.store, q2.store) self.assertEqual(q2.limit, 5) def test_storeAttributeDistinct(self): """ L{_MultipleItemDistinctQuery} implements L{IQuery} and should provide a 'store' attribute that points to the store it will yield attributes from. """ s = Store() q = s.query((B, C)).distinct() self.assertEqual(q.store, s) class QueryingTestCase(TestCase): def setUp(self): s = self.store = Store() def _createStuff(): self.d1 = D(store=s, one='d1.one', two='d1.two', three='d1.three', four=u'd1.four', id='1') self.d2 = D(store=s, one='d2.one', two='d2.two', three='d2.three', four=u'd2.four', id='2') self.d3 = D(store=s, one='d3.one', two='d3.two', three='d3.three', four=u'd3.four', id='3') s.transact(_createStuff) def tearDown(self): self.store.close() def query(self, *a, **kw): return list(self.store.query(*a, **kw)) def assertQuery(self, query, expected, args=None): """ Perform byte-for-byte comparisons against generated SQL. It would be slightly nicer if we have a SQL parser which emited an AST we could test against instead, but in the absence of that, we'll do the more difficult thing and keep the tests in sync with the SQL generator. If, someday, we have multiple backends which have different SQL generation requirements, we'll probably need to split all these tests up. While it is true that we don't actually directly care about what SQL gets generated, we do want to test the SQL generation as a /unit/, rather than indirectly testing it by making assertions about the result set it generates. This for all the usual reasons one writes unit tests (ease of debugging, refactoring, maintenance). Other tests cover the actual query behavior this SQL results in, and ideally some day we will have some tests which interact with the actual underlying rdbm to test basic assumptions we are making about the behavior of particular snippets of SQL. To sum up, changes to the SQL generation code may rightly require changes to tests which use assertQuery. If the SQL we want to generate changes, do not be afraid to update the tests. """ if args is None: args = [] sql = query.getQuery(self.store) self.assertEquals( sql, expected, "\n%r != %r\n(if SQL generation code has changed, maybe this test " "should be updated)\n" % (sql, expected)) self.assertEquals([str(a) for a in query.getArgs(self.store)], args) class TableOrder(TestCase): """ Tests for the order of tables when joins are being performed. """ def test_baseQuery(self): """ Test that the simplest query possible, one with only a FROM clause, specifies only the table for the type being queried. """ store = Store() query = store.query(A) self.assertEqual(query.fromClauseParts, [store.getTableName(A)]) def test_singleValueComparison(self): """ Test that a query with a simple comparison against a value specifies only the table for the type being queried. 
""" store = Store() query = store.query(A, A.type == u'value') self.assertEqual(query.fromClauseParts, [store.getTableName(A)]) def test_twoColumnComparison(self): """ Test that a query which compares one column against another from the type being queried specifies only the table for the type being queried for the FROM clause. """ store = Store() query = store.query(A, A.type == A.reftoc) self.assertEqual(query.fromClauseParts, [store.getTableName(A)]) def test_baseSort(self): """ Test that a query which includes an ORDER BY clause including a column from the type being queried properly includes only the table for the type being queried in the FROM clause. """ store = Store() query = store.query(A, sort=A.type.ascending) self.assertEqual(query.fromClauseParts, [store.getTableName(A)]) def test_comparisonWithSort(self): """ Test that a query with both a WHERE clause and an ORDER BY clause from the table being queried properly has only the table for the type being queried included in the FROM clause. """ store = Store() query = store.query(A, A.type == u'value', sort=A.type.ascending) self.assertEqual(query.fromClauseParts, [store.getTableName(A)]) def test_invalidComparison(self): """ Test that a query with a WHERE clause which does not reference the type being queried for is rejected. """ store = Store() self.assertRaises( ValueError, store.query, A, B.name == u'value') def test_invalidSort(self): """ Test that sorting by a column from a table other than the one being queried without joining on that table is rejected. """ store = Store() self.assertRaises( ValueError, store.query, A, sort=B.name.ascending) def test_singleJoin(self): """ Test that joining on another table by performing a comparison between two types properly includes both table names, in the right order (the order of tables in the comparison, left to right), in the FROM CLAUSE. """ store = Store() query = store.query(A, A.type == B.name) self.assertEqual( query.fromClauseParts, [store.getTableName(A), store.getTableName(B)]) def test_singleJoinReversed(self): """ Test that joining on another table by performing a comparison between two types properly includes both table names, in the right order (the order of tables in the comparison, left to right), in the FROM CLAUSE. """ store = Store() query = store.query(A, B.name == A.type) self.assertEqual( query.fromClauseParts, [store.getTableName(B), store.getTableName(A)]) def test_explicitTableOrder(self): """ Test that the order of tables in a join can be explicitly specified by using L{TableOrderComparisonWrapper}. """ store = Store() query = store.query( A, TableOrderComparisonWrapper( [E, D, C, B, A], AND(A.type == B.name, B.name == C.name, C.name == D.four, D.four == E.name))) self.assertEqual( query.fromClauseParts, [store.getTableName(E), store.getTableName(D), store.getTableName(C), store.getTableName(B), store.getTableName(A)]) class FirstType(Item): value = text() class SecondType(Item): value = text() ref = reference(reftype=FirstType) class QueryComplexity(TestCase): comparison = AND(FirstType.value == u"foo", SecondType.ref == FirstType.storeID, SecondType.value == u"bar") def setUp(self): self.store = Store() self.query = self.store.query(FirstType, self.comparison) # Make one of each to get any initialization taken care of so it # doesn't pollute our numbers below. 
FirstType(store=self.store) SecondType(store=self.store) def test_firstTableOuterLoop(self): """ Test that in a two table query, the table which appears first in the result of the getInvolvedTables method of the comparison used is the one which the outer join loop iterates over. Test this by inserting rows into the first table and checking that the number of bytecodes executed increased. """ counter = QueryCounter(self.store) counts = [] for c in range(10): counts.append(counter.measure(list, self.query)) FirstType(store=self.store) # Make sure they're not all the same self.assertEqual(len(set(counts)), len(counts)) # Make sure they're increasing self.assertEqual(counts, sorted(counts)) def test_secondTableInnerLoop(self): """ Like L{test_firstTableOuterLoop} but for the second table being iterated over by the inner loop. This creates more rows in the second table while still performing a query for which no rows in the first table satisfy the WHERE condition. This should mean that rows from the second table are never examined. """ counter = QueryCounter(self.store) count = None for i in range(10): c = counter.measure(list, self.query) if count is None: count = c self.assertEqual(count, c) SecondType(store=self.store) class AndOrQueries(QueryingTestCase): def testNoConditions(self): self.assertRaises(ValueError, AND) self.assertRaises(ValueError, OR) def testOneCondition(self): """ Test that an L{AND} or an L{OR} with a single argument collapses to just that argument. """ self.assertQuery( AND(A.type == u'Narf!'), '((%s = ?))' % (A.type.getColumnName(self.store),), ['Narf!']) self.assertQuery( OR(A.type == u'Narf!'), '((%s = ?))' % (A.type.getColumnName(self.store),), ['Narf!']) self.assertEquals(self.query(D, AND(D.one == 'd1.one')), [self.d1]) self.assertEquals(self.query(D, OR(D.one == 'd1.one')), [self.d1]) def testMultipleAndConditions(self): condition = AND(A.type == u'Narf!', A.type == u'Poiuyt!', A.type == u'Try to take over the world') expectedSQL = '((%s = ?) AND (%s = ?) AND (%s = ?))' expectedSQL %= (A.type.getColumnName(self.store),) * 3 self.assertQuery( condition, expectedSQL, ['Narf!', 'Poiuyt!', 'Try to take over the world']) self.assertEquals( self.query(D, AND(D.one == 'd1.one', D.two == 'd1.two', D.three == 'd1.three')), [self.d1]) def testMultipleOrConditions(self): condition = OR(A.type == u'Narf!', A.type == u'Poiuyt!', A.type == u'Try to take over the world') expectedSQL = '((%s = ?) OR (%s = ?) OR (%s = ?))' expectedSQL %= (A.type.getColumnName(self.store),) * 3 self.assertQuery( condition, expectedSQL, ['Narf!', 'Poiuyt!', 'Try to take over the world']) q = self.query(D, OR(D.one == 'd1.one', D.one == 'd2.one', D.one == 'd3.one')) e = [self.d1, self.d2, self.d3] self.assertEquals(sorted(q), sorted(e)) class SetMembershipQuery(QueryingTestCase): def test_oneOfValueQueryGeneration(self): """ Test that comparing an attribute for containment against a value set generates the appropriate SQL. """ values = [u'a', u'b', u'c'] comparison = C.name.oneOf(values) self.failUnless(IComparison.providedBy(comparison)) self.assertEquals( comparison.getQuery(self.store), '%s IN (?, ?, ?)' % ( C.name.getColumnName(self.store),)) self.assertEquals( comparison.getArgs(self.store), values) def test_oneOfColumnQueryGeneration(self): """ Test that comparing an attribute for containment against an L{IColumn} generates the appropriate SQL. 
""" values = A.type comparison = C.name.oneOf(values) self.failUnless(IComparison.providedBy(comparison)) self.assertEquals( comparison.getQuery(self.store), '%s IN (%s)' % ( C.name.getColumnName(self.store), A.type.getColumnName(self.store))) self.assertEquals( comparison.getArgs(self.store), []) def test_oneOfColumnQueryQueryGeneration(self): """ Test that comparing an attribute for containment against another query generates a sub-select. """ subselect = self.store.query(A).getColumn('type') comparison = C.name.oneOf(subselect) self.failUnless(IComparison.providedBy(comparison)) self.assertEquals( comparison.getQuery(self.store), '%s IN (SELECT %s FROM %s)' % ( C.name.getColumnName(self.store), A.type.getColumnName(self.store), A.getTableName(self.store))) self.assertEquals( comparison.getArgs(self.store), []) def test_oneOfColumnQueryQueryGenerationWithArguments(self): """ Like test_oneOfColumnQueryQueryGeneration, but pass some values to the subselect and make sure they come out of the C{getArgs} method properly. """ value = '10' subselect = self.store.query( D, AND(D.id == value, D.four == C.name)).getColumn('one') comparison = C.name.oneOf(subselect) self.failUnless(IComparison.providedBy(comparison)) self.assertEquals( comparison.getQuery(self.store), '%s IN (SELECT %s FROM %s, %s WHERE ((%s = ?) AND (%s = %s)))' % ( C.name.getColumnName(self.store), D.one.getColumnName(self.store), D.getTableName(self.store), C.getTableName(self.store), D.id.getColumnName(self.store), D.four.getColumnName(self.store), C.name.getColumnName(self.store))) self.assertEquals( map(str, comparison.getArgs(self.store)), [value]) def testOneOfWithList(self): cx = C(store=self.store, name=u'x') cy = C(store=self.store, name=u'y') cz = C(store=self.store, name=u'z') query = self.store.query( C, C.name.oneOf([u'x', u'z', u'a']), sort=C.name.ascending) self.assertEquals(list(query), [cx, cz]) def testOneOfWithSet(self): s = Store() cx = C(store=s, name=u'x') cy = C(store=s, name=u'y') cz = C(store=s, name=u'z') self.assertEquals(list(s.query(C, C.name.oneOf(set([u'x', u'z', u'a'])), sort=C.name.ascending)), [cx, cz]) class WildcardQueries(QueryingTestCase): def testNoConditions(self): self.assertRaises(TypeError, D.one.like) self.assertRaises(TypeError, D.one.notLike) def test_likeValueComparisonInvolvedTables(self): """ Test that only the table to which a column belongs is included in the involved tables set when that column is compared to a literal value using like. """ comparison = D.one.like('foo') self.assertEqual(comparison.getInvolvedTables(), [D]) def test_likeColumnComparisonInvolvedTables(self): """ Test that both the table to which a column belongs and the table to which a target column belongs are included in the involved tables set when those columns are compared using like. 
""" comparison = D.one.like(A.type) self.assertEqual(comparison.getInvolvedTables(), [D, A]) def testOneString(self): self.assertQuery( D.one.like('foobar%'), '(%s LIKE (?))' % (D.one.getColumnName(self.store),), ['foobar%']) self.assertQuery( D.one.notLike('foobar%'), '(%s NOT LIKE (?))' % (D.one.getColumnName(self.store),), ['foobar%']) self.assertEquals(self.query(D, D.one.like('d1.one')), [self.d1]) self.assertEquals(self.query(D, D.one.notLike('d%.one')), []) def testOneColumn(self): self.assertQuery( D.one.like(D.two), '(%s LIKE (%s))' % (D.one.getColumnName(self.store), D.two.getColumnName(self.store))) self.assertEquals(self.query(D, D.one.like(D.two)), []) def testOneColumnAndStrings(self): self.assertQuery( D.one.like('%', D.id, '%one'), '(%s LIKE (? || %s || ?))' % (D.one.getColumnName(self.store), D.id.getColumnName(self.store)), ['%', '%one']) q = self.query(D, D.one.like('%', D.id, '%one')) e = [self.d1, self.d2, self.d3] self.assertEquals(sorted(q), sorted(e)) def testMultipleColumns(self): self.assertQuery( D.one.like(D.two, '%', D.three), '(%s LIKE (%s || ? || %s))' % (D.one.getColumnName(self.store), D.two.getColumnName(self.store), D.three.getColumnName(self.store)), ['%']) self.assertEquals( self.query(D, D.one.like(D.two, '%', D.three)), []) def testStartsEndsWith(self): self.assertQuery( D.one.startswith('foo'), '(%s LIKE (?))' % (D.one.getColumnName(self.store),), ['foo%']) self.assertQuery( D.one.endswith('foo'), '(%s LIKE (?))' % (D.one.getColumnName(self.store),), ['%foo']) self.assertEquals( self.query(D, D.one.startswith('d1')), [self.d1]) self.assertEquals( self.query(D, D.one.endswith('3.one')), [self.d3]) def testStartsEndsWithColumn(self): self.assertQuery( D.one.startswith(D.two), '(%s LIKE (%s || ?))' % (D.one.getColumnName(self.store), D.two.getColumnName(self.store)), ['%']) self.assertEquals( self.query(D, D.one.startswith(D.two)), []) def testStartsEndsWithText(self): self.assertEquals( self.query(D, D.four.startswith(u'd1')), [self.d1]) self.assertEquals( self.query(D, D.four.endswith(u'2.four')), [self.d2]) def testOtherTable(self): self.assertQuery( D.one.startswith(A.type), '(%s LIKE (%s || ?))' % (D.one.getColumnName(self.store), A.type.getColumnName(self.store)), ['%']) C(store=self.store, name=u'd1.') C(store=self.store, name=u'2.one') self.assertEquals( self.query(D, D.one.startswith(C.name)), [self.d1]) self.assertEquals( self.query(D, D.one.endswith(C.name)), [self.d2]) class UniqueTest(TestCase): def setUp(self): s = self.s = Store() self.c = C(store=s, name=u'unique') self.dupc1 = C(store=s, name=u'non-unique') self.dupc2 = C(store=s, name=u'non-unique') def testUniqueFound(self): self.assertEquals(self.s.findUnique(C, C.name == u'unique'), self.c) def testUniqueNotFoundError(self): self.assertRaises(errors.ItemNotFound, self.s.findUnique, C, C.name == u'non-existent') def testUniqueNotFoundDefault(self): bing = object() self.assertEquals(bing, self.s.findUnique( C, C.name == u'non-existent', default=bing)) def testUniqueDuplicate(self): self.assertRaises(errors.DuplicateUniqueItem, self.s.findUnique, C, C.name == u'non-unique') class PlaceholderTestItem(Item): """ Type used by the placeholder support test cases. """ attr = integer() other = integer() characters = text() COMPARISON_OPS = [ operator.lt, operator.le, operator.eq, operator.ne, operator.ge, operator.gt] class PlaceholderTestCase(TestCase): """ Tests for placeholder table name support. 
""" def test_placeholderType(self): """ Test that the C{type} attribute of a Placeholder column is the Placeholder from which it came. """ p = Placeholder(PlaceholderTestItem) a = p.attr self.assertIdentical(a.type, p) def test_placeholderTableName(self): """ Test that the table name of a Placeholder is the same as the table name of the underlying Item class. """ s = Store() p = Placeholder(PlaceholderTestItem) self.assertEquals(p.getTableName(s), PlaceholderTestItem.getTableName(s)) def test_placeholderColumnInterface(self): """ Test that a column from a placeholder provides L{IColumn}. """ value = 0 p = Placeholder(PlaceholderTestItem) a = p.attr self.failUnless(IColumn.providedBy(a)) def test_placeholderAttributeValueComparison(self): """ Test that getting an attribute from a Placeholder which exists on the underlying Item class and comparing it to a value returns an L{IComparison} provider. """ value = 0 p = Placeholder(PlaceholderTestItem) for op in COMPARISON_OPS: self.failUnless(IComparison.providedBy(op(p.attr, value))) self.failUnless(IComparison.providedBy(op(value, p.attr))) def test_placeholderAttributeColumnComparison(self): """ Test that getting an attribute from a Placeholder which exists on the underlying Item class and comparing it to another column returns an L{IComparison} provider. """ value = 0 p = Placeholder(PlaceholderTestItem) for op in COMPARISON_OPS: self.failUnless(IComparison.providedBy(op(p.attr, PlaceholderTestItem.attr))) self.failUnless(IComparison.providedBy(op(PlaceholderTestItem.attr, p.attr))) def _placeholderAttributeSimilarity(self, kind, sql, args): s = Store() value = u'text' p = Placeholder(PlaceholderTestItem) # Explicitly call this, since we aren't going through ItemQuery. p.getTableAlias(s, ()) comparison = getattr(p.characters, kind)(value) self.failUnless(IComparison.providedBy(comparison)) self.assertEquals(comparison.getQuery(s), sql % (p.characters.getColumnName(s),)) self.assertEquals( comparison.getArgs(s), [args % (value,)]) def test_placeholderAttributeSimilarity(self): """ Test that placeholder attributes can be used with the SQL LIKE operator. """ return self._placeholderAttributeSimilarity('like', '(%s LIKE (?))', '%s') def test_placeholderAttributeDisimilarity(self): """ Test that placeholder attributes can be used with the SQL NOT LIKE operator. """ return self._placeholderAttributeSimilarity('notLike', '(%s NOT LIKE (?))', '%s') def test_placeholderAttributeStartsWith(self): """ Test that placeholder attributes work with the .startswith() method. """ return self._placeholderAttributeSimilarity('startswith', '(%s LIKE (?))', '%s%%') def test_placeholderAttributeEndsWith(self): """ Test that placeholder attributes work with the .endswith() method. """ return self._placeholderAttributeSimilarity('endswith', '(%s LIKE (?))', '%%%s') def test_placeholderLikeTarget(self): """ Test that a placeholder can be used as the right-hand argument to a SQL LIKE expression. """ s = Store() p = Placeholder(PlaceholderTestItem) # Call this since we're not using ItemQuery p.getTableAlias(s, ()) comparison = PlaceholderTestItem.attr.like(p.attr) self.failUnless(IComparison.providedBy(comparison)) self.assertEquals( comparison.getQuery(s), '(%s LIKE (placeholder_0.[attr]))' % ( PlaceholderTestItem.attr.getColumnName(s),)) self.assertEquals( comparison.getArgs(s), []) def test_placeholderContainment(self): """ Test that placeholder attributes can be used with the SQL IN and NOT IN operators. 
""" s = Store() value = [1, 2, 3] p = Placeholder(PlaceholderTestItem) # Call this since we're not using ItemQuery p.getTableAlias(s, ()) comparison = p.attr.oneOf(value) self.failUnless(IComparison.providedBy(comparison)) self.assertEquals( comparison.getQuery(s), '%s IN (?, ?, ?)' % (p.attr.getColumnName(s),)) self.assertEquals( comparison.getArgs(s), value) def test_placeholderAntiContainment(self): """ Test that placeholder attributes can be used with the SQL NOT IN operator. """ s = Store() value = [1, 2, 3] p = Placeholder(PlaceholderTestItem) # Call this since we're not using ItemQuery p.getTableAlias(s, ()) comparison = p.attr.notOneOf(value) self.failUnless(IComparison.providedBy(comparison)) self.assertEquals( comparison.getQuery(s), '%s NOT IN (?, ?, ?)' % (p.attr.getColumnName(s),)) self.assertEquals( comparison.getArgs(s), value) def test_placeholderContainmentTarget(self): """ Test that a placeholder attribute can be used as the right-hand argument to the SQL IN operator. """ s = Store() p = Placeholder(PlaceholderTestItem) # Call this since we're not using ItemQuery p.getTableAlias(s, ()) comparison = PlaceholderTestItem.attr.oneOf(p.attr) self.failUnless(IComparison.providedBy(comparison)) self.assertEquals( comparison.getQuery(s), '%s IN (%s)' % (PlaceholderTestItem.attr.getColumnName(s), p.attr.getColumnName(s))) self.assertEquals( comparison.getArgs(s), []) def test_placeholderAntiContainmentTarget(self): """ Test that a placeholder attribute can be used as the right-hand argument to the SQL NOT IN operator. """ s = Store() p = Placeholder(PlaceholderTestItem) # Call this since we're not using ItemQuery p.getTableAlias(s, ()) comparison = PlaceholderTestItem.attr.notOneOf(p.attr) self.failUnless(IComparison.providedBy(comparison)) self.assertEquals( comparison.getQuery(s), '%s NOT IN (%s)' % (PlaceholderTestItem.attr.getColumnName(s), p.attr.getColumnName(s))) self.assertEquals( comparison.getArgs(s), []) def test_placeholderStoreID(self): """ Test that the C{storeID} attribute of a Placeholder can be retrieved just like any other attribute. """ value = 0 p = Placeholder(PlaceholderTestItem) self.failUnless(IComparison.providedBy(p.storeID > value)) def test_placeholderAttributeError(self): """ Test that trying to get an attribute from a Placeholder which is not an L{IComparison} on the underlying Item class raises an AttributeError. """ p = Placeholder(PlaceholderTestItem) self.assertRaises(AttributeError, getattr, p, 'nonexistentAttribute') def test_placeholderComparisonTables(self): """ Test that the result of L{IComparison.getInvolvedTables} on an attribute retrieved from a Placeholder returns a special placeholder item. """ s = Store() p = Placeholder(PlaceholderTestItem) value = 0 involvedTables = (p.attr > value).getInvolvedTables() self.assertEquals(len(involvedTables), 1) theTable = iter(involvedTables).next() self.assertEquals(theTable.getTableName(s), PlaceholderTestItem.getTableName(s)) self.assertEquals(theTable.getTableAlias(s, ()), 'placeholder_0') def test_placeholderComparisonQuery(self): """ Test that the result of L{IComparison.getQuery} on an attribute retrieved from a Placeholder returns SQL which correctly uses an alias of the wrapped table. """ s = Store() p = Placeholder(PlaceholderTestItem) # Explicitly call this here, since we're not going through ItemQuery or # another more reasonable codepath, which would have called it for us. 
p.getTableAlias(s, ()) value = 0 comparison = (p.attr > value) self.assertEquals( comparison.getQuery(s), '(placeholder_0.[attr] > ?)') self.assertEquals( comparison.getArgs(s), [value]) def test_placeholderComparisonArgs(self): """ Test that the result of L{IComparison.getArgs} on an attribute retrieved from a Placeholder returns the right values for the comparison. """ s = Store() p = Placeholder(PlaceholderTestItem) value = 0 args = (p.attr > value).getArgs(s) self.assertEquals(args, [0]) def test_placeholderQuery(self): """ Test that an ItemQuery can be created with Placeholder instances and the SQL it emits as a result correctly assigns and uses table aliases. """ s = Store() p = Placeholder(PlaceholderTestItem) sql, args = ItemQuery(s, p)._sqlAndArgs('SELECT', '*') self.assertEquals( sql, 'SELECT * FROM %s AS placeholder_0' % ( PlaceholderTestItem.getTableName(s),)) def test_placeholderMultiQuery(self): """ Test that a MultipleItemQuery can be created with Placeholder instances and the SQL it emits as a result correctly assigns and uses table aliases. """ s = Store() p1 = Placeholder(PlaceholderTestItem) p2 = Placeholder(PlaceholderTestItem) sql, args = MultipleItemQuery(s, (p1, p2))._sqlAndArgs('SELECT', '*') tableName = PlaceholderTestItem.getTableName(s) self.assertEqual( sql, 'SELECT * FROM %s AS placeholder_0, %s AS placeholder_1' % ( tableName, tableName)) def test_placeholderComparison(self): """ Test that a comparison which contains a Placeholder also results in properly generated SQL. """ s = Store() p = Placeholder(PlaceholderTestItem) query = ItemQuery( s, PlaceholderTestItem, PlaceholderTestItem.attr == p.attr) sql, args = query._sqlAndArgs('SELECT', '*') self.assertEquals( sql, 'SELECT * ' 'FROM %s, %s AS placeholder_0 ' 'WHERE (%s.[attr] = placeholder_0.[attr])' % ( PlaceholderTestItem.getTableName(s), PlaceholderTestItem.getTableName(s), PlaceholderTestItem.getTableName(s))) self.assertEquals(args, []) def test_placeholderOrdering(self): """ Placeholders should be ordered based on the order in which they were instantiated. """ p1 = Placeholder(PlaceholderTestItem) p2 = Placeholder(PlaceholderTestItem) self.failUnless(p1 < p2) self.failUnless(p2 > p1) self.failIf(p1 >= p2) self.failIf(p2 <= p1) self.failIf(p1 == p2) self.failIf(p2 == p1) self.failUnless(p1 != p2) self.failUnless(p2 != p1) def test_placeholderObjectSorting(self): """ Placeholders should sort based on the order in which they were instantiated. """ placeholders = [Placeholder(PlaceholderTestItem) for n in xrange(10)] shuffledPlaceholders = list(placeholders) random.shuffle(shuffledPlaceholders) shuffledPlaceholders.sort() self.assertEquals(placeholders, shuffledPlaceholders) def test_placeholderAliasAssignment(self): """ Test that each placeholder selects a unique alias for itself. """ s = Store() p1 = Placeholder(PlaceholderTestItem) p2 = Placeholder(PlaceholderTestItem) aliases = [] self.assertEquals(p1.getTableAlias(s, aliases), 'placeholder_0') self.assertEquals(p1.getTableAlias(s, aliases), 'placeholder_0') aliases.append('placeholder_') self.assertEquals(p1.getTableAlias(s, aliases), 'placeholder_0') self.assertEquals(p2.getTableAlias(s, aliases), 'placeholder_1') def test_multiplePlaceholderComparisons(self): """ Test that using multiple different placeholders in a comparison at once properly gives each a unique name. 
""" s = Store() p1 = Placeholder(PlaceholderTestItem) p2 = Placeholder(PlaceholderTestItem) query = ItemQuery( s, PlaceholderTestItem, AND(PlaceholderTestItem.attr == p1.attr, PlaceholderTestItem.other == p1.other, PlaceholderTestItem.attr == p2.attr, PlaceholderTestItem.characters == p2.characters)) sql, args = query._sqlAndArgs('SELECT', '*') self.assertEquals( sql, 'SELECT * ' 'FROM %s, %s AS placeholder_0, %s AS placeholder_1 ' 'WHERE ((%s = placeholder_0.[attr]) AND ' '(%s = placeholder_0.[other]) AND ' '(%s = placeholder_1.[attr]) AND ' '(%s = placeholder_1.[characters]))' % ( PlaceholderTestItem.getTableName(s), PlaceholderTestItem.getTableName(s), PlaceholderTestItem.getTableName(s), PlaceholderTestItem.attr.getColumnName(s), PlaceholderTestItem.other.getColumnName(s), PlaceholderTestItem.attr.getColumnName(s), PlaceholderTestItem.characters.getColumnName(s))) self.assertEquals(args, []) def test_sortByPlaceholderAttribute(self): """ Test that a placeholder attribute can be used as a sort key. """ s = Store() p = Placeholder(PlaceholderTestItem) query = ItemQuery( s, p, sort=p.attr.ascending) sql, args = query._sqlAndArgs('SELECT', '*') expectedSQL = ('SELECT * ' 'FROM %s AS placeholder_0 ' 'ORDER BY placeholder_0.[attr] ASC') expectedSQL %= (p.getTableName(s),) self.assertEquals(sql, expectedSQL) self.assertEquals(args, []) def test_placeholderColumnNamesInQueryTarget(self): """ Test that placeholders are used correctly in the 'result' portion of an SQL query. """ s = Store() p = Placeholder(PlaceholderTestItem) query = ItemQuery(s, p) expectedSQL = "placeholder_0.oid, placeholder_0.[attr], placeholder_0.[characters], placeholder_0.[other]" self.assertEquals(query._queryTarget, expectedSQL) Axiom-0.6.0/axiom/test/test_queryutil.py0000644000175000017500000001036310417764262020272 0ustar exarkunexarkun import random from twisted.trial.unittest import TestCase from axiom.store import Store from axiom.item import Item from axiom.attributes import integer from axiom.queryutil import overlapping, AttributeTuple class Segment(Item): typeName = 'test_overlap_segment' schemaVersion = 1 x = integer() y = integer() def __repr__(self): return 'Segment<%d,%d>' % (self.x, self.y) class ABC(Item): typeName = 'test_tuple_queries' schemaVersion = 1 a = integer(allowNone=False) b = integer(allowNone=False) c = integer(allowNone=False) class TestQueryUtilities(TestCase): def testBetweenQuery(self): # From a drawn copy of the docstring: s = Store() G = 3 K = 4 H = C = 5 A = 8 D = 11 E = 17 B = 20 F = I = 22 L = 23 J = 24 AB = Segment(store=s, x=A, y=B) CD = Segment(store=s, x=C, y=D) EF = Segment(store=s, x=E, y=F) GH = Segment(store=s, x=G, y=H) IJ = Segment(store=s, x=I, y=J) KL = Segment(store=s, x=K, y=L) AL = Segment(store=s, x=A, y=L) CB = Segment(store=s, x=C, y=B) CA = Segment(store=s, x=C, y=A) BL = Segment(store=s, x=B, y=L) self.assertEquals( list(s.query(Segment, overlapping(Segment.x, Segment.y, A, B), sort=Segment.storeID.asc)), [AB, CD, EF, KL, AL, CB, CA, BL], ) ('(((A > 2)) ' 'OR ((A == 2) AND (B > 3)) ' 'OR ((A == 2) AND (B == 3) AND (C >= 4)))') def testTupleQueryWithTuples(self): s = Store() s.transact(self._dotestTupleQueryWithTuples, s) def _dotestTupleQueryWithTuples(self, s): L = [] for x in range(3): for y in range(3): for z in range(3): L.append((x, y, z)) shuffledL = L[:] random.shuffle(shuffledL) for a, b, c in shuffledL: ABC(a=a, b=b, c=c, store=s) at = AttributeTuple(ABC.a, ABC.b, ABC.c) for comparee in L: qobj = s.query(ABC, at > comparee, sort=[ABC.a.ascending, 
ABC.b.ascending, ABC.c.ascending]) self.assertEquals( L[L.index(comparee) + 1:], [(o.a, o.b, o.c) for o in qobj]) for comparee in L: qobj = s.query(ABC, at >= comparee, sort=[ABC.a.ascending, ABC.b.ascending, ABC.c.ascending]) self.assertEquals( L[L.index(comparee):], [(o.a, o.b, o.c) for o in qobj]) for comparee in L: qobj = s.query(ABC, at == comparee, sort=[ABC.a.ascending, ABC.b.ascending, ABC.c.ascending]) self.assertEquals( [comparee], [(o.a, o.b, o.c) for o in qobj]) for comparee in L: qobj = s.query(ABC, at != comparee, sort=[ABC.a.ascending, ABC.b.ascending, ABC.c.ascending]) self.assertEquals( L[:L.index(comparee)] + L[L.index(comparee) + 1:], [(o.a, o.b, o.c) for o in qobj]) for comparee in L: qobj = s.query(ABC, at < comparee, sort=[ABC.a.ascending, ABC.b.ascending, ABC.c.ascending]) self.assertEquals( L[:L.index(comparee)], [(o.a, o.b, o.c) for o in qobj]) for comparee in L: qobj = s.query(ABC, at <= comparee, sort=[ABC.a.ascending, ABC.b.ascending, ABC.c.ascending]) self.assertEquals( L[:L.index(comparee) + 1], [(o.a, o.b, o.c) for o in qobj]) Axiom-0.6.0/axiom/test/test_reference.py0000644000175000017500000001534611224376662020173 0ustar exarkunexarkunimport gc from twisted.trial.unittest import TestCase from axiom.store import Store from axiom.upgrade import registerUpgrader from axiom.item import Item, declareLegacyItem from axiom.attributes import integer, reference from axiom.errors import BrokenReference, DeletionDisallowed class Referee(Item): schemaVersion = 1 typeName = "test_reference_referee" topSecret = integer() class SimpleReferent(Item): schemaVersion = 1 typeName = "test_reference_referent" ref = reference() class BreakingReferent(Item): schemaVersion = 1 typeName = "test_reference_breaking_referent" ref = reference(whenDeleted=reference.NULLIFY) class DependentReferent(Item): ref = reference(whenDeleted=reference.CASCADE, reftype=Referee) class DisallowReferent(Item): ref = reference(whenDeleted=reference.DISALLOW, reftype=Referee) class BadReferenceTestCase(TestCase): ntimes = 10 def testSanity(self): store = Store() for i in xrange(self.ntimes): SimpleReferent(store=store, ref=Referee(store=store, topSecret=i)) (referee,) = list(store.query(Referee)) (referent,) = list(store.query(SimpleReferent)) self.assertEqual(referent.ref.topSecret, referee.topSecret) referee.deleteFromStore() referent.deleteFromStore() def testBadReferenceNone(self): """ Test that accessing a broken reference on an Item that has already been loaded into memory correctly nullifies the attribute. """ store = Store() referee = Referee(store=store, topSecret=0) referent = SimpleReferent(store=store, ref=referee) referee.deleteFromStore() referee = None gc.collect() (referent,) = list(store.query(SimpleReferent)) self.assertEqual(referent.ref, None) def testBadReferenceNoneLoading(self): """ Test that accessing a broken reference on an Item that has not yet been loaded correctly nullifies the attribute. """ store = Store() referee = Referee(store=store, topSecret=0) referent = SimpleReferent(store=store, ref=referee) referee.deleteFromStore() referee = None referent = None gc.collect() (referent,) = list(store.query(SimpleReferent)) self.assertEqual(referent.ref, None) def test_brokenReferenceException(self): """ Test that an exception is raised when a broken reference is detected when this should be impossible (ie. CASCADE or NULLIFY). 
""" store = Store() referee = Referee(store=store, topSecret=0) referent = BreakingReferent(store=store, ref=referee) referee.deleteFromStore() referent = None gc.collect() referent = store.findFirst(BreakingReferent) self.patch(BreakingReferent.ref, 'whenDeleted', reference.CASCADE) self.assertRaises(BrokenReference, lambda: referent.ref) def testBadReferenceNoneRevert(self): store = Store() referee = Referee(store=store, topSecret=0) referent = SimpleReferent(store=store, ref=referee) def txn(): referee.deleteFromStore() self.assertEqual(referent.ref, None) 1 / 0 self.assertRaises(ZeroDivisionError, store.transact, txn) self.assertEqual(referent.ref, referee) referent = None referee = None gc.collect() referent = store.findUnique(SimpleReferent) referee = store.findUnique(Referee) self.assertEqual(referent.ref, referee) def testBrokenReferenceDisallow(self): """ Test that deleting an item referred to by a whenDeleted == DISALLOW reference raises an exception. """ store = Store() referee = Referee(store=store, topSecret=0) referent = DisallowReferent(store=store, ref=referee) self.assertRaises(DeletionDisallowed, referee.deleteFromStore) self.assertRaises(DeletionDisallowed, store.query(Referee).deleteFromStore) def testReferenceQuery(self): store = Store() referee = Referee(store=store, topSecret=0) self.assertEqual( list(store.query(SimpleReferent, SimpleReferent.ref == Referee.storeID)), []) def testReferenceDeletion(self): store = Store() referee = Referee(store=store, topSecret=0) dep = DependentReferent(store=store, ref=referee) sid = dep.storeID self.assertIdentical(store.getItemByID(sid), dep) # sanity referee.deleteFromStore() self.assertRaises(KeyError, store.getItemByID, sid) def testBatchReferenceDeletion(self): """ Test that batch deletion removes dependent items correctly. """ store = Store() referee = Referee(store=store, topSecret=0) dep = DependentReferent(store=store, ref=referee) sid = dep.storeID store.query(Referee).deleteFromStore() self.assertRaises(KeyError, store.getItemByID, sid) def test_dummyItemReference(self): """ Getting the value of a reference attribute which has previously been set to a legacy item results in an instance of the most recent type for that item. """ store = Store() referent = SimpleReferent(store=store) oldReferee = nonUpgradedItem(store=store) referent.ref = oldReferee newReferee = referent.ref self.assertTrue( isinstance(newReferee, UpgradedItem), "%r was instance of %r, expected %r" % (newReferee, type(newReferee), UpgradedItem)) def test_dummyItemGetItemByID(self): """ Instantiating a dummy item and then getting it by its storeID should upgrade it. """ store = Store() t = nonUpgradedItem(store=store) self.assertEquals(t.__legacy__, True) self.assertFalse(store.objectCache.has(t.storeID)) t2 = store.getItemByID(t.storeID) self.assertNotIdentical(t, t2) self.assertTrue(isinstance(t2, UpgradedItem)) class UpgradedItem(Item): """ A simple item which is the current version of L{nonUpgradedItem}. 
""" schemaVersion = 2 dummy = integer() nonUpgradedItem = declareLegacyItem( UpgradedItem.typeName, 1, dict(dummy=integer())) def item1to2(old): """ Upgrade an nonUpgradedItem to UpgradedItem """ return old.upgradeVersion(UpgradedItem.typeName, 1, 2, dummy=old.dummy) registerUpgrader(item1to2, UpgradedItem.typeName, 1, 2) Axiom-0.6.0/axiom/test/test_reprs.py0000644000175000017500000000661710607557204017366 0ustar exarkunexarkun """ This module contains tests for C{__repr__} implementations within Axiom, to make sure they contain enough information to be useful, and work when objects are incompletely initialized. """ from axiom.iaxiom import IOrdering from axiom.attributes import integer, text, reference from axiom.item import Item from axiom.store import Store from twisted.trial.unittest import TestCase class ReprTesterItemClass(Item): intattr = integer() txtattr = text() class ReprTesterWithReference(Item): """ Test fixture for testing 'reference' attributes. """ refattr = reference() class BasicInformation(TestCase): """ Basic tests to verify that C{__repr__} implementations for various axiom objects provide enough information to debug them. """ def test_storeID(self): """ Verify that the storeID column tells you that it is a storeID, and who it belongs to. """ R = repr(ReprTesterItemClass.storeID) self.assertIn('storeID', R) self.assertIn(ReprTesterItemClass.__name__, R) self.assertNotIn('intattr', R) def test_query(self): """ Verify that queries tell you something about their target and comparison. """ s = Store() R = repr(s.query(ReprTesterItemClass, ReprTesterItemClass.intattr == 1)) self.assertIn('intattr', R) self.assertIn(ReprTesterItemClass.__name__, R) def test_simpleOrdering(self): """ Verify that ordering objects tell you something about their ordering. """ R = repr(ReprTesterItemClass.intattr.ascending) self.assertIn("intattr", R) self.assertIn("asc", R.lower()) # leaving this a little open-ended so # that we can fiddle with case, ASC and # DESC vs. ascending and descending def test_complexOrdering(self): """ Verify that complex orderings tell us about their component parts. """ R = repr(IOrdering((ReprTesterItemClass.intattr.ascending, ReprTesterItemClass.txtattr.descending))) self.assertIn(repr(ReprTesterItemClass.intattr.ascending), R) self.assertIn(repr(ReprTesterItemClass.txtattr.descending), R) def test_referenceAttribute(self): """ Verify that repr()ing an object with reference attributes will show the ID. """ s = Store() i1 = ReprTesterWithReference(store=s) i2 = ReprTesterWithReference(store=s) i1.refattr = i2 R = repr(i1) self.assertIn("reference(%d)" % (i2.storeID,), R) def test_recursiveReferenceAttribute(self): """ Verify that repr()ing an object with reference attributes that refer to themselves will not recurse. """ s = Store() i1 = ReprTesterWithReference(store=s) i1.refattr = i1 R = repr(i1) self.assertIn("reference(%d)" % (i1.storeID,), R) def test_unstoredReferenceAttribute(self): """ Verify that repr()ing an object with reference attributes that refer to items not in a store does something reasonable. 
""" i1 = ReprTesterWithReference() i2 = ReprTesterWithReference() i1.refattr = i2 R = repr(i1) self.assertIn("reference(unstored@%d)" % (id(i2),), R) Axiom-0.6.0/axiom/test/test_scheduler.py0000644000175000017500000006264311224737657020222 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_scheduler -*- import gc from datetime import timedelta from twisted.trial import unittest from twisted.trial.unittest import TestCase from twisted.application.service import IService from twisted.internet.defer import Deferred from twisted.internet.task import Clock from twisted.python import filepath, versions from epsilon.extime import Time from axiom.scheduler import TimedEvent, _SubSchedulerParentHook, TimedEventFailureLog from axiom.scheduler import Scheduler, SubScheduler from axiom.store import Store from axiom.item import Item from axiom.substore import SubStore from axiom.attributes import integer, text, inmemory, boolean, timestamp from axiom.iaxiom import IScheduler from axiom.dependency import installOn class TestEvent(Item): typeName = 'test_event' schemaVersion = 1 testCase = inmemory() # this won't fall out of memory due to # caching, thanks. name = text() runCount = integer() runAgain = integer() # milliseconds to add, then run again winner = integer(default=0) # is this the event that is supposed to # complete the test successfully? def __init__(self, **kw): super(TestEvent, self).__init__(**kw) self.runCount = 0 def run(self): # When this is run from testSubScheduler, we want to make an # additional assertion. There is exactly one SubStore in this # configuration, so there should be no more than one # TimedEvent with a _SubSchedulerParentHook as its runnable. if self.store.parent is not None: count = 0 s = self.store.parent for evt in s.query(TimedEvent): if isinstance(evt.runnable, _SubSchedulerParentHook): count += 1 if count > 1: return self.fail("Too many TimedEvents for the SubStore: %d" % (count,)) self.runCount += 1 if self.runAgain is not None: result = self.testCase.now() + timedelta(seconds=self.runAgain) self.runAgain = None else: result = None return result def fail(self, msg): self.testCase.fail(msg) class NotActuallyRunnable(Item): huhWhat = integer() class SpecialError(Exception): pass class SpecialErrorHandler(Item): huhWhat = integer() broken = integer(default=0) procd = integer(default=0) def run(self): self.broken = 1 raise SpecialError() def timedEventErrorHandler(self, timedEvent, failureObj): failureObj.trap(SpecialError) self.procd = 1 class HookRunner(Item): """ Runnable that simply calls a supplied hook. """ ignored = integer() hook = inmemory() def run(self): self.hook(self) class SchedTest: def tearDown(self): return IService(self.siteStore).stopService() def setUp(self): self.clock = Clock() scheduler = IScheduler(self.siteStore) self.stubTime(scheduler) IService(self.siteStore).startService() def now(self): return Time.fromPOSIXTimestamp(self.clock.seconds()) def stubTime(self, scheduler): scheduler.callLater = self.clock.callLater scheduler.now = self.now def test_implementsSchedulerInterface(self): """ Verify that IScheduler is declared as implemented. """ self.failUnless(IScheduler.providedBy(IScheduler(self.store))) def test_scheduler(self): """ Test that the ordering and timing of scheduled calls is correct. """ # create 3 timed events. the first one fires. the second one fires, # then reschedules itself. the third one should never fire because the # reactor is shut down first. 
assert that the first and second fire # only once, and that the third never fires. s = self.store t1 = TestEvent(testCase=self, name=u't1', store=s, runAgain=None) t2 = TestEvent(testCase=self, name=u't2', store=s, runAgain=2) t3 = TestEvent(testCase=self, name=u't3', store=s, runAgain=None) now = self.now() self.ts = [t1, t2, t3] S = IScheduler(s) # Schedule them out of order to make sure behavior doesn't # depend on tasks arriving in soonest-to-latest order. S.schedule(t2, now + timedelta(seconds=3)) S.schedule(t1, now + timedelta(seconds=1)) S.schedule(t3, now + timedelta(seconds=100)) self.clock.pump([2, 2, 2]) self.assertEqual(t1.runCount, 1) self.assertEqual(t2.runCount, 2) self.assertEqual(t3.runCount, 0) def test_unscheduling(self): """ Test the unscheduleFirst method of the scheduler. """ d = Deferred() sch = IScheduler(self.store) t1 = TestEvent(testCase=self, name=u't1', store=self.store) t2 = TestEvent(testCase=self, name=u't2', store=self.store, runAgain=None) sch.schedule(t1, self.now() + timedelta(seconds=1)) sch.schedule(t2, self.now() + timedelta(seconds=2)) sch.unscheduleFirst(t1) self.clock.advance(3) self.assertEquals(t1.runCount, 0) self.assertEquals(t2.runCount, 1) def test_inspection(self): """ Test that the L{scheduledTimes} method returns an iterable of all the times at which a particular item is scheduled to run. """ now = self.now() + timedelta(seconds=1) off = timedelta(seconds=3) sch = IScheduler(self.store) runnable = TestEvent(store=self.store, name=u'Only event') sch.schedule(runnable, now) sch.schedule(runnable, now + off) sch.schedule(runnable, now + off + off) self.assertEquals( list(sch.scheduledTimes(runnable)), [now, now + off, now + off + off]) def test_scheduledTimesDuringRun(self): """ L{Scheduler.scheduledTimes} should not include scheduled times that have already triggered. """ futureTimes = [] scheduler = IScheduler(self.store) runner = HookRunner( store=self.store, hook=lambda self: futureTimes.append( list(scheduler.scheduledTimes(self)))) then = self.now() + timedelta(seconds=1) scheduler.schedule(runner, self.now()) scheduler.schedule(runner, then) self.clock.advance(1) self.assertEquals(futureTimes, [[then], []]) def test_deletedRunnable(self): """ Verify that if a scheduled item is deleted, L{TimedEvent.invokeRunnable} just deletes the L{TimedEvent} without raising an exception. """ now = self.now() scheduler = IScheduler(self.store) runnable = TestEvent(store=self.store, name=u'Only event') scheduler.schedule(runnable, now) runnable.deleteFromStore() # Invoke it manually to avoid timing complexity. 
timedEvent = self.store.findUnique( TimedEvent, TimedEvent.runnable == runnable) timedEvent.invokeRunnable() self.assertEqual( self.store.findUnique( TimedEvent, TimedEvent.runnable == runnable, default=None), None) class TopStoreSchedTest(SchedTest, TestCase): def setUp(self): self.store = self.siteStore = Store() super(TopStoreSchedTest, self).setUp() def testBasicScheduledError(self): S = IScheduler(self.store) S.schedule(NotActuallyRunnable(store=self.store), self.now()) te = TestEvent(store=self.store, testCase=self, name=u't1', runAgain=None) S.schedule(te, self.now() + timedelta(seconds=1)) self.assertEquals( self.store.query(TimedEventFailureLog).count(), 0) self.clock.advance(3) self.assertEquals(te.runCount, 1) errs = self.flushLoggedErrors(AttributeError) self.assertEquals(len(errs), 1) self.assertEquals(self.store.query(TimedEventFailureLog).count(), 1) def testScheduledErrorWithHandler(self): S = IScheduler(self.store) spec = SpecialErrorHandler(store=self.store) S.schedule(spec, self.now()) te = TestEvent(store=self.store, testCase=self, name=u't1', runAgain=None) S.schedule(te, self.now() + timedelta(seconds=1)) self.assertEquals( self.store.query(TimedEventFailureLog).count(), 0) self.clock.advance(3) self.assertEquals(te.runCount, 1) errs = self.flushLoggedErrors(SpecialError) self.assertEquals(len(errs), 1) self.assertEquals(self.store.query(TimedEventFailureLog).count(), 0) self.failUnless(spec.procd) self.failIf(spec.broken) class SubSchedulerTests(SchedTest, TestCase): """ Tests for the substore implementation of IScheduler. """ def setUp(self): """ Create a site store for the substore which will contain the IScheduler being tested. Start its IService so any scheduled events will run. """ self.storePath = filepath.FilePath(self.mktemp()) self.siteStore = Store(self.storePath) super(SubSchedulerTests, self).setUp() substoreItem = SubStore.createNew(self.siteStore, ['scheduler_test']) self.substore = substoreItem.open() self.scheduler = scheduler = IScheduler(self.substore) self.stubTime(scheduler) self.store = self.substore def test_now(self): """ The user store's L{IScheduler} powerup's C{now} method returns whatever the site store's L{IScheduler} powerup's C{now} method returns. """ # I don't want the stubbed now method. del self.scheduler.now self.clock.advance(17) self.assertEquals( self.scheduler.now(), Time.fromPOSIXTimestamp(self.clock.seconds())) def test_scheduleAfterParentHookError(self): """ A transient error during a L{_SubSchedulerParentHook} run (such as failing to open the substore for whatever reason) should not disable subsequent scheduling. """ runnable = TestEvent(store=self.store) # Schedule runnable, but fail the _SubSchedulerParentHook run. self.scheduler.schedule(runnable, self.now() + timedelta(seconds=1)) hook = self.siteStore.findUnique(_SubSchedulerParentHook) def stumble(): raise IOError('Denied') object.__setattr__(hook, 'run', stumble) self.clock.advance(1) object.__delattr__(hook, 'run') self.assertEquals( self.siteStore.findUnique(TimedEventFailureLog).runnable, hook) [err] = self.flushLoggedErrors(IOError) self.assertEquals(str(err.value), 'Denied') self.assertEquals(runnable.runCount, 0) # Schedule runnable again. The restored hook in the site store should # trigger both scheduled runs in the substore now. self.scheduler.schedule(runnable, self.now() + timedelta(seconds=1)) self.clock.advance(1) self.assertEquals(runnable.runCount, 2) class SchedulerStartupTests(TestCase): """ Tests for behavior relating to L{Scheduler} service startup. 
""" def setUp(self): self.clock = Clock() self.store = Store() def tearDown(self): return self.stopStoreService() def now(self): return Time.fromPOSIXTimestamp(self.clock.seconds()) def time(self, offset): return self.now() + timedelta(seconds=offset) def makeScheduler(self): """ Create, install, and return a Scheduler with a fake callLater. """ scheduler = IScheduler(self.store) scheduler.callLater = self.clock.callLater scheduler.now = self.now return scheduler def startStoreService(self): """ Start the Store Service. """ service = IService(self.store) service.startService() def stopStoreService(self): service = IService(self.store) if service.running: return service.stopService() def test_scheduleWhileStopped(self): """ Test that a schedule call on a L{Scheduler} which has not been started does not result in the creation of a transient timed event. """ scheduler = self.makeScheduler() scheduler.schedule(TestEvent(store=self.store), self.time(1)) self.assertEqual(self.clock.calls, []) def test_scheduleWithRunningService(self): """ Test that if a scheduler is created and installed on a store which has a started service, a transient timed event is created when the scheduler is used. """ self.startStoreService() scheduler = self.makeScheduler() scheduler.schedule(TestEvent(store=self.store), self.time(1)) self.assertEqual(len(self.clock.calls), 1) def test_schedulerStartedWithPastEvent(self): """ Test that an existing Scheduler with a TimedEvent in the past is started immediately (but does not run the TimedEvent synchronously) when the Store Service is started. """ scheduler = self.makeScheduler() scheduler.schedule(TestEvent(store=self.store), self.time(-1)) self.assertEqual(self.clock.calls, []) self.startStoreService() self.assertEqual(len(self.clock.calls), 1) def test_schedulerStartedWithFutureEvent(self): """ Test that an existing Scheduler with a TimedEvent in the future is started immediately when the Store Service is started. """ scheduler = self.makeScheduler() scheduler.schedule(TestEvent(store=self.store), self.time(1)) self.assertEqual(self.clock.calls, []) self.startStoreService() self.assertEqual(len(self.clock.calls), 1) def test_schedulerStopped(self): """ Test that when the Store Service is stopped, the Scheduler's transient timed event is cleaned up. """ self.test_scheduleWithRunningService() d = self.stopStoreService() def cbStopped(ignored): self.assertEqual(self.clock.calls, []) d.addCallback(cbStopped) return d class MissingService(unittest.TestCase): """ A set of tests to verify that things *aren't* scheduled with the reactor when the scheduling service doesn't exist, merely persisted to the database. """ def setUp(self): """ Create a store with a scheduler installed on it and hook the C{now} and C{callLater} methods of that scheduler so their behavior can be controlled by these tests. """ self.calls = [] self.store = Store(filepath.FilePath(self.mktemp())) self.siteScheduler = IScheduler(self.store) self.siteScheduler.callLater = self._callLater def _callLater(self, s, f, *a, **k): self.calls.append((s, f, a, k)) def test_schedule(self): """ Test that if an event is scheduled against a scheduler which is not running, not transient scheduling (eg, reactor.callLater) is performed. """ return self._testSchedule(self.siteScheduler) def test_subSchedule(self): """ The same as test_schedule, except using a subscheduler. 
""" subst = SubStore.createNew(self.store, ['scheduler_test']) substore = subst.open() subscheduler = IScheduler(substore) return self._testSchedule(subscheduler) def _testSchedule(self, scheduler): t1 = TestEvent(store=scheduler.store) scheduler.schedule(t1, Time.fromPOSIXTimestamp(0)) self.failIf(self.calls, "Should not have had any calls: %r" % (self.calls,)) self.assertIdentical( scheduler._getNextEvent(Time.fromPOSIXTimestamp(1)).runnable, t1) class ScheduleCallingItem(Item): """ Item which invokes C{schedule} on its store's L{IScheduler} from its own C{run} method. """ ran = boolean(default=False) rescheduleFor = timestamp() def run(self): scheduler = IScheduler(self.store) scheduler.schedule(self, self.rescheduleFor) self.ran = True class NullRunnable(Item): """ Runnable item which does nothing. """ ran = boolean(default=False) def run(self): pass class SubStoreSchedulerReentrancy(TestCase): """ Test re-entrant scheduling calls on an item run by a SubScheduler. """ def setUp(self): self.clock = Clock() self.dbdir = filepath.FilePath(self.mktemp()) self.store = Store(self.dbdir) self.substoreItem = SubStore.createNew(self.store, ['sub']) self.substore = self.substoreItem.open() self.scheduler = IScheduler(self.store) self.subscheduler = IScheduler(self.substore) self.scheduler.callLater = self.clock.callLater self.scheduler.now = lambda: Time.fromPOSIXTimestamp(self.clock.seconds()) self.subscheduler.now = lambda: Time.fromPOSIXTimestamp(self.clock.seconds()) IService(self.store).startService() def tearDown(self): return IService(self.store).stopService() def _scheduleRunner(self, now, offset): scheduledAt = Time.fromPOSIXTimestamp(now + offset) rescheduleFor = Time.fromPOSIXTimestamp(now + offset + 10) runnable = ScheduleCallingItem(store=self.substore, rescheduleFor=rescheduleFor) self.subscheduler.schedule(runnable, scheduledAt) return runnable def testSchedule(self): """ Test the schedule method, as invoked from the run method of an item being run by the subscheduler. """ now = self.clock.seconds() runnable = self._scheduleRunner(now, 10) self.clock.advance(11) self.assertEqual( list(self.subscheduler.scheduledTimes(runnable)), [Time.fromPOSIXTimestamp(now + 20)]) hook = self.store.findUnique( _SubSchedulerParentHook, _SubSchedulerParentHook.subStore == self.substoreItem) self.assertEqual( list(self.scheduler.scheduledTimes(hook)), [Time.fromPOSIXTimestamp(now + 20)]) def testScheduleWithLaterTimedEvents(self): """ Like L{testSchedule}, but use a SubScheduler which has pre-existing TimedEvents which are beyond the new runnable's scheduled time (to trigger the reschedule-using code-path in _SubSchedulerParentHook._schedule). """ now = self.clock.seconds() when = Time.fromPOSIXTimestamp(now + 30) null = NullRunnable(store=self.substore) self.subscheduler.schedule(null, when) runnable = self._scheduleRunner(now, 10) self.clock.advance(11) self.assertEqual( list(self.subscheduler.scheduledTimes(runnable)), [Time.fromPOSIXTimestamp(now + 20)]) self.assertEqual( list(self.subscheduler.scheduledTimes(null)), [Time.fromPOSIXTimestamp(now + 30)]) hook = self.store.findUnique( _SubSchedulerParentHook, _SubSchedulerParentHook.subStore == self.substoreItem) self.assertEqual( list(self.scheduler.scheduledTimes(hook)), [Time.fromPOSIXTimestamp(20)]) def testScheduleWithEarlierTimedEvents(self): """ Like L{testSchedule}, but use a SubScheduler which has pre-existing TimedEvents which are before the new runnable's scheduled time. 
""" now = self.clock.seconds() when = Time.fromPOSIXTimestamp(now + 15) null = NullRunnable(store=self.substore) self.subscheduler.schedule(null, when) runnable = self._scheduleRunner(now, 10) self.clock.advance(11) self.assertEqual( list(self.subscheduler.scheduledTimes(runnable)), [Time.fromPOSIXTimestamp(now + 20)]) self.assertEqual( list(self.subscheduler.scheduledTimes(null)), [Time.fromPOSIXTimestamp(now + 15)]) hook = self.store.findUnique( _SubSchedulerParentHook, _SubSchedulerParentHook.subStore == self.substoreItem) self.assertEqual( list(self.scheduler.scheduledTimes(hook)), [Time.fromPOSIXTimestamp(now + 15)]) def testMultipleEventsPerTick(self): """ Test running several runnables in a single tick of the subscheduler. """ now = self.clock.seconds() runnables = [ self._scheduleRunner(now, 10), self._scheduleRunner(now, 11), self._scheduleRunner(now, 12)] self.clock.advance(13) for n, runnable in enumerate(runnables): self.assertEqual( list(self.subscheduler.scheduledTimes(runnable)), [Time.fromPOSIXTimestamp(now + n + 20)]) hook = self.store.findUnique( _SubSchedulerParentHook, _SubSchedulerParentHook.subStore == self.substoreItem) self.assertEqual( list(self.scheduler.scheduledTimes(hook)), [Time.fromPOSIXTimestamp(now + 20)]) class BackwardsCompatibilitySchedTests(object): """ L{Scheduler} and L{SubScheduler} are going to be removed. In the mean time, if someone gets their hands on one and tries to do something supported to it, it should be as if they were doing it to the suitable replacement, L{_SiteScheduler} or L{_UserScheduler}. This mixin provides tests for that behavior, as well as for the deprecations of L{Scheduler} and L{SubScheduler}. @ivar schedulerType: L{Scheduler} or L{SubScheduler}, whichever is to be tested. """ def setUp(self): """ Create a store with an instance of C{self.schedulerType} in it. """ self.store = Store() self.oldScheduler = self.schedulerType(store=self.store) warnings = self.flushWarnings([self.setUp]) self.assertEquals(len(warnings), 1) self.assertEquals(warnings[0]['category'], PendingDeprecationWarning) self.assertEquals( warnings[0]['message'], self.schedulerType.__name__ + " is deprecated since Axiom 0.5.32. " "Just adapt stores to IScheduler.") self.scheduler = IScheduler(self.store) def _checkSynonym(self, name): # Whatever the value of the attribute is on the _SiteScheduler or # _UserScheduler instance, that's what it should be on the Scheduler # or SubScheduler Item. foo = object() setattr(self.scheduler, name, foo) self.assertIdentical(getattr(self.oldScheduler, name), foo) # And rebinding the attribute on the (Sub)Scheduler Item should rebind it on # the _SiteScheduler instance. bar = object() setattr(self.oldScheduler, name, bar) self.assertIdentical(getattr(self.scheduler, name), bar) def test_now(self): """ L{Scheduler.now} is a synonym for L{_SiteScheduler.now}. """ self._checkSynonym("now") def test_tick(self): """ L{Scheduler.tick} is a synonym for L{_SiteScheduler.tick}. """ self._checkSynonym("tick") def test_schedule(self): """ L{Scheduler.schedule} is a synonym for L{_SiteScheduler.schedule}. """ self._checkSynonym("schedule") def test_scheduledTimes(self): """ L{Scheduler.scheduledTimes} is a synonym for L{_SiteScheduler.scheduledTimes}. """ self._checkSynonym("scheduledTimes") def test_unscheduleAll(self): """ L{Scheduler.unscheduleAll} is a synonym for L{_SiteScheduler.unscheduleAll}. 
""" self._checkSynonym("unscheduleAll") def test_reschedule(self): """ L{Scheduler.reschedule} is a synonym for L{_SiteScheduler.reschedule}. """ self._checkSynonym("reschedule") def test_deprecated(self): """ Loading an existing L{Scheduler} from a L{Store} emits a deprecation warning. """ storeID = self.oldScheduler.storeID del self.oldScheduler gc.collect() scheduler = self.store.getItemByID(storeID) warnings = self.flushWarnings([self.test_deprecated]) self.assertEquals(len(warnings), 1) self.assertEquals(warnings[0]['category'], PendingDeprecationWarning) self.assertEquals( warnings[0]['message'], self.schedulerType.__name__ + " is deprecated since Axiom 0.5.32. " "Just adapt stores to IScheduler.") class BackwardsCompatibilitySchedulerTests(BackwardsCompatibilitySchedTests, TestCase): schedulerType = Scheduler def test_interface(self): """ L{Scheduler} provides L{IScheduler} (which it proxies) and L{IService} (which is a no-op). """ self.assertTrue(IScheduler.providedBy(self.oldScheduler)) self.assertTrue(IService.providedBy(self.oldScheduler)) class BackwardsCompatibilitySubSchedulerTests(BackwardsCompatibilitySchedTests, TestCase): schedulerType = SubScheduler def test_interface(self): """ L{SubScheduler} provides L{IScheduler} (which it proxies). """ self.assertTrue(IScheduler.providedBy(self.oldScheduler)) Axiom-0.6.0/axiom/test/test_sequence.py0000644000175000017500000004652210420010677020031 0ustar exarkunexarkun from twisted.trial import unittest from axiom.attributes import integer from axiom.errors import NoCrossStoreReferences from axiom.item import Item from axiom.sequence import List from axiom.store import Store class SomeItem(Item): schemaVersion = 1 typeName = 'test_sequence_some_item' foo = integer() def __repr__(self): return '' % (self.foo, id(self)) def __cmp__(self, other): if not isinstance(other, self.__class__): return cmp(super(SomeItem, self), other) return cmp(self.foo, other.foo) class SequenceTestCase(unittest.TestCase): def setUp(self): self.store = Store() self.xy = SomeItem(store=self.store, foo=-1) for i in range(10): item = SomeItem(store=self.store, foo=i) setattr(self, 'i%i'%i, item) def assertContents(self, seq, L): self.assertEquals(len(seq), len(L)) for i in range(len(L)): self.assertIdentical(seq[i], L[i]) class TestSequenceOfItems(SequenceTestCase): def test_createItem(self): seq = List(store=self.store) self.assertEquals(len(seq), 0) def test_createItemWithDefaults(self): seq = List([self.i0, self.i1], store=self.store) self.assertContents(seq, [self.i0, self.i1]) def test_createItemWithAliens(self): otherStore = Store() alien1 = SomeItem(store=otherStore, foo=1) alien2 = SomeItem(store=otherStore, foo=2) alien3 = SomeItem(store=otherStore, foo=3) self.assertRaises(NoCrossStoreReferences, List, [alien1, alien2, alien3], store=self.store) def test_appendAndGetItem(self): seq = List(store=self.store) seq.append(self.i0) self.assertEquals(len(seq), 1) self.assertEquals(seq[0], self.i0) seq.append(self.i1) seq.append(self.i2) self.assertEquals(seq[1], self.i1) self.assertEquals(seq[2], self.i2) def test_appendSliceSyntax(self): seq = List(store=self.store) self.assertContents(seq, []) seq[len(seq):len(seq)] = [self.i0] seq[len(seq):len(seq)] = [self.i1] seq[len(seq):len(seq)] = [self.i2] self.assertContents(seq, [self.i0, self.i1, self.i2]) test_appendSliceSyntax.todo = "Slices are not supported yet" def test_indexErrors(self): seq = List(store=self.store) self.assertRaises(IndexError, seq.__getitem__, 0) seq.append(self.i0) 
self.assertEquals(seq[0], self.i0) self.assertRaises(IndexError, seq.__getitem__, 1) def test_negativeIndices(self): seq = List(store=self.store) seq.append(self.i0) seq.append(self.i1) seq.append(self.i2) self.assertEquals(seq[-1], self.i2) self.assertEquals(seq[-2], self.i1) self.assertEquals(seq[-3], self.i0) self.assertRaises(IndexError, seq.__getitem__, -4) def test_setItem(self): seq = List(store=self.store) seq.append(self.i0) seq.append(self.i1) self.assertEquals(len(seq), 2) self.assertEquals(seq[0], self.i0) self.assertEquals(seq[1], self.i1) seq[1] = self.i2 self.assertEquals(seq[1], self.i2) def test_delItem(self): seq = List(store=self.store) seq.append(self.i1) seq.append(self.i2) seq.append(self.i3) self.assertEquals(seq.length, 3) self.assertEquals(seq[0], self.i1) self.assertEquals(seq[1], self.i2) self.assertEquals(seq[2], self.i3) del seq[1] self.assertEquals(seq.length, 2) self.assertEquals(seq[0], self.i1) self.assertEquals(seq[1], self.i3) self.assertRaises(IndexError, seq.__getitem__, 2) class TestSequenceOperations(SequenceTestCase): """ These test cases were taken from the list of sequence operations found at http://docs.python.org/lib/typesseq.html """ def test_x_in_s(self): seq = List(store=self.store) seq.append(self.i0) self.failUnless(self.i0 in seq) self.failIf(self.xy in seq) def test_x_not_in_s(self): seq = List(store=self.store) seq.append(self.i0) self.failUnless(self.xy not in seq) self.failIf(self.i0 not in seq) def test_s_plus_t(self): L1 = List(store=self.store) L2 = List(store=self.store) L1.append(self.i0) L2.append(self.i1) # XXX ASSUMPTION: all operations which return another # instance will return regular lists, *not* Lists. L = L1 + L2 self.assertEquals(L, [self.i0, self.i1]) def test_shallow_copies(self, n=3): seq = List(store=self.store) seq.append(self.i0) for L in (seq * n, n * seq): self.assertEquals(L, [self.i0]*n) def test_index(self): seq = List(store=self.store) seq.append(self.i0) seq.append(self.i1) self.assertIdentical(seq[0], self.i0) self.assertIdentical(seq[1], self.i1) self.failIfIdentical(seq[0], self.i1) self.failIfIdentical(seq[1], self.i0) def test_slices(self): seq = List(store=self.store) seq.append(self.i0) seq.append(self.i1) seq.append(self.i2) seq.append(self.i3) self.assertEquals(seq[0:2], [self.i0, self.i1]) self.assertEquals(seq[0:3], [self.i0, self.i1, self.i2]) self.assertEquals(seq[1:0], []) self.assertEquals(seq[-1:], [self.i3]) test_slices.todo = "Slices are not supported yet" def test_slice_with_step(self): seq = List(store=self.store) seq.append(self.i0) seq.append(self.i1) seq.append(self.i2) seq.append(self.i3) self.assertEquals(seq[0:4:2], [self.i0, self.i2]) self.assertEquals(seq[1:5:2], [self.i1, self.i3]) test_slice_with_step.todo = "Slices are not supported yet" def test_len(self): seq = List(store=self.store) self.assertEquals(len(seq), 0) seq.append(self.i0) self.assertEquals(len(seq), 1) seq.append(self.i0) self.assertEquals(len(seq), 2) seq.append(self.i0) self.assertEquals(len(seq), 3) def test_min_max(self): seq = List(store=self.store) seq.append(self.i0) seq.append(self.i1) seq.append(self.i2) self.assertIdentical(min(seq), self.i0) self.assertIdentical(max(seq), self.i2) class TestMutableSequenceOperations(SequenceTestCase): """ These test cases were taken from the list of sequence operations found at http://docs.python.org/lib/typesseq-mutable.html Some may duplicate L{TestSequenceOperations}, but who cares? 
""" def test_indexAssignment(self): seq = List(store=self.store) seq.append(self.i0) self.assertIdentical(seq[0], self.i0) seq[0] = self.i1 self.assertIdentical(seq[0], self.i1) def test_sliceAssignment(self): seq = List(store=self.store) seq.append(self.i0) seq.append(self.i0) seq.append(self.i0) seq.append(self.i0) self.assertContents(seq, [self.i0, self.i0, self.i0, self.i0]) seq[1:3] = [self.i1, self.i2] self.assertContents(seq, [self.i0, self.i1, self.i2, self.i0]) seq[1:3] = [self.i3] self.assertContents(seq, [self.i0, self.i3, self.i0]) seq[1:3] = [] self.assertContents(seq, [self.i0]) test_sliceAssignment.todo = "Slices are not supported yet" def test_deleteSlice(self): seq = List(store=self.store) seq.append(self.i0) seq.append(self.i1) seq.append(self.i2) seq.append(self.i3) del seq[1:3] self.assertEquals(len(seq), 2) self.assertIdentical(seq[0], self.i0) self.assertIdentical(seq[1], self.i3) test_deleteSlice.todo = "Slices are not supported yet" def test_sliceAssignmentStep(self): seq = List(store=self.store) seq.append(self.i0) seq.append(self.i1) seq.append(self.i2) seq.append(self.i3) seq.append(self.i4) seq.append(self.i5) seq.append(self.i6) seq[1:5:2] = [self.i7, self.i7] self.assertContents(seq, [self.i0, self.i7, self.i2, self.i7, self.i4, self.i5, self.i6]) test_sliceAssignmentStep.todo = "Slices are not supported yet" def test_deleteSliceStep(self): seq = List(store=self.store) seq.append(self.i0) seq.append(self.i1) seq.append(self.i2) seq.append(self.i3) seq.append(self.i4) seq.append(self.i5) seq.append(self.i6) del seq[1:6:2] self.assertContents(seq, [self.i0, self.i2, self.i4, self.i6]) test_deleteSliceStep.todo = "Slices are not supported yet" def test_append(self): seq = List(store=self.store) self.assertContents(seq, []) seq.append(self.i0) self.assertContents(seq, [self.i0]) seq.append(self.i1) self.assertContents(seq, [self.i0, self.i1]) def test_extend(self): L1 = List(store=self.store) L1.append(self.i0) L1.append(self.i1) L2 = List(store=self.store) L2.append(self.i2) L2.append(self.i3) L1.extend(L2) self.assertContents(L1, [self.i0, self.i1, self.i2, self.i3]) def test_extendSliceSyntax(self): L1 = List(store=self.store) L1.append(self.i0) L1.append(self.i1) L2 = List(store=self.store) L2.append(self.i2) L2.append(self.i3) L1[len(L1):len(L1)] = L2 self.assertContents(L1, [self.i0, self.i1, self.i2, self.i3]) test_extendSliceSyntax.todo = "Slices are not supported yet" def test_count(self): seq = List(store=self.store) seq.append(self.i0) seq.append(self.i1) seq.append(self.i0) seq.append(self.i2) seq.append(self.i0) seq.append(self.i2) self.assertEquals(seq.count(self.i0), 3) self.assertEquals(seq.count(self.i1), 1) self.assertEquals(seq.count(self.i2), 2) self.assertEquals(seq.count(self.i3), 0) def test_index(self): seq = List(store=self.store) seq.append(self.i0) seq.append(self.i1) seq.append(self.i0) seq.append(self.i2) seq.append(self.i0) seq.append(self.i2) self.assertEquals(seq.index(self.i0), 0) self.assertEquals(seq.index(self.i0, 0), 0) self.assertEquals(seq.index(self.i0, 1), 2) self.assertEquals(seq.index(self.i1), 1) self.assertEquals(seq.index(self.i1, 1), 1) self.assertEquals(seq.index(self.i2), 3) self.assertEquals(seq.index(self.i2, 4), 5) self.assertRaises(ValueError, seq.index, self.i3) self.assertRaises(ValueError, seq.index, self.i1, 3) self.assertRaises(ValueError, seq.index, self.i0, 1, 1) # TODO: support negative slice boundaries def test_insert(self): seq = List(store=self.store) seq.append(self.i0) seq.append(self.i0) 
seq.insert(1, self.i9) self.assertContents(seq, [self.i0, self.i9, self.i0]) def test_insertSliceSyntax(self): seq = List(store=self.store) seq.append(self.i0) seq.append(self.i0) seq[1:1] = self.i9 self.assertContents(seq, [self.i0, self.i9, self.i0]) test_insertSliceSyntax.todo = "Slices are not supported yet" def test_pop(self): seq = List(store=self.store) seq.append(self.i0) seq.append(self.i1) seq.append(self.i2) seq.append(self.i3) seq.append(self.i4) self.assertIdentical(seq.pop(), self.i4) self.assertContents(seq, [self.i0, self.i1, self.i2, self.i3]) self.assertIdentical(seq.pop(0), self.i0) self.assertContents(seq, [self.i1, self.i2, self.i3]) self.assertIdentical(seq.pop(-2), self.i2) self.assertContents(seq, [self.i1, self.i3]) self.assertRaises(IndexError, seq.pop, 13) def test_remove(self): seq = List(store=self.store) seq.append(self.i0) seq.append(self.i1) seq.append(self.i2) seq.append(self.i0) seq.append(self.i1) seq.append(self.i3) seq.append(self.i0) self.assertContents(seq, [self.i0, self.i1, self.i2, self.i0, self.i1, self.i3, self.i0]) seq.remove(self.i0) self.assertContents(seq, [self.i1, self.i2, self.i0, self.i1, self.i3, self.i0]) seq.remove(self.i0) self.assertContents(seq, [self.i1, self.i2, self.i1, self.i3, self.i0]) seq.remove(self.i2) self.assertContents(seq, [self.i1, self.i1, self.i3, self.i0]) self.assertRaises(ValueError, seq.remove, self.i4) def test_reverse(self): # UPDATE my_list_tbl SET _index = (_index - listlength + 1) * -1 seq = List(store=self.store) seq.append(self.i0) seq.append(self.i1) seq.append(self.i2) seq.append(self.i3) self.assertContents(seq, [self.i0, self.i1, self.i2, self.i3]) seq.reverse() self.assertContents(seq, [self.i3, self.i2, self.i1, self.i0]) ''' s.sort([cmp[, key[, reverse]]]) From http://docs.python.org/lib/typesseq-mutable.html The sort() method takes optional arguments for controlling the comparisons. cmp specifies a custom comparison function of two arguments (list items) which should return a negative, zero or positive number depending on whether the first argument is considered smaller than, equal to, or larger than the second argument: "cmp=lambda x,y: cmp(x.lower(), y.lower())" key specifies a function of one argument that is used to extract a comparison key from each list element:"key=str.lower" reverse is a boolean value. If set to True, then the list elements are sorted as if each comparison were reversed. In general, the key and reverse conversion processes are much faster than specifying an equivalent cmp function. This is because cmp is called multiple times for each list element while key and reverse touch each element only once. Changed in version 2.3: Support for None as an equivalent to omitting cmp was added. Changed in version 2.4: Support for key and reverse was added. 
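    (The passage above is quoted from the Python 2 documentation;
    test_sort below exercises the cmp, key, and reverse arguments against
    Axiom's List.)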
''' def test_sort(self): seq = List(store=self.store) def seq_randomize(): while len(seq): seq.pop() seq.append(self.i3) seq.append(self.i0) seq.append(self.i1) seq.append(self.i4) seq.append(self.i2) seq_randomize() seq.sort() self.assertContents(seq, [self.i0, self.i1, self.i2, self.i3, self.i4]) seq_randomize() seq.sort(lambda x,y: cmp(y,x)) self.assertContents(seq, [self.i4, self.i3, self.i2, self.i1, self.i0]) def strangecmp(x, y): xfoo, yfoo = x.foo, y.foo if xfoo < 3: xfoo += 100 if yfoo < 3: yfoo += 100 return cmp(xfoo, yfoo) seq_randomize() seq.sort(strangecmp) self.assertContents(seq, [self.i3, self.i4, self.i0, self.i1, self.i2]) seq_randomize() seq.sort(None, lambda x:x, True) self.assertContents(seq, [self.i4, self.i3, self.i2, self.i1, self.i0]) seq_randomize() seq.sort(strangecmp, lambda x:x, True) self.assertContents(seq, [self.i2, self.i1, self.i0, self.i4, self.i3]) def test_count(self): seq = List(store=self.store) seq.append(self.i1) seq.append(self.i2) seq.append(self.i2) seq.append(self.i3) seq.append(self.i3) seq.append(self.i3) self.assertEquals(seq.count(self.i1), 1) self.assertEquals(seq.count(self.i2), 2) self.assertEquals(seq.count(self.i3), 3) def test_contains(self): seq = List(store=self.store) seq.append(self.i1) seq.append(self.i2) self.failUnless(self.i1 in seq) self.failUnless(self.i2 in seq) self.failIf(self.i3 in seq) def test_multicontains(self): seq1 = List(store=self.store) seq2 = List(store=self.store) seq1.append(self.i1) seq2.append(self.i2) self.failUnless(self.i1 in seq1) self.failUnless(self.i2 in seq2) self.failIf(self.i1 in seq2) self.failIf(self.i2 in seq1) def test_multidelitem(self): seq1 = List(store=self.store) seq2 = List(store=self.store) seq1.append(self.i1) seq1.append(self.i2) seq2.append(self.i1) seq2.append(self.i2) del seq1[0] self.assertIdentical(seq2[0], self.i1) self.assertIdentical(seq2[1], self.i2) Axiom-0.6.0/axiom/test/test_slotmachine.py0000644000175000017500000000731611203025675020531 0ustar exarkunexarkun from twisted.trial import unittest from axiom.slotmachine import SetOnce, Attribute, SlotMachine, SchemaMachine class A(SlotMachine): slots = ['a', 'initialized'] class B(SchemaMachine): test = Attribute() initialized = SetOnce() other = SetOnce(default=None) nondescriptor = 'readable' method = lambda self: self class Bsub(B): pass class C(object): __slots__ = ['a', 'b', 'c', 'initialized'] class D: def activate(self): self.initialized = 1 self.test = 2 self.a = 3 self.b = 4 self.c = 5 class E(object): pass class X(B, A, C, D, E): pass class Y(Bsub): blah = SetOnce() class ClassWithDefault: x = 1 class DefaultTest(SchemaMachine, ClassWithDefault): x = Attribute() class Decoy(ClassWithDefault): pass class DecoyDefault(Decoy, DefaultTest): pass class DefaultOverride(DefaultTest): x = 5 class SlotMachineTest(unittest.TestCase): def assertBSchema(self, b): """ Test that the given instance conforms to L{B}'s schema. 
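        That is: C{test} may be rebound freely, C{other} may be set only
        once, unknown attributes such as C{nottest} are rejected entirely,
        and the plain class attributes C{nondescriptor} and C{method} are
        readable but not settable.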
""" b.test = 1 b.test = 2 self.assertEquals(b.test, 2) self.assertRaises(AttributeError, setattr, b, 'nottest', 'anything') self.assertRaises(AttributeError, getattr, b, 'nottest') self.assertEquals(b.other, None) b.other = 7 self.assertEquals(b.other, 7) self.assertRaises(AttributeError, setattr, b, 'other', 'anything') self.assertEquals(b.nondescriptor, 'readable') err = self.assertRaises(AttributeError, setattr, b, 'nondescriptor', 'writable') self.assertEquals(str(err), "%r can't set attribute 'nondescriptor'" % (type(b).__name__,)) self.assertEquals(b.nondescriptor, 'readable') self.assertEquals(b.method(), b) err = self.assertRaises(AttributeError, setattr, b, 'method', lambda: 5) self.assertEquals(str(err), "%r can't set attribute 'method'" % (type(b).__name__,)) self.assertEquals(b.method(), b) def testAttributesNotAllowed(self): b = B() self.assertBSchema(b) def testTrivialSubclass(self): b = Bsub() self.assertBSchema(b) def testSetOnce(self): b = B() b.initialized = 1 self.assertRaises(AttributeError, setattr, b, 'initialized', 2) self.assertEquals(b.initialized, 1) def testClassicMixin(self): x = X() x.activate() self.assertRaises(AttributeError, setattr, x, 'initialized', 2) self.assertRaises(AttributeError, setattr, x, 'nottest', 'anything') self.assertRaises(AttributeError, getattr, x, 'nottest') def testAttributesTraverseDeepHierarchy(self): y = Y() self.assertBSchema(y) def test_baseDefault(self): """ L{DefaultTest.x} should take precedence over L{ClassWithDefault.x}. """ dt = DefaultTest() # self.failUnless('x' in dt.__slots__, 'x not in '+repr(dt.__slots__) ) dt.x = 2 def test_decoyDefault(self): """ Same as L{test_baseDefault}, but with a decoy subclass. """ d = DecoyDefault() d.x = 2 def test_descriptorOverride(self): """ L{DefaultOverride.x} should take precedence over L{DefaultTest.x} and prevent the I{x} attribute from being set. """ d = DefaultOverride() err = self.assertRaises(AttributeError, setattr, d, 'x', 23) self.assertEquals(str(err), "'DefaultOverride' can't set attribute 'x'") self.assertEquals(d.x, 5) Axiom-0.6.0/axiom/test/test_substore.py0000644000175000017500000001236611010066072020062 0ustar exarkunexarkunfrom twisted.application.service import Service, IService from twisted.python import filepath from twisted.trial import unittest from axiom.store import Store from axiom.item import Item from axiom.substore import SubStore from axiom.attributes import text, bytes, boolean, inmemory class SubStored(Item): schemaVersion = 1 typeName = 'substoredthing' a = text() b = bytes() class YouCantStartThis(Item, Service): parent = inmemory() running = inmemory() name = inmemory() started = boolean(default=False) def startService(self): self.started = True class YouShouldStartThis(Item, Service): parent = inmemory() running = inmemory() name = inmemory() started = boolean(default=False) def startService(self): self.started = True class SubStoreTest(unittest.TestCase): """ Test on-disk creation of substores. """ def testOneThing(self): """ Ensure that items can be inserted into substores and subsequently retrieved. 
""" topdb = filepath.FilePath(self.mktemp()) s = Store(topdb) ss = SubStore.createNew(s, ['account', 'bob@divmod.com']) s2 = ss.open() ssd = SubStored(store=s2, a=u'hello world', b='what, its text') oid = ss.storeID oid2 = ssd.storeID s2.close() s.close() reopens = Store(topdb) reopenss = reopens.getItemByID(oid) reopens2 = reopenss.open() reopenssd = reopens2.getItemByID(oid2) self.assertEquals(reopenssd.a, u'hello world') self.assertEquals(reopenssd.b, 'what, its text') def test_oneThingMemory(self): """ Ensure that items put into in-memory substores are retrievable. """ s = Store() ss = SubStore.createNew(s, ['account', 'bob@divmod.com']) s2 = ss.open() ssd = SubStored(store=s2, a=u'hello world', b='what, its text') oid = ss.storeID oid2 = ssd.storeID s2.close() self.assertIdentical(s.getItemByID(oid), ss) self.assertIdentical(ss.open(), s2) item = s2.getItemByID(oid2) self.assertEquals(item.a, u'hello world') self.assertEquals(item.b, 'what, its text') def test_hereTodayGoneTomorrow(self): """ Ensure that substores exist after closing them. """ s = Store() ss = SubStore.createNew(s, ['account', 'bob@divmod.com']) s2 = ss.open() ssd = SubStored(store=s2, a=u'hello world', b='what, its text') oid = ss.storeID oid2 = ssd.storeID s2.close() #the following is done to mimic garbage collection of objects holding #on to substores del s2._openSubStore ss = s.getItemByID(oid) s2 = ss.open() item = s2.getItemByID(oid2) self.assertEquals(item.a, u'hello world') self.assertEquals(item.b, 'what, its text') def test_memorySubstoreFile(self): """ In-memory substores whose stores have file directories should be able to create files. """ filesdir = filepath.FilePath(self.mktemp()) s = Store(filesdir=filesdir) ss = SubStore.createNew(s, ['account', 'bob@divmod.com']) s2 = ss.open() f = s2.newFile("test.txt") f.write("yay") f.close() self.assertEqual(open(f.finalpath.path).read(), "yay") class SubStoreStartupSemantics(unittest.TestCase): """ These tests verify that interactions between store and substore services are correct. They also provide some documentation of expected edge-case behavior. Read the code if you are interested in how to get startup notifications from substore items. """ def setUp(self): """ Set up the tests by creating a store and a substore and opening them both. """ self.topdb = topdb = Store(filepath.FilePath(self.mktemp())) self.ssitem = ssitem = SubStore.createNew( topdb, ["dontstartme", "really"]) self.ss = ssitem.open() self.serviceStarted = False def testDontStartNormally(self): """ Substores' services are not supposed to be started when their parent stores are. """ ss = self.ss ycst = YouCantStartThis(store=ss) ss.powerUp(ycst, IService) self._startService() self.failIf(ycst.started) def testStartEverythingExplicitly(self): """ Substores implement IService themselves, just as regular stores do, via the special-case machinery. """ ss = self.ss ysst = YouShouldStartThis(store=ss) ss.powerUp(ysst, IService) self.topdb.powerUp(self.ssitem, IService) self._startService() self.failUnless(ysst.started) def _startService(self): """ Start the service and make sure we know it's started so tearDown can shut it down. """ assert not self.serviceStarted self.serviceStarted = True return IService(self.topdb).startService() def tearDown(self): """ Stop services that may have been started by these test cases. 
""" if self.serviceStarted: return IService(self.topdb).stopService() Axiom-0.6.0/axiom/test/test_tablecreate.py0000644000175000017500000000640611010066072020465 0ustar exarkunexarkun""" Tests for table-creation. """ from axiom import item from axiom import attributes from axiom import store from twisted.trial.unittest import TestCase from twisted.python import filepath class A(item.Item): typeName = 'test_table_creator' schemaVersion = 1 attr = attributes.integer(default=3) class SomeError(Exception): """ Dummy error for testing. """ def createAndRaise(s): """ Create an A item, then raise a L{SomeError} (which will revert the transaction). This is because there is no direct API for creating tables. """ A(store=s) raise SomeError() class TableCreationTest(TestCase): """ Tests for table creation. """ def test_committedTableCreation(self): """ When tables are created in a transaction which is committed, they should persist in both Axiom's in-memory schema representation and within the on-disk SQL store. """ storedir = filepath.FilePath(self.mktemp()) s1 = store.Store(storedir) s1.transact(A, store=s1) self.assertIn(A, s1.typeToTableNameCache) s1.close() s2 = store.Store(storedir) self.assertIn(A, s2.typeToTableNameCache) s2.close() def test_revertedTableCreation(self): """ When tables are created in a transaction which is reverted, they should persist in neither the SQL store nor the in-memory schema representation. """ storedir = self.mktemp() s1 = store.Store(storedir) self.assertRaises(SomeError, s1.transact, createAndRaise, s1) self.assertNotIn(A, s1.typeToTableNameCache) s1.close() s2 = store.Store(storedir) self.assertNotIn(A, s2.typeToTableNameCache) def test_differentStoreTableCreation(self): """ If two different stores are opened before a given table is created, and one creates it, this should be transparent to both item creation and queries made from either store. """ storedir = self.mktemp() s1 = store.Store(storedir) s2 = store.Store(storedir) a1 = A(store=s1) a2 = A(store=s2) self.assertEquals(list(s1.query( A, sort=A.storeID.ascending).getColumn("storeID")), [a1.storeID, a2.storeID]) self.assertEquals(list(s2.query( A, sort=A.storeID.ascending).getColumn("storeID")), [a1.storeID, a2.storeID]) def test_dontReadTheSchemaSoMuch(self): """ This is a regression test for a bug in Axiom where the schema was refreshed from SQL every time a table needed to be created, regardless of whether the schema needed to be refreshed or not. In addition to being logically incorrect, this error severely hurt performance. The schema should only be re-read when a change is detected, by way of a table being created in two different Store objects, as in the test above in L{TableCreationTest.test_differentStoreTableCreation}. 
""" s1 = store.Store(filepath.FilePath(self.mktemp())) def die(): self.fail("schema refreshed unnecessarily called too much") s1._startup = die A(store=s1) Axiom-0.6.0/axiom/test/test_tags.py0000644000175000017500000000154710323267011017153 0ustar exarkunexarkun from twisted.trial import unittest from axiom.store import Store from axiom.tags import Catalog from axiom.item import Item from axiom.attributes import text class Gizmo(Item): typeName = 'test_gizmo' schemaVersion = 1 name = text() class TagTestCase(unittest.TestCase): def testTagging(self): s = Store() c = Catalog(store=s) g1 = Gizmo(store=s, name=u'one') g2 = Gizmo(store=s, name=u'two') c.tag(g1, u'single') c.tag(g1, u'multi') c.tag(g2, u'multi') c.tag(g1, u'multi') self.assertEquals(list(c.tagsOf(g1)), [u'single', u'multi']) self.assertEquals(list(c.tagsOf(g2)), [u'multi']) self.assertEquals(list(c.objectsIn(u'single')), [g1]) self.assertEquals(list(c.objectsIn(u'multi')), [g1, g2]) Axiom-0.6.0/axiom/test/test_unavailable_type.py0000644000175000017500000000130210326362277021543 0ustar exarkunexarkunfrom twisted.trial.unittest import TestCase class UnavailableTypeTestCase(TestCase): def testUnavailable(self): from axiom import attributes, item, store def makeItem(): class MyItem(item.Item): typeName = 'test_deadtype_myitem' schemaVersion = 1 hello = attributes.integer() return MyItem storedir = self.mktemp() theStore = store.Store(storedir) makeItem()(store=theStore) item = reload(item) store = reload(store) store.Store(storedir) testUnavailable.skip = 'This test breaks EVERY subsequent test, because reloading item and store is not allowed' Axiom-0.6.0/axiom/test/test_upgrading.py0000644000175000017500000007061111127421367020204 0ustar exarkunexarkun# Copyright (c) 2008 Divmod. See LICENSE for details. """ Tests for the Axiom upgrade system. """ import sys, StringIO from zope.interface import Interface from zope.interface.verify import verifyObject from twisted.trial import unittest from twisted.python import filepath from twisted.application.service import IService from twisted.internet.defer import maybeDeferred from twisted.python.reflect import namedModule from twisted.python import log from axiom.iaxiom import IAxiomaticCommand from axiom import store, upgrade, item, errors, attributes from axiom.upgrade import _StoreUpgrade from axiom.item import declareLegacyItem from axiom.scripts import axiomatic from axiom.store import Store from axiom.substore import SubStore from axiom.plugins.axiom_plugins import Upgrade from axiom.test.util import CommandStub def axiomInvalidate(itemClass): """ Remove the registered item class from the Axiom module system's memory, including: the item's current schema, legacy schema declarations, and upgraders. This makes it possible, for example, to reload a module without Axiom complaining about it. This API is still in a test module because it is _NOT YET SAFE_ for using while databases are open; it does not interact with open databases' caches, for example. @param itemClass: an Item subclass that you no longer wish to use. """ # Note, be very careful not to use comparison on attributes here. For # example, do not use list.remove(), since it is equality based. 
    # -exarkun
    for cascades in attributes._cascadingDeletes.itervalues():
        for i in xrange(len(cascades) - 1, -1, -1):
            if cascades[i].type is itemClass:
                del cascades[i]

    store._typeNameToMostRecentClass.pop(itemClass.typeName, None)

    for (tnam, schever) in item._legacyTypes.keys():
        if tnam == itemClass.typeName:
            item._legacyTypes.pop((tnam, schever))

    for k in upgrade._upgradeRegistry.keys():
        if k[0] == itemClass.typeName:
            upgrade._upgradeRegistry.pop(k)


def axiomInvalidateModule(moduleObject):
    """
    Call L{axiomInvalidate} on all Item subclasses defined in a module.
    """
    for v in moduleObject.__dict__.values():
        if isinstance(v, item.MetaItem):
            axiomInvalidate(v)


schemaModules = []

def loadSchemaModule(name):
    schemaModules.append(namedModule(name))
    result = schemaModules[-1]
    choose(None)
    return result


def choose(module=None):
    """
    Choose among the various "adventurer" modules for upgrade tests.

    @param module: the module object which should next be treated as
    "current".
    """
    for old in schemaModules:
        axiomInvalidateModule(old)
    if module is not None:
        reload(module)


oldapp = loadSchemaModule('axiom.test.oldapp')
brokenapp = loadSchemaModule('axiom.test.brokenapp')
toonewapp = loadSchemaModule('axiom.test.toonewapp')
morenewapp = loadSchemaModule('axiom.test.morenewapp')
onestepapp = loadSchemaModule('axiom.test.onestepapp')
newapp = loadSchemaModule('axiom.test.newapp')
oldpath = loadSchemaModule('axiom.test.oldpath')
newpath = loadSchemaModule('axiom.test.newpath')
path_postcopy = loadSchemaModule('axiom.test.path_postcopy')
deleteswordapp = loadSchemaModule('axiom.test.deleteswordapp')


class SchemaUpgradeTest(unittest.TestCase):
    def setUp(self):
        self.dbdir = filepath.FilePath(self.mktemp())

    def openStore(self, dbg=False):
        self.currentStore = store.Store(self.dbdir, debug=dbg)
        return self.currentStore

    def closeStore(self):
        self.currentStore.close()
        self.currentStore = None

    def startStoreService(self):
        svc = IService(self.currentStore)
        svc.getServiceNamed("Batch Processing Controller").disownServiceParent()
        svc.startService()


def _logMessagesFrom(f):
    L = []
    log.addObserver(L.append)
    d = maybeDeferred(f)
    def x(ign):
        log.removeObserver(L.append)
        return ign
    return d.addBoth(x).addCallback(lambda ign: L)


def callWithStdoutRedirect(f, *a, **kw):
    """
    Redirect stdout and invoke C{f}.

    @returns: C{(returnValue, stdout)}
    """
    output = StringIO.StringIO()
    sys.stdout, stdout = output, sys.stdout
    try:
        result = f(*a, **kw)
    finally:
        sys.stdout = stdout
    return result, output


class SwordUpgradeTest(SchemaUpgradeTest):
    def tearDown(self):
        choose(oldapp)

    def testUnUpgradeableStore(self):
        self._testTwoObjectUpgrade()
        choose(toonewapp)
        self.assertRaises(errors.NoUpgradePathAvailable, self.openStore)

    def testUpgradeWithMissingVersion(self):
        playerID, swordID = self._testTwoObjectUpgrade()
        choose(morenewapp)
        s = self.openStore()
        self.startStoreService()
        def afterUpgrade(result):
            player = s.getItemByID(playerID, autoUpgrade=False)
            sword = s.getItemByID(swordID, autoUpgrade=False)
            self._testPlayerAndSwordState(player, sword)
        return s.whenFullyUpgraded().addCallback(afterUpgrade)

    def test_upgradeSkipVersion(self):
        """
        Verify that an upgrader registered to skip a version can execute
        properly.
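        Here the C{onestepapp} fixture is assumed to register a single
        upgrader which jumps over the intermediate schema version in one
        step, rather than one upgrader per version increment.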
""" playerID, swordID = self._testTwoObjectUpgrade() choose(onestepapp) s = self.openStore() self.startStoreService() def afterUpgrade(result): player = s.getItemByID(playerID, autoUpgrade=False) sword = s.getItemByID(swordID, autoUpgrade=False) self._testPlayerAndSwordState(player, sword) return s.whenFullyUpgraded().addCallback(afterUpgrade) def test_loggingAtAppropriateTimes(self): """ Verify that log messages show up when we do upgrade work, but then don't when we don't. """ def someLogging(logMessages): ok = False unrelatedMessages = [] for msgdict in logMessages: msgstr = u''.join(msgdict.get('message', ())) if u'finished upgrading' in msgstr: ok = True else: unrelatedMessages.append(msgstr) self.failUnless(ok, "No messages related to upgrading: %r" % (unrelatedMessages,)) s = self.openStore() def afterUpgrade(noLogMessages): for nmsgdict in noLogMessages: mm = u''.join(nmsgdict.get('message', ())) if mm: self.failIfIn(u'finished upgrading', mm) self.startStoreService() return _logMessagesFrom(s.whenFullyUpgraded ).addCallback(afterUpgrade) return _logMessagesFrom(self.testTwoObjectUpgrade_UseService).addCallback(someLogging) def test_basicErrorLogging(self): """ Verify that if an exception is raised in an upgrader, the exception will be logged. """ playerID, swordID = self._testTwoObjectUpgrade() choose(brokenapp) s = self.openStore() self.startStoreService() def checkException(ign): # It's redundant that the errback is called and the failure is # logged. See #2638. loggedErrors = self.flushLoggedErrors(errors.ItemUpgradeError) self.assertEqual(len(loggedErrors), 1) upgradeError = loggedErrors[0] loggedErrors = self.flushLoggedErrors(brokenapp.UpgradersAreBrokenHere) self.assertEqual(len(loggedErrors), 1) originalError = loggedErrors[0] oldType = item.declareLegacyItem( oldapp.Sword.typeName, oldapp.Sword.schemaVersion, {}) e = upgradeError.value self.assertEqual(e.storeID, swordID) self.assertIdentical(e.oldType, oldType) self.assertIdentical(e.newType, brokenapp.Sword) d = s.whenFullyUpgraded() d = self.assertFailure(d, errors.ItemUpgradeError) d.addCallback(checkException) return d def _testTwoObjectUpgrade(self): choose(oldapp) s = self.openStore() self.assertIdentical( store._typeNameToMostRecentClass[oldapp.Player.typeName], oldapp.Player) sword = oldapp.Sword( store=s, name=u'flaming vorpal doom', hurtfulness=7) player = oldapp.Player( store=s, name=u'Milton', sword=sword) self.closeStore() # Perform an adjustment. return player.storeID, sword.storeID def testTwoObjectUpgrade_OuterFirst(self): playerID, swordID = self._testTwoObjectUpgrade() player, sword = self._testLoadPlayerFirst(playerID, swordID) self._testPlayerAndSwordState(player, sword) def testTwoObjectUpgrade_InnerFirst(self): playerID, swordID = self._testTwoObjectUpgrade() player, sword = self._testLoadSwordFirst(playerID, swordID) self._testPlayerAndSwordState(player, sword) def testTwoObjectUpgrade_AutoOrder(self): playerID, swordID = self._testTwoObjectUpgrade() player, sword = self._testAutoUpgrade(playerID, swordID) self._testPlayerAndSwordState(player, sword) def testTwoObjectUpgrade_UseService(self): playerID, swordID = self._testTwoObjectUpgrade() choose(newapp) s = self.openStore() self.startStoreService() # XXX *this* test really needs 10 or so objects to play with in order # to be really valid... 
def afterUpgrade(result): player = s.getItemByID(playerID, autoUpgrade=False) sword = s.getItemByID(swordID, autoUpgrade=False) self._testPlayerAndSwordState(player, sword) return s.whenFullyUpgraded().addCallback(afterUpgrade) def _testAutoUpgrade(self, playerID, swordID): choose(newapp) s = self.openStore() for dummy in s._upgradeManager.upgradeEverything(): pass player = s.getItemByID(playerID, autoUpgrade=False) sword = s.getItemByID(swordID, autoUpgrade=False) return player, sword def _testLoadPlayerFirst(self, playerID, swordID): # Everything old is new again choose(newapp) s = self.openStore() player = s.getItemByID(playerID) sword = s.getItemByID(swordID) return player, sword def _testLoadSwordFirst(self, playerID, swordID): choose(newapp) s = self.openStore() sword = s.getItemByID(swordID) player = s.getItemByID(playerID) return player, sword def _testPlayerAndSwordState(self, player, sword): assert not player.__legacy__ assert not sword.__legacy__ self.assertEquals(player.name, 'Milton') self.failIf(hasattr(player, 'sword')) self.assertEquals(sword.name, 'flaming vorpal doom') self.assertEquals(sword.damagePerHit, 14) self.failIf(hasattr(sword, 'hurtfulness')) self.assertEquals(sword.owner.storeID, player.storeID) self.assertEquals(type(sword.owner), type(player)) self.assertEquals(sword.owner, player) self.assertEquals(sword.activated, 1) self.assertEquals(player.activated, 1) def test_multipleLegacyVersions(self): """ If multiple legacy schema versions are present, all of them should be upgraded. """ playerID, swordID = self._testTwoObjectUpgrade() choose(newapp) s = self.openStore() self.startStoreService() def afterFirstUpgrade(result): self.closeStore() choose(morenewapp) s = self.openStore() self.startStoreService() return s.whenFullyUpgraded().addCallback(afterSecondUpgrade, s) def afterSecondUpgrade(result, store): player = store.getItemByID(playerID, autoUpgrade=False) sword = store.getItemByID(swordID, autoUpgrade=False) self._testPlayerAndSwordState(player, sword) return s.whenFullyUpgraded().addCallback(afterFirstUpgrade) class SubStoreCompat(SwordUpgradeTest): def setUp(self): self.topdbdir = filepath.FilePath(self.mktemp()) self.subStoreID = None def openStore(self): self.currentTopStore = store.Store(self.topdbdir) if self.subStoreID is not None: self.currentSubStore = self.currentTopStore.getItemByID(self.subStoreID).open() else: ss = SubStore.createNew(self.currentTopStore, ['sub']) self.subStoreID = ss.storeID self.currentSubStore = ss.open() return self.currentSubStore def closeStore(self): self.currentSubStore.close() self.currentTopStore.close() self.currentSubStore = None self.currentTopStore = None def startStoreService(self): svc = IService(self.currentTopStore) svc.getServiceNamed("Batch Processing Controller").disownServiceParent() svc.startService() class PathUpgrade(SchemaUpgradeTest): """ Tests for items with path attributes, using registerAttributeCopyingUpgrader. """ def _runPathUpgrade(self, module): """ Load the 'oldpath' module, then upgrade items created from it to the versions in the specified module. """ axiomInvalidateModule(module) reload(oldpath) self.openStore() nfp = self.currentStore.newFilePath("pathname") oldpath.Path(store=self.currentStore, thePath=nfp) self.closeStore() axiomInvalidateModule(oldpath) reload(module) self.openStore() self.startStoreService() return nfp, self.currentStore.whenFullyUpgraded() def testUpgradePath(self): """ Verify that you can upgrade a path attribute in the simplest possible way. 
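        The C{newpath} fixture is expected to use something along the lines
        of C{registerAttributeCopyingUpgrader(Path, 1, 2)}, so the upgraded
        item should end up with the same C{thePath} value it started with.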
""" nfp, d = self._runPathUpgrade(newpath) def checkPathEquivalence(n): self.assertEquals( self.currentStore.findUnique(newpath.Path).thePath.path, nfp.path) return d.addCallback(checkPathEquivalence) def test_postCopy(self): """ Ensure that a post-copy function, if specified to registerAttributeCopyingUpgrader, is run after item upgrade. """ nfp, d = self._runPathUpgrade(path_postcopy) path2 = nfp.child("foo") def checkPath(_): self.assertEquals( self.currentStore.findUnique(path_postcopy.Path).thePath.path, path2.path) return d.addCallback(checkPath) oldcirc = loadSchemaModule('axiom.test.oldcirc') newcirc = loadSchemaModule('axiom.test.newcirc') oldobsolete = loadSchemaModule('axiom.test.oldobsolete') newobsolete = loadSchemaModule('axiom.test.newobsolete') class IObsolete(Interface): """ Interface representing an undesirable feature. """ class DeletionTest(SchemaUpgradeTest): def testCircular(self): """ If you access an item, B, through a reference on another item, A, which is deleted in the course of B's upgrade, you should still get a reference to B. """ reload(oldcirc) self.openStore() b = oldcirc.B(a=oldcirc.A(store=self.currentStore), store=self.currentStore) b.a.b = b self.closeStore() axiomInvalidateModule(oldcirc) reload(newcirc) self.openStore() origA = self.currentStore.findUnique(newcirc.A) origB = origA.b secondA = self.currentStore.findUnique(newcirc.A) secondB = secondA.b self.assertEquals(origB, secondB) self.assertNotEqual(origA, secondA) def testPowerupsFor(self): """ Powerups deleted during upgrades should be omitted from the results of powerupsFor. """ reload(oldobsolete) self.openStore() o = oldobsolete.Obsolete(store=self.currentStore) self.currentStore.powerUp(o, IObsolete) # sanity check self.assertEquals(IObsolete(self.currentStore), o) self.closeStore() axiomInvalidateModule(oldobsolete) reload(newobsolete) self.openStore() self.assertEquals(list(self.currentStore.powerupsFor(IObsolete)), []) self.closeStore() axiomInvalidateModule(newobsolete) def testPowerupsAdapt(self): """ Powerups deleted during upgrades should be omitted from the results of powerupsFor. """ reload(oldobsolete) self.openStore() o = oldobsolete.Obsolete(store=self.currentStore) self.currentStore.powerUp(o, IObsolete) # sanity check self.assertEquals(IObsolete(self.currentStore), o) self.closeStore() axiomInvalidateModule(oldobsolete) reload(newobsolete) self.openStore() self.assertEquals(IObsolete(self.currentStore, None), None) self.closeStore() axiomInvalidateModule(newobsolete) two_upgrades_old = loadSchemaModule( 'axiom.test.upgrade_fixtures.two_upgrades_old') two_upgrades_new = loadSchemaModule( 'axiom.test.upgrade_fixtures.two_upgrades_new') reentrant_old = loadSchemaModule( 'axiom.test.upgrade_fixtures.reentrant_old') reentrant_new = loadSchemaModule( 'axiom.test.upgrade_fixtures.reentrant_new') override_init_old = loadSchemaModule( 'axiom.test.upgrade_fixtures.override_init_old') override_init_new = loadSchemaModule( 'axiom.test.upgrade_fixtures.override_init_new') replace_attribute_old = loadSchemaModule( 'axiom.test.upgrade_fixtures.replace_attribute_old') replace_attribute_new = loadSchemaModule( 'axiom.test.upgrade_fixtures.replace_attribute_new') replace_delete_old = loadSchemaModule( 'axiom.test.upgrade_fixtures.replace_delete_old') replace_delete_new = loadSchemaModule( 'axiom.test.upgrade_fixtures.replace_delete_new') class DuringUpgradeTests(unittest.TestCase): """ Tests for upgraders' interactions with each other and with the Store while an upgrader is running. 
""" def tearDown(self): choose(None) dbdir = None currentStore = None def storeWithVersion(self, chosenModule): """ Open a store with a particular module chosen, closing the old store if it was open already. """ choose(chosenModule) if self.currentStore is not None: self.currentStore.close() if self.dbdir is None: self.dbdir = filepath.FilePath(self.mktemp()) self.currentStore = store.Store(self.dbdir) return self.currentStore def test_upgradeLegacyReference(self): """ Let a and b be two items which are being upgraded, instances of item types A and B respectively. a has a reference attribute, x, which points to b. In A's 1to2 upgrader, newA.x is set to oldA.x, which is (at that time) a DummyItem, i.e. an item with __legacy__ set to True. This is a regression test for a bug in this scenario where caching was too aggressive, and a.x would still refer to a legacy item after the upgrade was finished. After performing this upgrade, a.x should refer to a B v2, i.e. an upgraded version of b. """ old = self.storeWithVersion(two_upgrades_old) storeID = two_upgrades_old.Referrer( store=old, referee=two_upgrades_old.Referee(store=old)).storeID new = self.storeWithVersion(two_upgrades_new) referrer = new.getItemByID(storeID) referee = referrer.referee self.assertTrue( isinstance(referee, two_upgrades_new.Referee), "%r is a %r but should be %r" % ( referee, type(referee), two_upgrades_new.Referee)) def test_reentrantUpgraderFailure(self): """ If, while an upgrader is running, it triggers its own upgrade, there should be a loud failure; it's already hard enough to deal with upgrade ordering and querying for legacy items; upgraders cannot reasonably be written to be correct in the face of reentrancy. """ old = self.storeWithVersion(reentrant_old) storeID = reentrant_old.Simple(store=old).storeID new = self.storeWithVersion(reentrant_new) self.assertRaises(errors.UpgraderRecursion, new.getItemByID, storeID) # A whitebox flourish to make sure our state tracking is correct: self.failIf(new._upgradeManager._currentlyUpgrading, "No upgraders should currently be in progress.") def test_overridenInitializerInUpgrader(self): """ A subclass of Item which overrides __init__ should be cached by the end of Item.__init__, so that logic written by the subclass has normal caching semantics. """ old = self.storeWithVersion(override_init_old) storeID = override_init_old.Simple(store=old).storeID new = self.storeWithVersion(override_init_new) upgraded = new.getItemByID(storeID) simpleSelf, simpleGotItem = upgraded.verify self.assertIdentical(upgraded, simpleSelf) self.assertIdentical(upgraded, simpleGotItem) def _reentrantReferenceForeignUpgrader(self, oldModule, newModule): old = self.storeWithVersion(oldModule) storeID = oldModule.Referrer( store=old, referee=oldModule.Referee( store=old, value=oldModule.OLD_VALUE)).storeID new = self.storeWithVersion(newModule) referrer = new.getItemByID(storeID) upgraded = referrer.referee self.assertEqual( upgraded.value, newModule.NEW_VALUE, "Upgraded reference does not have new value.") def test_referenceModifiedByForeignUpgrader(self): """ If the value of a reference on an Item requires an upgrade and the upgrade replaces the value of the reference with a different Item, then evaluating the reference attribute on the referrer should result in the new value of the attribute. 
""" self._reentrantReferenceForeignUpgrader( replace_attribute_old, replace_attribute_new) def test_cascadingDeletedReferenceModifiedByForeignUpgrader(self): """ If the value of a whenDeleted=CASCADE reference on an Item requires an upgrade and the upgrade replaces the value of the reference with a new Item and then deletes the old value of the reference, then evaluating the reference attribute on the referrer should result in the new value of the attribute. """ self._reentrantReferenceForeignUpgrader( replace_delete_old, replace_delete_new) class AxiomaticUpgradeTest(unittest.TestCase): """ L{Upgrade} implements an I{axiomatic} subcommand for synchronously upgrading all items in a store. """ def setUp(self): """ Create a temporary on-disk Store and an instance of L{Upgrade}. """ self.dbdir = self.mktemp() self.store = store.Store(self.dbdir) def tearDown(self): """ Close the temporary Store. """ self.store.close() def test_providesCommandInterface(self): """ L{Upgrade} provides L{IAxiomaticCommand}. """ self.assertTrue(verifyObject(IAxiomaticCommand, Upgrade)) def test_axiomaticSubcommand(self): """ L{Upgrade} is available as a subcommand of I{axiomatic}. """ subCommands = axiomatic.Options().subCommands [options] = [cmd[2] for cmd in subCommands if cmd[0] == 'upgrade'] self.assertIdentical(options, Upgrade) def test_successOutput(self): """ Upon successful completion of the upgrade, L{Upgrade} writes a success message to stdout. """ cmd = Upgrade() cmd.parent = CommandStub(self.store, 'upgrade') result, output = callWithStdoutRedirect(cmd.parseOptions, []) self.assertEqual(output.getvalue(), 'Upgrade complete\n') def test_axiomaticUpgradeEverything(self): """ L{Upgrade.upgradeStore} upgrades all L{Item}s. """ choose(oldapp) swordID = oldapp.Sword( store=self.store, name=u'broadsword', hurtfulness=5).storeID self.store.close() choose(deleteswordapp) cmd = Upgrade() cmd.parent = CommandStub(store.Store(self.dbdir), 'upgrade') result, output = callWithStdoutRedirect( cmd.parseOptions, ['--count', '100']) self.store = store.Store(self.dbdir) self.assertRaises( KeyError, self.store.getItemByID, swordID, autoUpgrade=False) def test_axiomaticUpgradeExceptionBubbling(self): """ Exceptions encountered by L{Upgrade.upgradeStore} are handled and re-raised as L{errors.ItemUpgradeError} with attributes indicating which L{Item} was being upgraded when the exception occurred. """ choose(oldapp) swordID = oldapp.Sword( store=self.store, name=u'longsword', hurtfulness=4).storeID self.store.close() choose(brokenapp) self.store = store.Store(self.dbdir) cmd = Upgrade() cmd.parent = CommandStub(self.store, 'upgrade') cmd.count = 100 err = self.assertRaises( errors.ItemUpgradeError, callWithStdoutRedirect, cmd.upgradeStore, self.store) self.assertTrue( err.originalFailure.check(brokenapp.UpgradersAreBrokenHere)) oldType = item.declareLegacyItem( oldapp.Sword.typeName, oldapp.Sword.schemaVersion, {}) self.assertEqual(err.storeID, swordID) self.assertIdentical(err.oldType, oldType) self.assertIdentical(err.newType, brokenapp.Sword) def test_axiomaticUpgradePerformFails(self): """ If an exception occurs while upgrading items, L{Upgrade.postOptions} reports the item and schema version for which it occurred and returns without exception. 
""" choose(oldapp) swordID = oldapp.Sword( store=self.store, name=u'rapier', hurtfulness=3).storeID self.store.close() choose(brokenapp) self.store = store.Store(self.dbdir) cmd = Upgrade() cmd.parent = CommandStub(self.store, 'upgrade') result, output = callWithStdoutRedirect( cmd.parseOptions, ['--count', '100']) lines = output.getvalue().splitlines() # Ensure that the original error is output. self.assertEqual(lines[0], 'Upgrader error:') self.assertTrue(len(lines) > 2) oldType = oldapp.Sword newType = store._typeNameToMostRecentClass[oldType.typeName] msg = cmd.errorMessageFormat % ( oldType.typeName, swordID, oldType.schemaVersion, newType.schemaVersion) self.assertTrue(lines[-1].startswith(msg)) def test_upgradeStoreRecursing(self): """ L{Upgrade} upgrades L{Item}s in substores. """ choose(oldapp) ss1 = SubStore.createNew(self.store, ['a']) ss2 = SubStore.createNew(self.store, ['b']) swordIDs = [ (ss1.storeID, oldapp.Sword(store=ss1.open(), name=u'foo').storeID), (ss2.storeID, oldapp.Sword(store=ss2.open(), name=u'bar').storeID)] del ss1, ss2 self.store.close() choose(deleteswordapp) self.store = store.Store(self.dbdir) cmd = Upgrade() cmd.parent = CommandStub(self.store, 'upgrade') callWithStdoutRedirect(cmd.parseOptions, []) for (ssid, swordID) in swordIDs: self.assertRaises( KeyError, self.store.getItemByID(ssid).open().getItemByID, swordID) class StoreUpgradeTests(unittest.TestCase): """ Tests for L{upgrade._StoreUgprade}. """ def setUp(self): self.store = Store() self._upgrader = _StoreUpgrade(self.store) def test_queueMultipleVersions(self): """ If multiple schema versions are queued for upgrade, upgrades should be attempted for all of them (but only attempted once per version). """ legacy1 = declareLegacyItem('test_type', 1, {}) legacy2 = declareLegacyItem('test_type', 2, {}) self._upgrader.queueTypeUpgrade(legacy1) self._upgrader.queueTypeUpgrade(legacy2) self._upgrader.queueTypeUpgrade(legacy2) self.assertEqual(len(self._upgrader._oldTypesRemaining), 2) Axiom-0.6.0/axiom/test/test_userbase.py0000644000175000017500000005212211224737657020044 0ustar exarkunexarkun """ Tests for L{axiom.userbase}. """ import datetime, StringIO, sys from zope.interface import Interface, implements from zope.interface.verify import verifyObject from twisted.trial import unittest from twisted.internet.defer import maybeDeferred from twisted.cred.portal import Portal, IRealm from twisted.cred.checkers import ICredentialsChecker from twisted.cred.error import UnauthorizedLogin from twisted.cred.credentials import IUsernamePassword, IUsernameHashedPassword from twisted.cred.credentials import UsernamePassword from twisted.python.filepath import FilePath from epsilon.extime import Time from axiom.iaxiom import IScheduler from axiom.store import Store from axiom.substore import SubStore from axiom.scheduler import TimedEvent, _SubSchedulerParentHook from axiom import userbase from axiom.item import Item from axiom.attributes import integer from axiom.scripts import axiomatic from axiom import errors from axiom import dependency class IGarbage(Interface): pass class GarbageProtocolHandler(Item): schemaVersion = 1 typeName = 'test_login_garbage' powerupInterfaces = (IGarbage,) garbage = integer() implements(IGarbage) SECRET = 'bananas' class UserBaseTest(unittest.TestCase): """ Tests for L{axiom.userbase} with an on-disk store. @ivar store: The C{Store} object for the items tested. """ def setUp(self): """ Set up for testing with an on-disk store. 
""" self.store = Store(FilePath(self.mktemp())) def logInAndCheck(self, username, domain='localhost'): """ Ensure that logging in via cred succeeds based on the accounts managed by L{axiom.userbase.LoginSystem}. """ s = self.store def _speedup(): l = userbase.LoginSystem(store=s) dependency.installOn(l, s) s.checkpoint() p = Portal(IRealm(s), [ICredentialsChecker(s)]) a = l.addAccount(username, 'localhost', SECRET) gph = GarbageProtocolHandler(store=a.avatars.open(), garbage=0) dependency.installOn(gph, gph.store) return p, gph p, gph = s.transact(_speedup) def wasItGph((interface, avatar, logout)): self.assertEquals(interface, IGarbage) self.assertEquals(avatar, gph) logout() return p.login(UsernamePassword('bob@localhost', SECRET), None, IGarbage ).addCallback(wasItGph) def testBasicLogin(self): self.logInAndCheck('bob') def testUppercaseLogin(self): self.logInAndCheck('BOB') def testMixedCaseLogin(self): self.logInAndCheck('BoB') class MemoryUserBaseTest(UserBaseTest): """ Tests for L{axiom.userbase} with an in-memory store. @ivar store: The C{Store} object for the items tested. """ def setUp(self): """ Set up for testing with an in-memory store. """ self.store = Store() class CommandTestCase(unittest.TestCase): """ Integration tests for the 'axiomatic userbase' command. """ def setUp(self): self.dbdir = FilePath(self.mktemp()) self.store = Store(self.dbdir) def tearDown(self): self.store.close() def _login(self, avatarId, password): cc = ICredentialsChecker(self.store) p = Portal(IRealm(self.store), [cc]) return p.login(UsernamePassword(avatarId, password), None, lambda orig, default: orig) def assertImplements(self, obj, interface): """ Assert that C{obj} can be adapted to C{interface}. @param obj: Any Python object. @param interface: A L{zope.interface.Interface} that C{obj} should implement. """ self.failUnless(interface.providedBy(interface(obj, None))) def userbase(self, *args): """ Run 'axiomatic userbase' with the given arguments on database at C{dbdir}. @return: A list of lines printed to stdout by the axiomatic command. """ output = StringIO.StringIO() sys.stdout, stdout = output, sys.stdout try: axiomatic.main(['-d', self.dbdir.path, 'userbase'] + list(args)) finally: sys.stdout = stdout return output.getvalue().splitlines() def test_install(self): """ Create a database, install userbase and check that the store implements L{IRealm} and L{ICredentialsChecker}. i.e. that userbase has been installed. This is an integration test. """ self.userbase('install') self.assertImplements(self.store, IRealm) self.assertImplements(self.store, ICredentialsChecker) def test_userCreation(self): """ Create a user on a store, implicitly installing userbase, then try to log in with the user. This is an integration test. """ self.userbase('create', 'alice', 'localhost', SECRET) def cb((interface, avatar, logout)): ss = avatar.avatars.open() self.assertEquals(list(userbase.getAccountNames(ss)), [(u'alice', u'localhost')]) self.assertEquals(avatar.password, SECRET) logout() d = self._login('alice@localhost', SECRET) return d.addCallback(cb) def test_listOnClean(self): """ Check that we are given friendly and informative output when we use 'userbase list' on a fresh store. """ output = self.userbase('list') self.assertEquals(output, ['No accounts']) def test_list(self): """ When users exist, 'userbase list' should print their IDs one to a line. 
""" self.userbase('create', 'alice', 'localhost', SECRET) self.userbase('create', 'bob', 'localhost', SECRET) output = self.userbase('list') self.assertEquals(output, ['alice@localhost', 'bob@localhost']) def test_listWithDisabled(self): """ Check that '[DISABLED]' is printed after the ID of users with disabled accounts. """ self.userbase('create', 'alice', 'localhost', SECRET) self.userbase('create', 'bob', 'localhost', SECRET) def cb((interface, avatar, logout)): avatar.disabled = 1 output = self.userbase('list') self.assertEquals(output, ['alice@localhost', 'bob@localhost [DISABLED]']) return self._login('bob@localhost', SECRET).addCallback(cb) def test_listOffering(self): """ Mantissa offerings are added as users with a 'username' but no domain. Check that the 'list' command prints these correctly. """ name = 'offering-name' self.userbase('install') realm = IRealm(self.store) substoreItem = SubStore.createNew(self.store, ('app', name)) realm.addAccount(name, None, None, internal=True, avatars=substoreItem) output = self.userbase('list') self.assertEquals(output, [name]) def pvals(m): d = m.persistentValues() d.pop('account') return d class AccountTestCase(unittest.TestCase): def testAccountNames(self): dbdir = FilePath(self.mktemp()) s = Store(dbdir) ls = userbase.LoginSystem(store=s) dependency.installOn(ls, s) acc = ls.addAccount('username', 'dom.ain', 'password') ss = acc.avatars.open() self.assertEquals( list(userbase.getAccountNames(ss)), [('username', 'dom.ain')]) acc.addLoginMethod(u'nameuser', u'ain.dom') names = list(userbase.getAccountNames(ss)) names.sort() self.assertEquals( names, [('nameuser', 'ain.dom'), ('username', 'dom.ain')]) def testGetLoginMethods(self): """ Test L{userbase.getLoginMethods} """ dbdir = FilePath(self.mktemp()) s = Store(dbdir) ls = userbase.LoginSystem(store=s) dependency.installOn(ls, s) acc = ls.addAccount('username', 'dom.ain', 'password', protocol=u'speech') ss = acc.avatars.open() for protocol in (None, u'speech'): self.assertEquals(list(userbase.getAccountNames(ss, protocol)), [('username', 'dom.ain')]) # defaults to ANY_PROTOCOL acc.addLoginMethod(u'username2', u'dom.ain') # check that searching for protocol=speech also gives us the # ANY_PROTOCOL LoginMethod for protocol in (None, u'speech'): self.assertEquals(sorted(userbase.getAccountNames(ss, protocol)), [('username', 'dom.ain'), ('username2', 'dom.ain')]) def testAvatarStoreState(self): """ You can only pass an 'avatars' argument if it doesn't already have an account in it. Some accounts want to have their stores in slightly odd places (like offering.py) but you can't have two accounts added which both point to the same store. """ dbdir = FilePath(self.mktemp()) s = Store(dbdir) ls = userbase.LoginSystem(store=s) dependency.installOn(ls, s) acc = ls.addAccount('alice', 'dom.ain', 'password') # this is allowed, if weird unrelatedAccount = ls.addAccount( 'elseice', 'dom.ain', 'password', avatars=SubStore.createNew(s, ('crazy', 'what'))) # this is not allowed. 
self.assertRaises(errors.DuplicateUniqueItem, ls.addAccount, 'bob', 'ain.dom', 'xpassword', avatars=acc.avatars) # Make sure that our stupid call to addAccount did not corrupt # anything, because we are stupid self.assertEquals(acc.avatars.open().query(userbase.LoginAccount).count(), 1) def testParallelLoginMethods(self): dbdir = FilePath(self.mktemp()) s = Store(dbdir) ls = userbase.LoginSystem(store=s) acc = ls.addAccount(u'username', u'example.com', u'password') ss = acc.avatars.open() loginMethods = s.query(userbase.LoginMethod) subStoreLoginMethods = ss.query(userbase.LoginMethod) self.assertEquals(loginMethods.count(), 1) self.assertEquals( [pvals(m) for m in loginMethods], [pvals(m) for m in subStoreLoginMethods]) def testSiteLoginMethodCreator(self): dbdir = FilePath(self.mktemp()) s = Store(dbdir) ls = userbase.LoginSystem(store=s) acc = ls.addAccount(u'username', u'example.com', u'password') # Do everything twice to make sure repeated calls don't corrupt state # somehow for i in [0, 1]: acc.addLoginMethod( localpart=u'anothername', domain=u'example.org', verified=True, protocol=u'test', internal=False) loginMethods = s.query( userbase.LoginMethod, sort=userbase.LoginMethod.storeID.ascending) subStoreLoginMethods = acc.avatars.open().query( userbase.LoginMethod, sort=userbase.LoginMethod.storeID.ascending) self.assertEquals(loginMethods.count(), 2) self.assertEquals( [pvals(m) for m in loginMethods], [pvals(m) for m in subStoreLoginMethods]) def testUserLoginMethodCreator(self): dbdir = FilePath(self.mktemp()) s = Store(dbdir) ls = userbase.LoginSystem(store=s) acc = ls.addAccount(u'username', u'example.com', u'password') ss = acc.avatars.open() subStoreLoginAccount = ss.findUnique(userbase.LoginAccount) # Do everything twice to make sure repeated calls don't corrupt state # somehow for i in [0, 1]: subStoreLoginAccount.addLoginMethod( localpart=u'anothername', domain=u'example.org', verified=True, protocol=u'test', internal=False) loginMethods = s.query( userbase.LoginMethod, sort=userbase.LoginMethod.storeID.ascending) subStoreLoginMethods = ss.query( userbase.LoginMethod, sort=userbase.LoginMethod.storeID.ascending) self.assertEquals(loginMethods.count(), 2) self.assertEquals( [pvals(m) for m in loginMethods], [pvals(m) for m in subStoreLoginMethods]) def testDomainNames(self): s = Store() acc = s for localpart, domain, internal in [ (u'local', u'example.com', True), (u'local', u'example.net', True), (u'remote', u'example.org', False), (u'another', u'example.com', True), (u'brokenguy', None, True)]: userbase.LoginMethod( store=s, localpart=localpart, domain=domain, verified=True, account=s, protocol=u'test', internal=internal) self.assertEquals(userbase.getDomainNames(s), [u"example.com", u"example.net"]) class ThingThatMovesAround(Item): typeName = 'test_thing_that_moves_around' schemaVersion = 1 superValue = integer() def run(): pass class SubStoreMigrationTestCase(unittest.TestCase): IMPORTANT_VALUE = 159 localpart = u'testuser' domain = u'example.com' def setUp(self): self.dbdir = FilePath(self.mktemp()) self.store = Store(self.dbdir) self.ls = userbase.LoginSystem(store=self.store) self.scheduler = IScheduler(self.store) self.account = self.ls.addAccount( self.localpart, self.domain, u'PASSWORD') self.accountStore = self.account.avatars.open() self.ss = IScheduler(self.accountStore) self.origdir = self.accountStore.dbdir self.destdir = FilePath(self.mktemp()) def test_extraction(self): """ Ensure that user store extraction works correctly, particularly in the presence of 
timed events. """ thing = ThingThatMovesAround(store=self.accountStore, superValue=self.IMPORTANT_VALUE) self.ss.schedule(thing, Time() + datetime.timedelta(days=1)) self.test_noTimedEventsExtraction() def test_noTimedEventsExtraction(self): """ Ensure that user store extraction works correctly if no timed events are present. """ userbase.extractUserStore(self.account, self.destdir) self.assertEquals( self.ls.accountByAddress(self.localpart, self.domain), None) self.failIf(list(self.store.query(SubStore, SubStore.storepath == self.origdir))) self.origdir.restat(False) self.failIf(self.origdir.exists()) self.failIf(list(self.store.query(_SubSchedulerParentHook))) def test_noTimedEventsInsertion(self): """ Test that inserting a user store succeeds if it contains no timed events. """ self.test_noTimedEventsExtraction() self._testInsertion() def test_insertion(self, _deleteDomainDirectory=False): """ Test that inserting a user store succeeds and that the right items are placed in the site store as a result. """ self.test_extraction() self._testInsertion(_deleteDomainDirectory) insertedStore = self.ls.accountByAddress(self.localpart, self.domain).avatars.open() self.assertEquals( insertedStore.findUnique(ThingThatMovesAround).superValue, self.IMPORTANT_VALUE) siteStoreSubRef = self.store.getItemByID(insertedStore.idInParent) ssph = self.store.findUnique(_SubSchedulerParentHook, _SubSchedulerParentHook.subStore == siteStoreSubRef, default=None) self.failUnless(ssph) self.failUnless(self.store.findUnique(TimedEvent, TimedEvent.runnable == ssph)) def _testInsertion(self, _deleteDomainDirectory=False): """ Helper method for inserting a user store. """ if _deleteDomainDirectory: self.store.filesdir.child('account').child(self.domain).remove() userbase.insertUserStore(self.store, self.destdir) def test_insertionWithNoDomainDirectory(self): """ Test that inserting a user store succeeds even if it is the first one in that domain to be inserted. """ self.test_insertion(True) class RealmTestCase(unittest.TestCase): """ Tests for the L{IRealm} implementation in L{axiom.userbase}. """ localpart = u'testuser' domain = u'example.com' password = u'password' def setUp(self): self.store = Store() self.realm = userbase.LoginSystem(store=self.store) dependency.installOn(self.realm, self.store) def test_powerup(self): """ Test that L{LoginSystem} powers up the store for L{IRealm}. """ self.assertIdentical(self.realm, IRealm(self.store)) def _requestAvatarId(self, credentials): return maybeDeferred(self.realm.requestAvatarId, credentials) def test_requestNonexistentAvatarId(self): """ Test that trying to authenticate as a user who does not exist fails with a L{NoSuchUser} exception. """ username = u'%s@%s' % (self.localpart, self.domain) d = self._requestAvatarId( UsernamePassword(username, self.password)) return self.assertFailure(d, errors.NoSuchUser) def test_requestMalformedAvatarId(self): """ Test that trying to authenticate as a user without specifying a hostname fails with a L{NoSuchUser} exception. """ d = self._requestAvatarId( UsernamePassword(self.localpart, self.password)) return self.assertFailure(d, errors.MissingDomainPart) def test_usernamepassword(self): """ L{LoginSystem.requestAvatarId} returns the store identifier of the L{LoginAccount} associated with a L{UsernamePassword} credentials object if the username and password identify an existing account. 
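        A sketch of the flow exercised here (using the names defined on this
        test case; this mirrors the body below)::

            d = self._requestAvatarId(
                UsernamePassword(u'testuser@example.com', self.password))
            # ... the Deferred fires with account.storeID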
""" account = self.realm.addAccount( self.localpart, self.domain, self.password) username = u'%s@%s' % (self.localpart, self.domain) d = self._requestAvatarId(UsernamePassword(username, self.password)) d.addCallback(self.assertEqual, account.storeID) return d def test_usernamepasswordInvalid(self): """ L{LoginSystem.requestAvatarId} fails with L{UnauthorizedLogin} if the password supplied with the L{UsernamePassword} credentials is not valid for the provided username. """ account = self.realm.addAccount( self.localpart, self.domain, self.password) username = u'%s@%s' % (self.localpart, self.domain) d = self._requestAvatarId(UsernamePassword(username, u'blahblah')) self.assertFailure(d, UnauthorizedLogin) return d def test_preauthenticated(self): """ L{LoginSystem.requestAvatarId} returns the store identifier of the L{LoginAccount} associated with a L{Preauthenticated} credentials object. """ account = self.realm.addAccount( self.localpart, self.domain, self.password) username = u'%s@%s' % (self.localpart, self.domain) d = self._requestAvatarId(userbase.Preauthenticated(username)) d.addCallback(self.assertEqual, account.storeID) return d class PreauthenticatedTests(unittest.TestCase): """ Tests for L{userbase.Preauthenticated}. """ def test_repr(self): """ L{userbase.Preauthenticated} has a repr which identifies its type and its user. """ self.assertEqual( repr(userbase.Preauthenticated(u'foo@bar')), '') def test_usernamepassword(self): """ L{Preauthenticated} implements L{IUsernamePassword} and succeeds all authentication checks. """ creds = userbase.Preauthenticated(u'foo@bar') self.assertTrue( verifyObject(IUsernamePassword, creds), "Preauthenticated does not implement IUsernamePassword") self.assertTrue( creds.checkPassword('random string'), "Preauthenticated did not accept an arbitrary password.") def test_usernamehashedpassword(self): """ L{Preauthenticated} implements L{IUsernameHashedPassword} and succeeds all authentication checks. """ creds = userbase.Preauthenticated(u'foo@bar') self.assertTrue( verifyObject(IUsernameHashedPassword, creds), "Preauthenticated does not implement IUsernameHashedPassword") self.assertTrue( creds.checkPassword('arbitrary bytes'), "Preauthenticated did not accept an arbitrary password.") Axiom-0.6.0/axiom/test/test_xatop.py0000644000175000017500000011035111117560627017355 0ustar exarkunexarkun# Copyright 2008 Divmod, Inc. 
See LICENSE for details import sys import os from twisted.trial import unittest from twisted.internet import protocol, defer from twisted.python.util import sibpath from twisted.python import log, filepath from epsilon import extime from axiom import attributes, item, store, errors from axiom.iaxiom import IStatEvent from pysqlite2.dbapi2 import sqlite_version_info class RevertException(Exception): pass class TestItem(item.Item): schemaVersion = 1 typeName = 'TestItem' foo = attributes.integer(indexed=True, default=10) bar = attributes.text() baz = attributes.timestamp() other = attributes.reference() booleanT = attributes.boolean() booleanF = attributes.boolean() activated = attributes.inmemory() checkactive = attributes.inmemory() checked = attributes.inmemory() myStore = attributes.reference() attributes.compoundIndex(bar, baz) def activate(self): self.activated = True if getattr(self, 'checkactive', False): assert isinstance(self.other, TestItem), repr(self.other) assert self.other != self, repr(self.other) self.checked = True class StoreTests(unittest.TestCase): def testCreation(self): dbdir = filepath.FilePath(self.mktemp()) s = store.Store(dbdir) s.close() def testReCreation(self): dbdir = filepath.FilePath(self.mktemp()) s = store.Store(dbdir) s.close() s = store.Store(dbdir) s.close() def test_onlyOneDir(self): """ A Store should raise an error if both dbdir and filesdir are specified. """ self.assertRaises(ValueError, store.Store, filepath.FilePath(self.mktemp()), filesdir=filepath.FilePath(self.mktemp())) def testTableQueryCaching(self): """ Ensure that the identity of the string returned by the mostly-private getTableQuery method is the same when it is invoked for the same type and version, rather than a newly constructed string. """ s = store.Store() self.assertIdentical( s.getTableQuery(TestItem.typeName, 1), s.getTableQuery(TestItem.typeName, 1)) def testTypeToDatabaseNames(self): # The real purpose of this test is to have the new get*Name # methods explicitly called somewhere in the test suite. The # effect itself does not actually matter much. These functions # are proven right by the fact that item creation, querying # and update are working. # I think the following should be ok for anything that vaguely # resembles SQL. s = store.Store() tn = s.getTableName(TestItem) assert tn.startswith(s.databaseName) cn = s.getColumnName(TestItem.foo) scn = s.getShortColumnName(TestItem.foo) assert len(tn) < len(cn) assert len(scn) < len(cn) assert cn.endswith(scn) assert cn.startswith(tn) icn = s.getColumnName(TestItem.storeID) sicn = s.getShortColumnName(TestItem.storeID) assert len(tn) < len(icn) assert len(sicn) < len(icn) assert icn.endswith(sicn) assert icn.startswith(tn) def testGetTableName(self): """ Item instances were getting into the table name cache. Make sure only classes are accepted. """ s = store.Store() self.assertRaises(errors.ItemClassesOnly, s.getTableName, TestItem(store=s)) def testTableNameCacheDoesntGrow(self): """ Make sure the table name cache doesn't grow out of control anymore. """ s = store.Store() tn = s.getTableName(TestItem) x = len(s.typeToTableNameCache) for i in range(10): s.getTableName(TestItem) self.assertEquals(x, len(s.typeToTableNameCache)) def testStoreIDComparerIdentity(self): # We really want this to hold, because the storeID object is # used like a regular attribute as a key for various caching # within store.
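        # (Illustrative sketch, not part of the original test: because the
        # same attribute object is returned on every access, it works as a
        # stable dictionary key, e.g.
        #     cache = {}
        #     cache[TestItem.storeID] = 'cached'
        #     assert cache[TestItem.storeID] == 'cached'
        # which is what the identity assertion below relies on.)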
a0 = TestItem.storeID a1 = TestItem.storeID self.assertIdentical(a0, a1) def test_loadTypeSchema(self): """ L{Store._loadTypeSchema} returns a C{dict} mapping item type names and versions to a list of tuples giving information about the on-disk schema information for each attribute of that version of that type. """ s = store.Store() TestItem(store=s) loadedSchema = s._loadTypeSchema() self.assertEqual( loadedSchema[(TestItem.typeName, TestItem.schemaVersion)], [('bar', 'TEXT COLLATE NOCASE', False, attributes.text, ''), ('baz', 'INTEGER', False, attributes.timestamp, ''), ('booleanF', 'BOOLEAN', False, attributes.boolean, ''), ('booleanT', 'BOOLEAN', False, attributes.boolean, ''), ('foo', 'INTEGER', True, attributes.integer, ''), ('myStore', 'INTEGER', True, attributes.reference, ''), ('other', 'INTEGER', True, attributes.reference, ''), ]) def test_checkInconsistentTypeSchema(self): """ L{Store._checkTypeSchemaConsistency} raises L{RuntimeError} if the in memory schema of the type passed to it differs from the schema stored in the database for that type, either by including too few attributes, too many attributes, or the wrong type for one of the attributes. """ s = store.Store() schema = [ (name, attr.sqltype, attr.indexed, attr, attr.doc) for (name, attr) in TestItem.getSchema()] # Test a missing attribute self.assertRaises( RuntimeError, s._checkTypeSchemaConsistency, TestItem, {(TestItem.typeName, TestItem.schemaVersion): schema[:-1]}) # And an extra attribute self.assertRaises( RuntimeError, s._checkTypeSchemaConsistency, TestItem, {(TestItem.typeName, TestItem.schemaVersion): schema + [schema[0]]}) # And the wrong type for one of the attributes self.assertRaises( RuntimeError, s._checkTypeSchemaConsistency, TestItem, {(TestItem.typeName, TestItem.schemaVersion): [(schema[0], 'VARCHAR(64) (this is made up)', schema[2], schema[3], schema[4])] + schema[1:]}) def test_inMemorySchemaCacheReset(self): """ The global in-memory table schema cache should not change the behavior of consistency checking with respect to the redefinition of in-memory schemas. This test is verifying the behavior which is granted by the use of a WeakKeyDictionary for _inMemorySchemaCache. If that cache kept strong references to item types or used a (typeName, schemaVersion) key, either the second C{SoonToChange} class definition in this method would fail or the schema defined by the first C{SoonToChange} class would be used, even after it should have been replaced by the second definition. """ class SoonToChange(item.Item): attribute = attributes.integer() dbpath = self.mktemp() s = store.Store(dbpath) SoonToChange(store=s) s.close() # This causes a Store._checkTypeSchemaConsistency to cache # SoonToChange. s = store.Store(dbpath) s.close() del SoonToChange, s class SoonToChange(item.Item): attribute = attributes.boolean() self.assertRaises(RuntimeError, store.Store, dbpath) def test_checkOutdatedTypeSchema(self): """ L{Store._checkTypeSchemaConsistency} raises L{RuntimeError} if the type passed to it is the most recent in-memory version of that type and is older than the newest on disk schema for that type. 
""" s = store.Store() schema = [ (name, attr.sqltype, attr.indexed, attr, attr.doc) for (name, attr) in TestItem.getSchema()] self.assertRaises( RuntimeError, s._checkTypeSchemaConsistency, TestItem, {(TestItem.typeName, TestItem.schemaVersion): schema, (TestItem.typeName, TestItem.schemaVersion + 1): schema}) def test_checkConsistencyWhenOpened(self): """ L{Store.__init__} checks the consistency of the schema and raises L{RuntimeError} for any inconsistency. """ class SoonToChange(item.Item): attribute = attributes.integer() dbpath = self.mktemp() s = store.Store(dbpath) SoonToChange(store=s) s.close() # Get rid of both the type and the store so that we can define a new # incompatible version. It might be nice if closed stores didn't keep # references to types, but whatever. This kind of behavior isn't # really supported, only the unit tests need to do it for now. del SoonToChange, s class SoonToChange(item.Item): attribute = attributes.boolean() self.assertRaises(RuntimeError, store.Store, dbpath) def test_createAndLoadExistingIndexes(self): """ L{Store._loadExistingIndexes} returns a C{set} containing the names of all of the indexes which exist in the store. """ s = store.Store() before = s._loadExistingIndexes() TestItem(store=s) after = s._loadExistingIndexes() created = after - before self.assertEqual( created, set([s._indexNameOf(TestItem, ['foo']), s._indexNameOf(TestItem, ['other']), s._indexNameOf(TestItem, ['myStore']), s._indexNameOf(TestItem, ['bar', 'baz'])])) def test_loadExistingAttachedStoreIndexes(self): """ If a store is attached to its parent, L{Store._loadExistingIndexes} returns just the indexes which exist in the child store. """ secondaryPath = self.mktemp() main = store.Store() secondary = store.Store(secondaryPath, parent=main, idInParent=17) TestItem(store=secondary) before = secondary._loadExistingIndexes() secondary.attachToParent() after = secondary._loadExistingIndexes() self.assertEqual(before, after) def test_createAttachedStoreIndexes(self): """ Indexes created by the insertion of the first item of a type into a store are created in the database which backs that store even if that store is attached to another store backed by a different database. """ secondaryPath = self.mktemp() main = store.Store() secondary = store.Store(secondaryPath, parent=main, idInParent=23) before = secondary._loadExistingIndexes() secondary.attachToParent() TestItem(store=secondary) # Close it to detach from the parent. Detach from the parent so we can # re-open and hopefully avoid accidentally getting any results polluted # by the parent store (shouldn't happen, because the above test, # test_loadExistingAttachedStoreIndexes, makes sure we can inspect # indexes without worrying about an attached parent, but I'm being # paranoid). secondary.close() del secondary secondary = store.Store(secondaryPath, parent=main, idInParent=23) after = secondary._loadExistingIndexes() self.assertEqual( after - before, set([secondary._indexNameOf(TestItem, ['foo']), secondary._indexNameOf(TestItem, ['other']), secondary._indexNameOf(TestItem, ['myStore']), secondary._indexNameOf(TestItem, ['bar', 'baz'])])) def test_inMemoryIndexCacheReset(self): """ The global in-memory index schema cache should not change the behavior of index creation with respect to the redefinition of in-memory schemas. This test is verifying the behavior which is granted by the use of a WeakKeyDictionary for _requiredTableIndexes. 
If that cache kept strong references to item types or used a (typeName, schemaVersion) key, either the second C{SoonToChange} class definition in this method would fail or the indexes on the schema defined by the first C{SoonToChange} class would be used, even after it should have been replaced by the second definition. """ class SoonToChange(item.Item): attribute = attributes.integer() dbpath = self.mktemp() s = store.Store(dbpath) before = s._loadExistingIndexes() SoonToChange(store=s) after = s._loadExistingIndexes() # Sanity check - this version of SoonToChange has no indexes. self.assertEqual(before, after) s.close() del SoonToChange, s class SoonToChange(item.Item): attribute = attributes.boolean(indexed=True) s = store.Store() before = s._loadExistingIndexes() SoonToChange(store=s) after = s._loadExistingIndexes() self.assertEqual( after - before, set([s._indexNameOf(SoonToChange, ['attribute'])])) def test_loadPythonModuleHint(self): """ If the Python definition of a type found in a Store has not yet been loaded, the hint in the I{module} column in type table is loaded. """ # Arbitrary constants used in multiple places and processes. typeCount = 3 magicOffset = 17 baseModuleName = "axiom_unloaded_module_" # Path the temporary new modules will be created in. importPath = filepath.FilePath(self.mktemp()) importPath.makedirs() sys.path.insert(0, importPath.path) self.addCleanup(sys.path.remove, importPath.path) # Path the store will be created at. dbdir = filepath.FilePath(self.mktemp()) # Create some source files, each defining an item type. for counter in range(typeCount): moduleName = baseModuleName + str(counter) # Sanity check - the test can't work if any of the modules is # already imported. self.assertNotIn(moduleName, sys.modules) # Write out the source. modulePath = importPath.child(moduleName + ".py") modulePath.setContent("""\ from axiom.item import Item from axiom.attributes import integer class Unloaded(Item): value = integer() """) # In another process, so as not to cause the unloaded modules to be # loaded in this process, create a store containing instances of the # Unloaded types. script = filepath.FilePath(self.mktemp()) script.setContent("""\ from sys import argv from twisted.python.reflect import namedAny from axiom.store import Store dbdir, typeCount, moduleBase, magicOffset = argv[1:] s = Store(dbdir) for i in range(int(typeCount)): moduleName = moduleBase + str(i) namedAny(moduleName).Unloaded(store=s, value=int(magicOffset) + i) s.close() """) os.system(" ".join([ "PYTHONPATH=%s:$PYTHONPATH" % (importPath.path,), sys.executable, script.path, dbdir.path, str(typeCount), baseModuleName, str(magicOffset)])) # Another sanity check. The modules still better not have been # imported in this process. for counter in range(typeCount): self.assertNotIn(baseModuleName + str(counter), sys.modules) # Now open the store here. This only works if the Store figures out it # needs to import the modules defining the types. s = store.Store(dbdir.path) # And to be sure, find the item and make sure it has the value we # expect. 
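        # (Illustrative, not in the original: the store resolves each stored
        # module hint with the moral equivalent of
        #     namedAny(moduleName)
        # which is why the Unloaded classes now appear in sys.modules.)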
for counter in range(typeCount): Unloaded = sys.modules[baseModuleName + str(counter)].Unloaded self.assertEqual( s.query(Unloaded, Unloaded.value == magicOffset + counter).count(), 1) class FailurePathTests(unittest.TestCase): def testNoCrossStoreRefs(self): s1 = store.Store() s2 = store.Store() t1 = TestItem(store=s1) self.assertRaises(errors.NoCrossStoreReferences, TestItem, store=s2, other=t1) t2 = TestItem(store=s2) self.assertRaises(errors.NoCrossStoreReferences, setattr, t2, 'other', t1) self.assertRaises(errors.NoCrossStoreReferences, setattr, t2, 'other', s1) t3 = TestItem(other=t1) self.assertRaises(errors.NoCrossStoreReferences, setattr, t3, 'store', s2) t3.store = s1 self.assertEquals(list(s1.query(TestItem)), [t1, t3]) class ItemTests(unittest.TestCase): def setUp(self): self.dbdir = filepath.FilePath(self.mktemp()) self.store = store.Store(self.dbdir) def tearDown(self): self.store.close() def testFirstActivationHappensWhenAttributesAreSet(self): tio = TestItem(store=self.store) ti = TestItem(store=self.store, checkactive=True, other=tio) self.assertEquals(ti.checked, True) def testItemCreation(self): timeval = extime.Time.fromISO8601TimeAndDate('2004-10-05T10:12:14.1234') s = TestItem( foo = 42, bar = u'hello world', baz = timeval, booleanT = True, booleanF = False ) s.myStore = self.store s.store = self.store sid = s.storeID self.store.close() self.store = store.Store(self.dbdir) s2 = self.store.getItemByID(sid) self.assertEquals(s2.foo, s.foo) self.assertEquals(s2.booleanT, s.booleanT) self.assertEquals(s2.booleanF, s.booleanF) self.assertIdentical(s2.myStore, self.store) def testBasicQuery(self): def tt(): # !@#$ 3x+ speedup over not doing this in a transact() created = [TestItem(foo=x, bar=u"string-value-of-"+str(x)) for x in range(20)] for c in created: c.store = self.store self.store.transact(tt) loaded = self.store.query(TestItem, TestItem.foo >= 10) self.assertEquals(len(list(loaded)), 10) def testCreateThenDelete(self): timeval = extime.Time.fromISO8601TimeAndDate('2004-10-05T10:12:14.1234') sid = [] def txn(): s = TestItem( store = self.store, foo = 42, bar = u'hello world', baz = timeval, booleanT = True, booleanF = False ) sid.append(s.storeID) self.assertEquals(list(self.store.query(TestItem)), [s]) s.deleteFromStore() self.assertEquals(list(self.store.query(TestItem)), []) # hmm. possibly due its own test. # self.assertRaises(KeyError, self.store.getItemByID, sid[0]) self.store.transact(txn) self.assertRaises(KeyError, self.store.getItemByID, sid[0]) self.assertEquals(list(self.store.query(TestItem)), []) def test_getNeverInsertedItem(self): """ Verify that using getItemByID with a default object to attempt to load by storeID an Item which was created and deleted within a single transaction results in the default object. 
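        The call under test looks like (mirroring the body below)::

            result = store.getItemByID(storeID, default=default)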
""" def txn(): a = TestItem(store=self.store) storeID = a.storeID a.deleteFromStore() del a return storeID storeID = self.store.transact(txn) default = object() result = self.store.getItemByID(storeID, default=default) self.assertIdentical(result, default) def testInMemoryRevert(self): item1 = TestItem( store=self.store, foo=24, bar=u'Zoom', baz=extime.Time.fromISO8601TimeAndDate('2004-10-05T10:12:14.1234') ) def brokenFunction(): item2 = TestItem( store=self.store, foo=42, bar=u'mooZ', baz=extime.Time.fromISO8601TimeAndDate('1970-03-12T05:05:11.5921') ) item1.foo = 823 item1.bar = u'this is the wrong answer' item1.baz = extime.Time() raise RevertException(item2.storeID) try: self.store.transact(brokenFunction) except RevertException, exc: [storeID] = exc.args self.assertRaises(KeyError, self.store.getItemByID, storeID) self.assertEquals(item1.foo, 24) self.assertEquals(item1.bar, u'Zoom') self.assertEquals(item1.baz.asISO8601TimeAndDate(), '2004-10-05T10:12:14.1234+00:00') else: self.fail("Transaction should have raised an exception") class AttributefulItem(item.Item): schemaVersion = 1 typeName = 'test_attributeful_item' withDefault = attributes.integer(default=42) withoutDefault = attributes.integer() class StricterItem(item.Item): schemaVersion = 1 typeName = 'test_stricter_item' aRef = attributes.reference(allowNone=False) class AttributeTests(unittest.TestCase): def testGetAttribute(self): s = store.Store() def testGetAttribute(): x = AttributefulItem(store=s) y = AttributefulItem(store=s, withDefault=20) z = AttributefulItem(store=s, withoutDefault=30) for o in x, y, z: o.checkpoint() self.assertEquals(x.withDefault, 42) self.assertEquals(x.withoutDefault, None) self.assertEquals(y.withDefault, 20) self.assertEquals(y.withoutDefault, None) self.assertEquals(z.withDefault, 42) self.assertEquals(z.withoutDefault, 30) s.transact(testGetAttribute) def testIntegerAttribute_SQLiteBug(self): # SQLite 3.2.1 has a bug which causes various integers to be stored # incorrect. For example, 2 ** 48 - 1 is stored as -1. This is # fixed in 3.2.7. for power in 8, 16, 24, 32, 48, 63: s = store.Store() input = 2 ** power - 1 s.transact( AttributefulItem, store=s, withoutDefault=input) output = s.findFirst(AttributefulItem).withoutDefault self.assertEquals(input, output) s.close() if sqlite_version_info < (3, 2, '7'): testIntegerAttribute_SQLiteBug.todo = ( "If this test fails on your system, you should really upgrade SQLite " "to at least 3.2.7. 
Not doing so will lead to corruption of your " "data.") def testQueries(self): s = store.Store() def testQueries(): x = AttributefulItem(store=s, withDefault=50) y = AttributefulItem(store=s, withDefault=30) z = AttributefulItem(store=s, withoutDefault=30) for o in x, y, z: o.checkpoint() self.assertEquals( list(s.query(AttributefulItem, AttributefulItem.withoutDefault != None, sort=AttributefulItem.withoutDefault.desc)), [z]) self.assertEquals( list(s.query(AttributefulItem, sort=AttributefulItem.withDefault.desc)), [x, z, y]) s.transact(testQueries) def testDontAllowNone(self): s = store.Store() def testDontAllowNone(): try: x = StricterItem(store=s) except TypeError: pass else: self.fail("Creating a StricterItem without an aRef value should have failed") a = AttributefulItem(store=s) x = StricterItem(store=s, aRef=a) self.assertEquals(x.aRef, a) try: x.aRef = None except TypeError: pass else: self.fail("Setting aRef to None on a StricterItem should have failed") s.transact(testDontAllowNone) class TestFindOrCreate(unittest.TestCase): def testCreate(self): s = store.Store() ai = s.findOrCreate(AttributefulItem) self.assertEquals(ai.withDefault, 42) self.assertEquals(ai.withoutDefault, None) def testFind(self): s = store.Store() ai = s.findOrCreate(AttributefulItem, withoutDefault=1234) ai2 = s.findOrCreate(AttributefulItem, withDefault=42) ai3 = s.findOrCreate(AttributefulItem) ai4 = s.findOrCreate(AttributefulItem, withDefault=71) ai5 = s.findOrCreate(AttributefulItem, withDefault=71) self.assertIdentical(ai, ai2) self.assertIdentical(ai3, ai2) self.assertIdentical(ai4, ai5) self.assertNotIdentical(ai, ai4) def testIfNew(self): l = [] s = store.Store() ai1 = s.findOrCreate(AttributefulItem, l.append, withDefault=1234) ai2 = s.findOrCreate(AttributefulItem, l.append, withDefault=1234) ai3 = s.findOrCreate(AttributefulItem, l.append, withDefault=4321) ai4 = s.findOrCreate(AttributefulItem, l.append, withDefault=4321) self.assertEquals(len(l), 2) self.assertEquals(l, [ai1, ai3]) def testFindFirst(self): s = store.Store() a0 = ai = AttributefulItem(store=s) ai2 = s.findFirst(AttributefulItem, AttributefulItem.withDefault == 42) shouldBeNone = s.findFirst(AttributefulItem, AttributefulItem.withDefault == 99) self.assertEquals(ai, ai2) self.assertEquals(shouldBeNone, None) ai = AttributefulItem(store=s, withDefault=24) ai2 = s.findFirst(AttributefulItem, AttributefulItem.withDefault == 24) self.assertEquals(ai, ai2) ai = AttributefulItem(store=s, withDefault=55) ai2 = s.findFirst(AttributefulItem) self.assertEquals(a0, ai2) class DeletedTrackingItem(item.Item): """ Helper class for testing that C{deleted} is called by ItemQuery.deleteFromStore. """ deletedTimes = 0 value = attributes.integer() def deleted(self): DeletedTrackingItem.deletedTimes += 1 class DeleteFromStoreTrackingItem(item.Item): """ Helper class for testing that C{deleteFromStore} is called by ItemQuery.deleteFromStore. """ deletedTimes = 0 value = attributes.integer() def deleteFromStore(self): DeleteFromStoreTrackingItem.deletedTimes += 1 item.Item.deleteFromStore(self) class MassInsertDeleteTests(unittest.TestCase): def setUp(self): self.storepath = filepath.FilePath(self.mktemp()) self.store = store.Store(self.storepath) def testBatchInsert(self): """ Make sure that batchInsert creates all the items it's supposed to with appropriate attributes. 
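        The call exercised here looks like (mirroring the body below)::

            store.batchInsert(
                AttributefulItem,
                [AttributefulItem.withDefault, AttributefulItem.withoutDefault],
                [(37, 93), (1, 2)])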
""" dataRows = [(37, 93), (1, 2)] self.store.batchInsert(AttributefulItem, [AttributefulItem.withDefault, AttributefulItem.withoutDefault], dataRows) items = list(self.store.query(AttributefulItem)) self.assertEquals(items[0].withDefault, 37) self.assertEquals(items[0].withoutDefault, 93) self.assertEquals(items[1].withDefault, 1) self.assertEquals(items[1].withoutDefault, 2) def testTransactedBatchInsert(self): """ Test that batchInsert works in a transaction. """ dataRows = [(37, 93), (1, 2)] self.store.transact(self.store.batchInsert, AttributefulItem, [AttributefulItem.withDefault, AttributefulItem.withoutDefault], dataRows) items = list(self.store.query(AttributefulItem)) self.assertEquals(items[0].withDefault, 37) self.assertEquals(items[0].withoutDefault, 93) self.assertEquals(items[1].withDefault, 1) self.assertEquals(items[1].withoutDefault, 2) def testBatchInsertReference(self): """ Test that reference args are handled okay by batchInsert. """ itemA = AttributefulItem(store=self.store) itemB = AttributefulItem(store=self.store) dataRows = [(1, u"hello", extime.Time(), itemA, True, False, self.store), (2, u"hoorj", extime.Time(), itemB, False, True, self.store)] self.store.batchInsert(TestItem, [TestItem.foo, TestItem.bar, TestItem.baz, TestItem.other, TestItem.booleanT, TestItem.booleanF, TestItem.myStore], dataRows) items = list(self.store.query(TestItem)) self.assertEquals(items[0].other, itemA) self.assertEquals(items[1].other, itemB) self.assertEquals(items[0].store, self.store) self.assertEquals(items[1].store, self.store) def testMemoryBatchInsert(self): """ Test that batchInsert works on an in-memory store. """ self.store = store.Store() self.testBatchInsert() def testBatchInsertSelectedAttributes(self): """ Test that batchInsert does the right thing when only a few attributes are being set. """ dataRows = [(u"hello", 50, False, self.store), (u"hoorj", None, True, self.store)] self.store.batchInsert(TestItem, [TestItem.bar, TestItem.foo, TestItem.booleanF, TestItem.myStore], dataRows) items = list(self.store.query(TestItem)) self.assertEquals(items[0].other, None) self.assertEquals(items[1].other, None) self.assertEquals(items[0].foo, 50) self.assertEquals(items[1].foo, None) self.assertEquals(items[0].bar, u"hello") self.assertEquals(items[1].bar, u"hoorj") self.assertEquals(items[0].store, self.store) self.assertEquals(items[1].store, self.store) def testBatchDelete(self): """ Ensure that unqualified batchDelete removes all the items of a certain class. """ for i in xrange(10): AttributefulItem(store=self.store, withoutDefault=i) self.store.query(AttributefulItem).deleteFromStore() self.assertEquals(list(self.store.query(AttributefulItem)), []) def testBatchDeleteCondition(self): """ Ensure that conditions for batchDelete are honored properly. """ for i in xrange(10): AttributefulItem(store=self.store, withoutDefault=i) self.store.query(AttributefulItem, AttributefulItem.withoutDefault > 4 ).deleteFromStore() self.assertEquals(self.store.query(AttributefulItem).count(), 5) def testSlowBatchDelete(self): """ Ensure that a 'deleted' method on an Item will be called if it exists. """ DeletedTrackingItem(store=self.store) self.store.query(DeletedTrackingItem).deleteFromStore() self.assertEqual(DeletedTrackingItem.deletedTimes, 1) def test_slowBatchDeleteBecauseDeletedFromStore(self): """ Ensure that a 'deleteFromStore' method on an Item will be called if it exists. 
""" DeleteFromStoreTrackingItem(store=self.store) self.store.query(DeleteFromStoreTrackingItem).deleteFromStore() self.assertEqual(DeleteFromStoreTrackingItem.deletedTimes, 1) # Item types we will use to change the underlying database schema (by creating # them). class ConcurrentItemA(item.Item): anAttribute = attributes.text() class ConcurrentItemB(item.Item): anotherAttribute = attributes.integer() class ProcessConcurrencyTestCase(unittest.TestCase, protocol.ProcessProtocol): def spawn(self, *args): self.d = defer.Deferred() from twisted.internet import reactor reactor.spawnProcess( self, sys.executable, [sys.executable] + list(args), os.environ) return self.d ok = 0 def outReceived(self, data): if data == '1': # step 1: create an item cia = ConcurrentItemA(store=self.store, anAttribute=u'aaa') # then tell the subprocess to load it self.transport.write(str(cia.storeID)+'\n') elif data == '2': # step 2: the subprocess has notified us that it has successfully # completed self.ok = 1 def errReceived(self, data): # we should never *really* get anything to stdout print data def processEnded(self, reason): # total correctness would have us checking the exit code too, but we # got all the output we expected, so whatever. if self.ok: self.d.callback('OK') else: self.d.errback(reason) def testNewItemTypeInSubprocess(self): dbdir = filepath.FilePath(self.mktemp()) self.store = store.Store(dbdir) # Open the store and leave its schema empty (don't create any items) # until the subprocess has opened it and loaded the bogus schema. return self.spawn(sibpath(__file__, "openthenload.py"), dbdir.path) class ConcurrencyTestCase(unittest.TestCase): def testSchemaChange(self): """ When a statement is executed after the underlying schema has been changed (tables are added, database is vacuumed, etc) by another thread of execution, PySQLite2 will raise an OperationalError. This is basically harmless and the query will work if re-executed. This should be done transparently. """ dbdir = filepath.FilePath(self.mktemp()) firstStore = store.Store(dbdir) ConcurrentItemA(store=firstStore) secondStore = store.Store(dbdir) self.assertNotIdentical(firstStore, secondStore) # if this line starts # breaking, rewrite # this test. ConcurrentItemB(store=firstStore) self.assertEquals(secondStore.query(ConcurrentItemA).count(), 1) def testNewItemType(self): """ Creating the first instance of a of an Item subclass changes the underlying database schema as well as some Store-private state which tracks that schema. Test to make sure that creating the first instance of an Item subclass in one store is reflected in a second store. """ dbdir = filepath.FilePath(self.mktemp()) firstStore = store.Store(dbdir) secondStore = store.Store(dbdir) ConcurrentItemA(store=firstStore) self.assertEquals(secondStore.query(ConcurrentItemA).count(), 1) class LoggingTests(unittest.TestCase): """ Tests for log events emitted by L{axiom.store}. """ def _openTest(self, dbdir, expectedValue): events = [] log.addObserver(events.append) self.addCleanup(log.removeObserver, events.append) store.Store(dbdir).close() for event in events: if event.get('interface') is not IStatEvent: continue if event.get('store_opened') != expectedValue: continue break else: self.fail("store_opened IStatEvent not emitted") def test_openOnDisk(self): """ Opening a file-backed store logs an event including the path to the store. 
""" dbdir = self.mktemp() self._openTest(dbdir, os.path.abspath(dbdir)) def test_openInMemory(self): """ Opening a memory-backed store logs an event with an empty string for the path to the store. """ self._openTest(None, '') Axiom-0.6.0/axiom/test/toonewapp.py0000644000175000017500000000315410412631340017165 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_upgrading.SchemaUpgradeTest.testUpgradeWithMissingVersion -*- from axiom.item import Item from axiom.attributes import text, integer, reference, inmemory from axiom.upgrade import registerUpgrader class ActivateHelper: activated = 0 def activate(self): self.activated += 1 class Adventurer(ActivateHelper, Item): typeName = 'test_app_player' schemaVersion = 2 name = text() activated = inmemory() class InventoryEntry(ActivateHelper, Item): typeName = 'test_app_inv' schemaVersion = 1 owner = reference() owned = reference() activated = inmemory() class Sword(ActivateHelper, Item): typeName = 'test_app_sword' schemaVersion = 3 name = text() damagePerHit = integer() activated = inmemory() def owner(): def get(self): return self.store.findUnique(InventoryEntry, InventoryEntry.owned == self).owner return get, owner = property(*owner()) def sword2to3(oldsword): newsword = oldsword.upgradeVersion('test_app_sword', 2, 3) n = oldsword.store.getOldVersionOf('test_app_sword', 2) itrbl = oldsword.store.query(n) newsword.name = oldsword.name newsword.damagePerHit = oldsword.damagePerHit invent = InventoryEntry(store=newsword.store, owner=oldsword.owner, owned=newsword) return newsword registerUpgrader(sword2to3, 'test_app_sword', 2, 3) # ... # deliberately missing upgraders for faulty upgrade path tests: see # morenewapp.py for multi-step upgrades. Axiom-0.6.0/axiom/test/util.py0000644000175000017500000001121611046607253016136 0ustar exarkunexarkun """ Helpers for writing Axiom tests. """ from twisted.python.filepath import FilePath from twisted.trial.unittest import SkipTest from axiom.store import Store _theBaseStorePaths = {} def _getBaseStorePath(testCase, creator): if creator not in _theBaseStorePaths: s = creator(testCase) _theBaseStorePaths[creator] = s.dbdir s.close() return _theBaseStorePaths[creator] def getPristineStore(testCase, creator): """ Get an Axiom Store which has been created and initialized by C{creator} but which has been otherwise untouched. If necessary, C{creator} will be called to make one. @type testCase: L{twisted.trial.unittest.TestCase} @type creator: one-argument callable @param creator: A factory for the Store configuration desired. Will be invoked with the testCase instance if necessary. @rtype: L{axiom.store.Store} """ dbdir = FilePath(testCase.mktemp()) basePath = _getBaseStorePath(testCase, creator) basePath.copyTo(dbdir) return Store(dbdir) class CommandStubMixin: """ Pretend to be the parent command for a subcommand. """ def getStore(self): # fake out "parent" implementation for stuff. return self.store def getSynopsis(self): return '' subCommand = property(lambda self: self.__class__.__name__) class CommandStub(object): """ Mock for L{axiom.scripts.axiomatic.Options} which is always set as the C{parent} attribute of an I{axiomatic} subcommand. @ivar _store: The L{Store} associated which will be supplied to the subcommand. 
""" def __init__(self, store, subCommand): self._store = store self.subCommand = subCommand def getSynopsis(self): return "Usage: axiomatic [options]" def getStore(self): return self._store class QueryCounter: """ This is a counter object which measures the number of VDBE instructions SQLite will execute to fulfill a particular query. The count of VDBE instructions is very useful as a proxy for CPU time and disk usage, because it (as opposed to CPU time and disk usage) is deterministic between runs of a given query regardless of various accidents of operating-system latency. When creating data for a query involving a limit, start with B{more} Items than will be returned by the limited query, not exactly the right number. SQLite will do a little bit more work in the case where the limit restricts the number of Items returned, and this will cause a test to fail even though the performance characteristics being demonstrated are actually correct. Put another way, if you are testing:: s.query(MyItem, limit=5) You should create six instances of C{MyItem} before the first C{measure} call and then create one or more additional instances of C{MyItem} before the second C{measure} call. """ def __init__(self, store): """ Create a new query counter and install it on the provided store. @param store: an axiom L{Store}. """ self.reset() self.store = store c = self.store.connection._connection # XXX: this only works with the pysqlite backend, even _with_ the hack # detection; if we ever care about the apsw backend again, we should # probably do something about adding the hack to it, adding this as a # public Axiom API, or something. sph = getattr(c, "set_progress_handler", None) if sph is None: raise SkipTest( "QueryCounter requires PySQLite 2.4 or newer, or a patch " "(see ) to " "expose the set_progress_handler API.") sph(self.progressHandler, 1) def progressHandler(self): """ This method will be called internally by SQLite for each bytecode executed. It increments a counter. @return: 0, aka SQLITE_OK, so that this does not abort the current query. """ self.counter += 1 return 0 def reset(self): """Reset the internal counter to 0. """ self.counter = 0 def measure(self, f, *a, **k): """ The primary public API of this class, which runs a given function and counts the number of bytecodes between its start and finish. @return: an integer, the number of VDBE instructions executed. """ save = self.counter self.reset() try: f(*a, **k) finally: result = self.counter self.counter = save return result Axiom-0.6.0/axiom/__init__.py0000644000175000017500000000021610765057253015745 0ustar exarkunexarkun# -*- test-case-name: axiom.test -*- from axiom._version import version version # tell pyflakes we're exporting it. Axiom-0.6.0/axiom/__init__.pyc0000644000175000017500000000030611304543322016073 0ustar exarkunexarkun ^Gc@sddklZedS(i(tversionN(taxiom._versionR(((s+/tmp/Divmod-release/Axiom/axiom/__init__.pytsAxiom-0.6.0/axiom/_fincache.py0000644000175000017500000000510010413554225016071 0ustar exarkunexarkun from weakref import ref from traceback import print_exc from twisted.python import log from axiom import iaxiom class CacheFault(RuntimeError): """ A serious problem has occurred within the cache. This error is internal and should never really be trapped. """ def logErrorNoMatterWhat(): try: log.msg("Exception in finalizer cannot be propagated") log.err() except: try: emergLog = file("WEAKREF_EMERGENCY_ERROR.log", 'a') print_exc(file=emergLog) emergLog.flush() emergLog.close() except: # Nothing can be done. 
We can't get an emergency log file to write # to. Don't bother. return def createCacheRemoveCallback(w, k, f): def remove(self): # Weakref callbacks cannot raise exceptions or DOOM ensues try: f() except: logErrorNoMatterWhat() try: self = w() if self is not None: del self.data[k] except: logErrorNoMatterWhat() return remove PROFILING = False class FinalizingCache: """Possibly useful for infrastructure? This would be a nice addition (or perhaps even replacement) for twisted.python.finalize. """ def __init__(self): self.data = {} if not PROFILING: # see docstring for 'has' self.has = self.data.has_key def cache(self, key, value): fin = value.__finalizer__() assert key not in self.data, "Duplicate cache key: %r %r %r" % (key, value, self.data[key]) self.data[key] = ref(value, createCacheRemoveCallback( ref(self), key, fin)) return value def uncache(self, key, value): assert self.get(key) is value del self.data[key] def has(self, key): """Does the cache have this key? (This implementation is only used if the system is being profiled, due to bugs in Python's old profiler and its interaction with weakrefs. Set the module attribute PROFILING to True at startup for this.) """ if key in self.data: o = self.data[key]() if o is None: del self.data[key] return False return True return False def get(self, key): o = self.data[key]() if o is None: raise CacheFault( "FinalizingCache has %r but its value is no more." % (key,)) log.msg(interface=iaxiom.IStatEvent, stat_cache_hits=1, key=key) return o Axiom-0.6.0/axiom/_pysqlite2.py0000644000175000017500000001201310665061677016303 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_pysqlite2 -*- """ PySQLite2 Connection and Cursor wrappers. These provide a uniform interface on top of PySQLite2 for Axiom, particularly including error handling behavior and exception types. """ import time from pysqlite2 import dbapi2 from twisted.python import log from axiom import errors, iaxiom class Connection(object): def __init__(self, connection, timeout=None): self._connection = connection self._timeout = timeout def fromDatabaseName(cls, dbFilename, timeout=None, isolationLevel=None): return cls(dbapi2.connect(dbFilename, timeout=0, isolation_level=isolationLevel)) fromDatabaseName = classmethod(fromDatabaseName) def cursor(self): return Cursor(self, self._timeout) def identifySQLError(self, sql, args, e): """ Identify an appropriate SQL error object for the given message for the supported versions of sqlite. @return: an SQLError """ message = e.args[0] if message.startswith("table") and message.endswith("already exists"): return errors.TableAlreadyExists(sql, args, e) return errors.SQLError(sql, args, e) class Cursor(object): def __init__(self, connection, timeout): self._connection = connection self._cursor = connection._connection.cursor() self.timeout = timeout def __iter__(self): return iter(self._cursor) def time(self): """ Return the current wallclock time as a float representing seconds from a fixed but arbitrary point. """ return time.time() def sleep(self, seconds): """ Block for the given number of seconds. @type seconds: C{float} """ time.sleep(seconds) def execute(self, sql, args=()): try: try: blockedTime = 0.0 t = self.time() try: # SQLite3 uses something like exponential backoff when # trying to acquire a database lock. This means that even # for very long timeouts, it may only attempt to acquire # the lock a handful of times.
Another process which is # executing frequent, short-lived transactions may acquire # and release the lock many times between any two attempts # by this one to acquire it. If this process gets unlucky # just a few times, this execute may fail to acquire the # lock within the specified timeout. # Since attempting to acquire the lock is a fairly cheap # operation, we take another route. SQLite3 is always told # to use a timeout of 0 - ie, acquire it on the first try # or fail instantly. We will keep doing this, ten times a # second, until the actual timeout expires. # What would be really fantastic is a notification # mechanism for information about the state of the lock # changing. Of course this is clearly insane, no one has ever # managed to invent a tool for communicating one bit of # information between multiple processes. while 1: try: return self._cursor.execute(sql, args) except dbapi2.OperationalError, e: if e.args[0] == 'database is locked': now = self.time() if self.timeout is not None: if (now - t) > self.timeout: raise errors.TimeoutError(sql, self.timeout, e) self.sleep(0.1) blockedTime = self.time() - t else: raise finally: txntime = self.time() - t if txntime - blockedTime > 2.0: log.msg('Extremely long execute: %s' % (txntime - blockedTime,)) log.msg(sql) # import traceback; traceback.print_stack() log.msg(interface=iaxiom.IStatEvent, stat_cursor_execute_time=txntime, stat_cursor_blocked_time=blockedTime) except dbapi2.OperationalError, e: if e.args[0] == 'database schema has changed': return self._cursor.execute(sql, args) raise except (dbapi2.ProgrammingError, dbapi2.InterfaceError, dbapi2.OperationalError), e: raise self._connection.identifySQLError(sql, args, e) def lastRowID(self): return self._cursor.lastrowid def close(self): self._cursor.close() Axiom-0.6.0/axiom/_schema.py0000644000175000017500000000432410412631340015571 0ustar exarkunexarkun # DELETE_OBJECT = 'DELETE FROM axiom_objects WHERE oid = ?' CREATE_OBJECT = 'INSERT INTO *DATABASE*.axiom_objects (type_id) VALUES (?)' CREATE_TYPE = 'INSERT INTO *DATABASE*.axiom_types (typename, module, version) VALUES (?, ?, ?)' BASE_SCHEMA = [""" CREATE TABLE *DATABASE*.axiom_objects ( type_id INTEGER NOT NULL CONSTRAINT fk_type_id REFERENCES axiom_types(oid) ) """, """ CREATE INDEX *DATABASE*.axiom_objects_type_idx ON axiom_objects(type_id); """, """ CREATE TABLE *DATABASE*.axiom_types ( typename VARCHAR, module VARCHAR, version INTEGER ) """, """ CREATE TABLE *DATABASE*.axiom_attributes ( type_id INTEGER, row_offset INTEGER, indexed BOOLEAN, sqltype VARCHAR, allow_none BOOLEAN, pythontype VARCHAR, attribute VARCHAR, docstring TEXT ) """] TYPEOF_QUERY = """ SELECT *DATABASE*.axiom_types.typename, *DATABASE*.axiom_types.module, *DATABASE*.axiom_types.version FROM *DATABASE*.axiom_types, *DATABASE*.axiom_objects WHERE *DATABASE*.axiom_objects.oid = ? AND *DATABASE*.axiom_types.oid = *DATABASE*.axiom_objects.type_id """ HAS_SCHEMA_FEATURE = ("SELECT COUNT(oid) FROM *DATABASE*.sqlite_master " "WHERE type = ? AND name = ?") IDENTIFYING_SCHEMA = ('SELECT indexed, sqltype, allow_none, attribute ' 'FROM *DATABASE*.axiom_attributes WHERE type_id = ?
' 'ORDER BY row_offset') ADD_SCHEMA_ATTRIBUTE = ( 'INSERT INTO *DATABASE*.axiom_attributes ' '(type_id, row_offset, indexed, sqltype, allow_none, attribute, docstring, pythontype) ' 'VALUES (?, ?, ?, ?, ?, ?, ?, ?)') ALL_TYPES = 'SELECT oid, module, typename, version FROM *DATABASE*.axiom_types' GET_GREATER_VERSIONS_OF_TYPE = ('SELECT version FROM *DATABASE*.axiom_types ' 'WHERE typename = ? AND version > ?') SCHEMA_FOR_TYPE = ('SELECT indexed, pythontype, attribute, docstring ' 'FROM *DATABASE*.axiom_attributes ' 'WHERE type_id = ?') CHANGE_TYPE = 'UPDATE *DATABASE*.axiom_objects SET type_id = ? WHERE oid = ?' APP_VACUUM = 'DELETE FROM *DATABASE*.axiom_objects WHERE (type_id == -1) AND (oid != (SELECT MAX(oid) from *DATABASE*.axiom_objects))' Axiom-0.6.0/axiom/_version.py0000644000175000017500000000026411304535370016024 0ustar exarkunexarkun# This is an auto-generated file. Use Epsilon/bin/release-divmod to update. from twisted.python import versions version = versions.Version(__name__[:__name__.rfind('.')], 0, 6, 0) Axiom-0.6.0/axiom/attributes.py0000644000175000017500000012370511224366113016372 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_attributes,axiom.test.test_reference -*- import os from decimal import Decimal from epsilon import hotfix hotfix.require('twisted', 'filepath_copyTo') from zope.interface import implements from twisted.python import filepath from twisted.python.components import registerAdapter from epsilon.extime import Time from axiom.slotmachine import Attribute as inmemory from axiom.errors import NoCrossStoreReferences, BrokenReference from axiom.iaxiom import IComparison, IOrdering, IColumn, IQuery _NEEDS_FETCH = object() # token indicating that a value was not found __metaclass__ = type class _ComparisonOperatorMuxer: """ Collapse comparison operations into calls to a single method with varying arguments. """ def compare(self, other, op): """ Override this in a subclass. """ raise NotImplementedError() def __eq__(self, other): return self.compare(other, '=') def __ne__(self, other): return self.compare(other, '!=') def __gt__(self, other): return self.compare(other, '>') def __lt__(self, other): return self.compare(other, '<') def __ge__(self, other): return self.compare(other, '>=') def __le__(self, other): return self.compare(other, '<=') def compare(left, right, op): # interim: maybe we want objects later? right now strings should be fine if IColumn.providedBy(right): return TwoAttributeComparison(left, op, right) elif right is None: if op == '=': negate = False elif op == '!=': negate = True else: raise TypeError( "None/NULL does not work with %s comparison" % (op,)) return NullComparison(left, negate) else: # convert to constant usable in the database return AttributeValueComparison(left, op, right) class _MatchingOperationMuxer: """ Collapse string matching operations into calls to a single method with varying arguments. """ def _like(self, negate, firstOther, *others): others = (firstOther,) + others likeParts = [] allValues = True for other in others: if IColumn.providedBy(other): likeParts.append(LikeColumn(other)) allValues = False elif other is None: # LIKE NULL is a silly condition, but it's allowed.
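                # (Illustrative, not in the original source: a None operand
                # comes from a call such as
                #     MyItem.title.like(None)
                # where MyItem is a hypothetical Item type; the resulting SQL
                # "LIKE NULL" matches no rows, but it is accepted here for
                # completeness.)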
likeParts.append(LikeNull()) allValues = False else: likeParts.append(LikeValue(other)) if allValues: likeParts = [LikeValue(''.join(others))] return LikeComparison(self, negate, likeParts) def like(self, *others): return self._like(False, *others) def notLike(self, *others): return self._like(True, *others) def startswith(self, other): return self._like(False, other, '%') def endswith(self, other): return self._like(False, '%', other) _ASC = 'ASC' _DESC = 'DESC' class _OrderingMixin: """ Provide the C{ascending} and C{descending} attributes to specify sort direction. """ def _asc(self): return SimpleOrdering(self, _ASC) def _desc(self): return SimpleOrdering(self, _DESC) desc = descending = property(_desc) asc = ascending = property(_asc) class _ContainableMixin: def oneOf(self, seq, negate=False): """ Choose items whose attributes are in a fixed set. X.oneOf([1, 2, 3]) Implemented with the SQL 'in' statement. """ return SequenceComparison(self, seq, negate) def notOneOf(self, seq): return self.oneOf(seq, negate=True) class Comparable(_ContainableMixin, _ComparisonOperatorMuxer, _MatchingOperationMuxer, _OrderingMixin): """ Helper for a thing that can be compared like an SQLAttribute (or is in fact an SQLAttribute). Requires that 'self' have 'type' (Item-subclass) and 'columnName' (str) attributes, as well as an 'infilter' method in the spirit of SQLAttribute, documented below. """ # XXX TODO: improve error reporting def compare(self, other, sqlop): return compare(self, other, sqlop) class SimpleOrdering: """ Currently this class is mostly internal. More documentation will follow as its interface is finalized. """ implements(IOrdering) # maybe this will be a useful public API, for the query something # something. isDescending = property(lambda self: self.direction == _DESC) isAscending = property(lambda self: self.direction == _ASC) def __init__(self, attribute, direction=''): self.attribute = attribute self.direction = direction def orderColumns(self): return [(self.attribute, self.direction)] def __repr__(self): return repr(self.attribute) + self.direction def __add__(self, other): if isinstance(other, SimpleOrdering): return CompoundOrdering([self, other]) elif isinstance(other, (list, tuple)): return CompoundOrdering([self] + list(other)) else: return NotImplemented def __radd__(self, other): if isinstance(other, SimpleOrdering): return CompoundOrdering([other, self]) elif isinstance(other, (list, tuple)): return CompoundOrdering(list(other) + [self]) else: return NotImplemented class CompoundOrdering: """ List of SimpleOrdering instances. """ implements(IOrdering) def __init__(self, seq): self.simpleOrderings = list(seq) def __repr__(self): return self.__class__.__name__ + '(' + repr(self.simpleOrderings) + ')' def __add__(self, other): """ Just thinking about what might be useful from the perspective of introspecting on query objects... don't document this *too* thoroughly yet. """ if isinstance(other, CompoundOrdering): return CompoundOrdering(self.simpleOrderings + other.simpleOrderings) elif isinstance(other, SimpleOrdering): return CompoundOrdering(self.simpleOrderings + [other]) elif isinstance(other, (list, tuple)): return CompoundOrdering(self.simpleOrderings + list(other)) else: return NotImplemented def __radd__(self, other): """ Just thinking about what might be useful from the perspective of introspecting on query objects... don't document this *too* thoroughly yet. 
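As an illustrative sketch of how orderings compose (C{Foo} being a hypothetical item type): C{[Foo.bar.ascending] + Foo.baz.descending} yields a L{CompoundOrdering} whose C{orderColumns()} is C{[(Foo.bar, 'ASC'), (Foo.baz, 'DESC')]}, suitable to pass as a query's C{sort} argument.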
""" if isinstance(other, CompoundOrdering): return CompoundOrdering(other.simpleOrderings + self.simpleOrderings) elif isinstance(other, SimpleOrdering): return CompoundOrdering([other] + self.simpleOrderings) elif isinstance(other, (list, tuple)): return CompoundOrdering(list(other) + self.simpleOrderings) else: return NotImplemented def orderColumns(self): x = [] for o in self.simpleOrderings: x.extend(o.orderColumns()) return x class UnspecifiedOrdering: implements(IOrdering) def __init__(self, null): pass def __add__(self, other): return IOrdering(other, NotImplemented) __radd__ = __add__ def orderColumns(self): return [] registerAdapter(CompoundOrdering, list, IOrdering) registerAdapter(CompoundOrdering, tuple, IOrdering) registerAdapter(UnspecifiedOrdering, type(None), IOrdering) registerAdapter(SimpleOrdering, Comparable, IOrdering) def compoundIndex(*columns): for column in columns: column.compoundIndexes.append(columns) class SQLAttribute(inmemory, Comparable): """ Abstract superclass of all attributes. _Not_ an attribute itself. @ivar indexed: A C{bool} indicating whether this attribute will be indexed in the database. @ivar default: The value used for this attribute, if no value is specified. """ implements(IColumn) sqltype = None def __init__(self, doc='', indexed=False, default=None, allowNone=True, defaultFactory=None): inmemory.__init__(self, doc) self.indexed = indexed self.compoundIndexes = [] self.allowNone = allowNone self.default = default self.defaultFactory = defaultFactory if default is not None and defaultFactory is not None: raise ValueError("You may specify only one of default " "or defaultFactory, not both") def computeDefault(self): if self.defaultFactory is not None: return self.defaultFactory() return self.default def reprFor(self, oself): return repr(self.__get__(oself)) def getShortColumnName(self, store): return store.getShortColumnName(self) def getColumnName(self, store): return store.getColumnName(self) def prepareInsert(self, oself, store): """ Override this method to do something to an item to prepare for its insertion into a database. """ def coercer(self, value): """ must return a value equivalent to the data being passed in for it to be considered valid for a value of this attribute. for example, 'int' or 'str'. """ raise NotImplementedError() def infilter(self, pyval, oself, store): """ used to convert a Python value to something that lives in the database; so called because it is called when objects go in to the database. It takes a Python value and returns an SQL value. """ raise NotImplementedError() def outfilter(self, dbval, oself): """ used to convert an SQL value to something that lives in memory; so called because it is called when objects come out of the database. It takes an SQL value and returns a Python value. 
""" return dbval # requiredSlots must be called before it's run prefix = "_axiom_memory_" dbprefix = "_axiom_store_" def requiredSlots(self, modname, classname, attrname): self.modname = modname self.classname = classname self.attrname = attrname self.underlying = self.prefix + attrname self.dbunderlying = self.dbprefix + attrname yield self.underlying yield self.dbunderlying def fullyQualifiedName(self): return '.'.join([self.modname, self.classname, self.attrname]) def __repr__(self): return '<%s %s>' % ( self.__class__.__name__, self.fullyQualifiedName()) def type(): def get(self): if self._type is None: from twisted.python.reflect import namedAny self._type = namedAny(self.modname+'.'+self.classname) return self._type return get, _type = None type = property(*type()) def __get__(self, oself, cls=None): if cls is not None and oself is None: if self._type is not None: assert self._type == cls else: self._type = cls return self pyval = getattr(oself, self.underlying, _NEEDS_FETCH) if pyval is _NEEDS_FETCH: dbval = getattr(oself, self.dbunderlying, _NEEDS_FETCH) if dbval is _NEEDS_FETCH: # here is what *is* happening here: # SQL attributes are always loaded when an Item is created by # loading from the database, either via a query, a getItemByID # or an attribute access. If an attribute is left un-set, that # means that the item it is on was just created, and we fill in # the default value. # Here is what *should be*, but *is not* happening here: # this condition ought to indicate that a value may exist in # the database, but it is not currently available in memory. # It would then query the database immediately, loading all # SQL-resident attributes related to this item to minimize the # number of queries run (e.g. rather than one per attribute) # this is a more desireable condition because it means that you # can create items "for free", so doing, for example, # self.bar.storeID is a much cheaper operation than doing # self.bar.baz. This particular idiom is frequently used in # queries and so speeding it up to avoid having to do a # database hit unless you actually need an item's attributes # would be worthwhile. return self.default pyval = self.outfilter(dbval, oself) # An upgrader may have changed the value of this attribute. If so, # return the new value, not the old one. if dbval != getattr(oself, self.dbunderlying): return self.__get__(oself, cls) # cache python value setattr(oself, self.underlying, pyval) return pyval def loaded(self, oself, dbval): """ This method is invoked when the item is loaded from the database, and when a transaction is reverted which restores this attribute's value. @param oself: an instance of an item which has this attribute. @param dbval: the underlying database value which was retrieved. """ setattr(oself, self.dbunderlying, dbval) delattr(oself, self.underlying) # member_descriptors don't raise # attribute errors; what gives? good # for us, I guess. def _convertPyval(self, oself, pyval): """ Convert a Python value to a value suitable for inserting into the database. @param oself: The object on which this descriptor is an attribute. @param pyval: The value to be converted. @return: A value legal for this column in the database. """ # convert to dbval later, I guess? 
if pyval is None and not self.allowNone: raise TypeError("attribute [%s.%s = %s()] must not be None" % ( self.classname, self.attrname, self.__class__.__name__)) return self.infilter(pyval, oself, oself.store) def __set__(self, oself, pyval): st = oself.store dbval = self._convertPyval(oself, pyval) oself.__dirty__[self.attrname] = self, dbval oself.touch() setattr(oself, self.underlying, pyval) setattr(oself, self.dbunderlying, dbval) if st is not None and st.autocommit: st._rejectChanges += 1 try: oself.checkpoint() finally: st._rejectChanges -= 1 class TwoAttributeComparison: implements(IComparison) def __init__(self, leftAttribute, operationString, rightAttribute): self.leftAttribute = leftAttribute self.operationString = operationString self.rightAttribute = rightAttribute def getQuery(self, store): sql = ('(%s %s %s)' % (self.leftAttribute.getColumnName(store), self.operationString, self.rightAttribute.getColumnName(store)) ) return sql def getInvolvedTables(self): tables = [self.leftAttribute.type] if self.leftAttribute.type is not self.rightAttribute.type: tables.append(self.rightAttribute.type) return tables def getArgs(self, store): return [] def __repr__(self): return ' '.join((self.leftAttribute.fullyQualifiedName(), self.operationString, self.rightAttribute.fullyQualifiedName())) class AttributeValueComparison: implements(IComparison) def __init__(self, attribute, operationString, value): self.attribute = attribute self.operationString = operationString self.value = value def getQuery(self, store): return ('(%s %s ?)' % (self.attribute.getColumnName(store), self.operationString)) def getArgs(self, store): return [self.attribute.infilter(self.value, None, store)] def getInvolvedTables(self): return [self.attribute.type] def __repr__(self): return ' '.join((self.attribute.fullyQualifiedName(), self.operationString, repr(self.value))) class NullComparison: implements(IComparison) def __init__(self, attribute, negate=False): self.attribute = attribute self.negate = negate def getQuery(self, store): if self.negate: op = 'NOT' else: op = 'IS' return ('(%s %s NULL)' % (self.attribute.getColumnName(store), op)) def getArgs(self, store): return [] def getInvolvedTables(self): return [self.attribute.type] class LikeFragment: def getLikeArgs(self): return [] def getLikeQuery(self, st): raise NotImplementedError() def getLikeTables(self): return [] class LikeNull(LikeFragment): def getLikeQuery(self, st): return "NULL" class LikeValue(LikeFragment): def __init__(self, value): self.value = value def getLikeQuery(self, st): return "?" def getLikeArgs(self): return [self.value] class LikeColumn(LikeFragment): def __init__(self, attribute): self.attribute = attribute def getLikeQuery(self, st): return self.attribute.getColumnName(st) def getLikeTables(self): return [self.attribute.type] class LikeComparison: implements(IComparison) # Not AggregateComparison or AttributeValueComparison because there is a # different, optimized syntax for 'or'. WTF is wrong with you, SQL?? 
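# A sketch of the generated SQL (with hypothetical column names): getQuery() below joins the fragments with SQL's string-concatenation operator, so Employee.name.like(u'%', Other.nickname) renders roughly as (employee.name LIKE (? || other.nickname)).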
def __init__(self, attribute, negate, likeParts): self.negate = negate self.attribute = attribute self.likeParts = likeParts def getInvolvedTables(self): tables = [self.attribute.type] for lf in self.likeParts: tables.extend([ t for t in lf.getLikeTables() if t not in tables]) return tables def getQuery(self, store): if self.negate: op = 'NOT LIKE' else: op = 'LIKE' sqlParts = [lf.getLikeQuery(store) for lf in self.likeParts] sql = '(%s %s (%s))' % (self.attribute.getColumnName(store), op, ' || '.join(sqlParts)) return sql def getArgs(self, store): l = [] for lf in self.likeParts: for pyval in lf.getLikeArgs(): l.append( self.attribute.infilter( pyval, None, store)) return l class AggregateComparison: """ Abstract base class for compound comparisons that aggregate other comparisons - currently only used for AND and OR comparisons. """ implements(IComparison) operator = None def __init__(self, *conditions): self.conditions = conditions if self.operator is None: raise NotImplementedError, ('%s cannot be used; you want AND or OR.' % self.__class__.__name__) if not conditions: raise ValueError, ('%s condition requires at least one argument' % self.operator) def getQuery(self, store): oper = ' %s ' % self.operator return '(%s)' % oper.join( [condition.getQuery(store) for condition in self.conditions]) def getArgs(self, store): args = [] for cond in self.conditions: args += cond.getArgs(store) return args def getInvolvedTables(self): tables = [] for cond in self.conditions: tables.extend([ t for t in cond.getInvolvedTables() if t not in tables]) return tables def __repr__(self): return '%s(%s)' % (self.__class__.__name__, ', '.join(map(repr, self.conditions))) class SequenceComparison: implements(IComparison) def __init__(self, attribute, container, negate): self.attribute = attribute self.container = container self.negate = negate if IColumn.providedBy(container): self.containerClause = self._columnContainer self.getArgs = self._columnArgs elif IQuery.providedBy(container): self.containerClause = self._queryContainer self.getArgs = self._queryArgs else: self.containerClause = self._sequenceContainer self.getArgs = self._sequenceArgs def _columnContainer(self, store): """ Return the fully qualified name of the column being examined so as to push all of the containment testing into the database. """ return self.container.getColumnName(store) def _columnArgs(self, store): """ The IColumn form of this has no arguments, just a column name specified in the SQL, specified by _columnContainer. """ return [] _subselectSQL = None _subselectArgs = None def _queryContainer(self, store): """ Generate and cache the subselect SQL and its arguments. Return the subselect SQL. """ if self._subselectSQL is None: sql, args = self.container._sqlAndArgs('SELECT', self.container._queryTarget) self._subselectSQL, self._subselectArgs = sql, args return self._subselectSQL def _queryArgs(self, store): """ Make sure subselect arguments have been generated and then return them. """ self._queryContainer(store) return self._subselectArgs _sequence = None def _sequenceContainer(self, store): """ Smash whatever we got into a list and save the result in case we are executed multiple times. This keeps us from tripping up over generators and the like. 
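For example: if C{oneOf} was handed a generator expression, the first call here exhausts it into C{self._sequence}; later calls reuse that cached list, so the same C{?, ?, ...} clause and arguments are produced every time the query is rendered.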
""" if self._sequence is None: self._sequence = list(self.container) self._clause = ', '.join(['?'] * len(self._sequence)) return self._clause def _sequenceArgs(self, store): """ Filter each element of the data using the attribute type being tested for containment and hand back the resulting list. """ self._sequenceContainer(store) # Force _sequence to be valid return [self.attribute.infilter(pyval, None, store) for pyval in self._sequence] # IComparison - getArgs is assigned as an instance attribute def getQuery(self, store): return '%s %sIN (%s)' % ( self.attribute.getColumnName(store), self.negate and 'NOT ' or '', self.containerClause(store)) def getInvolvedTables(self): return [self.attribute.type] class AND(AggregateComparison): """ Combine 2 L{IComparison}s such that this is true when both are true. """ operator = 'AND' class OR(AggregateComparison): """ Combine 2 L{IComparison}s such that this is true when either is true. """ operator = 'OR' class TableOrderComparisonWrapper(object): """ Wrap any other L{IComparison} and override its L{getInvolvedTables} method to specify the same tables but in an explicitly specified order. """ implements(IComparison) tables = None comparison = None def __init__(self, tables, comparison): assert set(tables) == set(comparison.getInvolvedTables()) self.tables = tables self.comparison = comparison def getInvolvedTables(self): return self.tables def getQuery(self, store): return self.comparison.getQuery(store) def getArgs(self, store): return self.comparison.getArgs(store) class boolean(SQLAttribute): sqltype = 'BOOLEAN' def infilter(self, pyval, oself, store): if pyval is None: return None if pyval is True: return 1 elif pyval is False: return 0 else: raise TypeError("attribute [%s.%s = boolean()] must be True or False; not %r" % (self.classname, self.attrname, type(pyval).__name__,)) def outfilter(self, dbval, oself): if dbval == 1: return True elif dbval == 0: return False elif self.allowNone and dbval is None: return None else: raise ValueError( "attribute [%s.%s = boolean()] " "must have a database value of 1 or 0; not %r" % (self.classname, self.attrname, dbval)) TOO_BIG = (2 ** 63)-1 class ConstraintError(TypeError): """A type constraint was violated. """ def __init__(self, attributeObj, requiredTypes, providedValue): self.attributeObj = attributeObj self.requiredTypes = requiredTypes self.providedValue = providedValue TypeError.__init__(self, "attribute [%s.%s = %s()] must be " "(%s); not %r" % (attributeObj.classname, attributeObj.attrname, attributeObj.__class__.__name__, requiredTypes, type(providedValue).__name__)) def requireType(attributeObj, value, typerepr, *types): if not isinstance(value, types): raise ConstraintError(attributeObj, typerepr, value) inttyperepr = "integer less than %r" % (TOO_BIG,) class integer(SQLAttribute): sqltype = 'INTEGER' def infilter(self, pyval, oself, store): if pyval is None: return None requireType(self, pyval, inttyperepr, int, long) if pyval > TOO_BIG: raise ConstraintError( self, inttyperepr, pyval) return pyval class bytes(SQLAttribute): """ Attribute representing a sequence of bytes; this is represented in memory as a Python 'str'. 
""" sqltype = 'BLOB' def infilter(self, pyval, oself, store): if pyval is None: return None if isinstance(pyval, unicode): raise ConstraintError(self, "str or other byte buffer", pyval) return buffer(pyval) def outfilter(self, dbval, oself): if dbval is None: return None return str(dbval) class InvalidPathError(ValueError): """ A path that could not be used with the database was attempted to be used with the database. """ class text(SQLAttribute): """ Attribute representing a sequence of characters; this is represented in memory as a Python 'unicode'. """ def __init__(self, caseSensitive=False, **kw): SQLAttribute.__init__(self, **kw) if caseSensitive: self.sqltype = 'TEXT' else: self.sqltype = 'TEXT COLLATE NOCASE' self.caseSensitive = caseSensitive def infilter(self, pyval, oself, store): if pyval is None: return None if not isinstance(pyval, unicode) or u'\0' in pyval: raise ConstraintError( self, "unicode string without NULL bytes", pyval) return pyval def outfilter(self, dbval, oself): return dbval class textlist(text): delimiter = u'\u001f' # Once upon a time, textlist encoded the list in such a way that caused [] # to be indistinguishable from [u'']. This value is now used as a # placeholder at the head of the list, to avoid this problem in a way that # is almost completely backwards-compatible with older databases. guard = u'\u0002' def outfilter(self, dbval, oself): unicodeString = super(textlist, self).outfilter(dbval, oself) if unicodeString is None: return None val = unicodeString.split(self.delimiter) if val[:1] == [self.guard]: del val[:1] return val def infilter(self, pyval, oself, store): if pyval is None: return None for innerVal in pyval: assert self.delimiter not in innerVal and self.guard not in innerVal result = self.delimiter.join([self.guard] + list(pyval)) return super(textlist, self).infilter(result, oself, store) class path(text): """ Attribute representing a pathname in the filesystem. If 'relative=True', the default, the representative pathname object must be somewhere inside the store, and will migrate with the store. I expect L{twisted.python.filepath.FilePath} or compatible objects as my values. """ def __init__(self, relative=True, **kw): text.__init__(self, **kw) self.relative = True def prepareInsert(self, oself, store): """ Prepare for insertion into the database by making the dbunderlying attribute of the item a relative pathname with respect to the store rather than an absolute pathname. """ if self.relative: fspath = self.__get__(oself) oself.__dirty__[self.attrname] = self, self.infilter(fspath, oself, store) def infilter(self, pyval, oself, store): if pyval is None: return None mypath = unicode(pyval.path) if store is None: store = oself.store if store is None: return None if self.relative: # XXX add some more filepath APIs to make this kind of checking easier. storepath = os.path.normpath(store.filesdir.path) mysegs = mypath.split(os.sep) storesegs = storepath.split(os.sep) if len(mysegs) <= len(storesegs) or mysegs[:len(storesegs)] != storesegs: raise InvalidPathError('%s not in %s' % (mypath, storepath)) # In the database we use '/' to separate paths for portability. # This databaes could have relative paths created on Windows, then # be moved to Linux for deployment, and what *was* the native # os.sep (backslash) will not be friendly to Linux's filesystem. 
# However, this is only for relative paths, since absolute or UNC # pathnames on a Windows system are inherently unportable and it's # not reasonable to calculate relative paths outside the store. p = '/'.join(mysegs[len(storesegs):]) else: p = mypath # we already know it's absolute, it came from a # filepath. return super(path, self).infilter(p, oself, store) def outfilter(self, dbval, oself): if dbval is None: return None if self.relative: fp = oself.store.filesdir for segment in dbval.split('/'): fp = fp.child(segment) else: fp = filepath.FilePath(dbval) return fp MICRO = 1000000. class timestamp(integer): """ An in-database representation of date and time. To make formatting as easy as possible, this is represented in Python as an instance of L{epsilon.extime.Time}; see its documentation for more details. """ def infilter(self, pyval, oself, store): if pyval is None: return None return integer.infilter(self, int(pyval.asPOSIXTimestamp() * MICRO), oself, store) def outfilter(self, dbval, oself): if dbval is None: return None return Time.fromPOSIXTimestamp(dbval / MICRO) _cascadingDeletes = {} _disallows = {} class reference(integer): NULLIFY = object() DISALLOW = object() CASCADE = object() def __init__(self, doc='', indexed=True, allowNone=True, reftype=None, whenDeleted=NULLIFY): integer.__init__(self, doc, indexed, None, allowNone) assert whenDeleted in (reference.NULLIFY, reference.CASCADE, reference.DISALLOW),( "whenDeleted must be one of: " "reference.NULLIFY, reference.CASCADE, reference.DISALLOW") self.reftype = reftype self.whenDeleted = whenDeleted if whenDeleted is reference.CASCADE: # Note; this list is technically in a slightly inconsistent state # as things are being built. _cascadingDeletes.setdefault(reftype, []).append(self) if whenDeleted is reference.DISALLOW: _disallows.setdefault(reftype, []).append(self) def reprFor(self, oself): obj = getattr(oself, self.underlying, None) if obj is not None: if obj.storeID is not None: return 'reference(%d)' % (obj.storeID,) else: return 'reference(unstored@%d)' % (id(obj),) sid = getattr(oself, self.dbunderlying, None) if sid is None: return 'None' return 'reference(%d)' % (sid,) def __get__(self, oself, cls=None): """ Override L{integer.__get__} to verify that the value to be returned is currently a valid item in the same store, and to make sure that legacy items are upgraded if they happen to have been cached. """ rv = super(reference, self).__get__(oself, cls) if rv is self: # If it's an attr lookup on the class, just do that. return self if rv is None: return rv if not rv._currentlyValidAsReferentFor(oself.store): # Make sure it's currently valid, i.e. it's not going to be deleted # this transaction or it hasn't been deleted. # XXX TODO: drop cached in-memory referent if it's been deleted / # no longer valid. 
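# A sketch of the three deletion policies declared above (with hypothetical types): given org = reference(reftype=Organization, whenDeleted=reference.CASCADE) on an Employee item, deleting the Organization also deletes the Employee; with the default NULLIFY the attribute simply reads as None afterwards (the case handled below); with DISALLOW the deletion is refused outright.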
assert self.whenDeleted is reference.NULLIFY, ( "not sure what to do if not...") return None if rv.__legacy__: delattr(oself, self.underlying) return super(reference, self).__get__(oself, cls) return rv def prepareInsert(self, oself, store): oitem = super(reference, self).__get__(oself) # bypass NULLIFY if oitem is not None and oitem.store is not store: raise NoCrossStoreReferences( "Trying to insert item: %r into store: %r, " "but it has a reference to other item: .%s=%r " "in another store: %r" % ( oself, store, self.attrname, oitem, oitem.store)) def infilter(self, pyval, oself, store): if pyval is None: return None if oself is None: return pyval.storeID if oself.store is None: return pyval.storeID if oself.store != pyval.store: raise NoCrossStoreReferences( "You can't establish references to items in other stores.") return integer.infilter(self, pyval.storeID, oself, store) def outfilter(self, dbval, oself): if dbval is None: return None referee = oself.store.getItemByID(dbval, default=None, autoUpgrade=not oself.__legacy__) if referee is None and self.whenDeleted is not reference.NULLIFY: # If referee merely changed to another valid referent, # SQLAttribute.__get__ will notice that what we returned is # inconsistent and try again. However, it doesn't know about the # BrokenReference that is raised if the old referee is no longer a # valid referent. Check to see if the dbunderlying is still the # same as the dbval passed in. If it's different, we should try to # load the value again. Only if it is unchanged will we raise the # BrokenReference. It would be better if all of this # change-detection logic were in one place, but I can't figure out # how to do that. -exarkun if dbval != getattr(oself, self.dbunderlying): return self.__get__(oself, None) raise BrokenReference('Reference to storeID %r is broken' % (dbval,)) return referee class ieee754_double(SQLAttribute): """ From the SQLite documentation:: Each value stored in an SQLite database (or manipulated by the database engine) has one of the following storage classes: (...) REAL. The value is a floating point value, stored as an 8-byte IEEE floating point number. This attribute type implements IEEE754 double-precision binary floating-point storage. Some people call this 'float', and think it is somehow related to numbers. This assumption can be misleading when working with certain types of data. This attribute has an unwieldy name on purpose. You should be aware of the caveats related to binary floating point math before using this type. It is particularly ill-advised to use it to store values representing large amounts of currency as rounding errors may be significant enough to introduce accounting discrepancies. Certain edge-cases are not handled properly. For example, INF and NAN are considered by SQLite to be equal to everything, rather than the Python interpretation where INF is equal only to itself and greater than everything, and NAN is equal to nothing, not even itself. """ sqltype = 'REAL' def infilter(self, pyval, oself, store): if pyval is None: return None requireType(self, pyval, 'float', float) return pyval def outfilter(self, dbval, oself): return dbval class AbstractFixedPointDecimal(integer): """ Attribute representing a number with a specified number of decimal places. This is stored in SQLite as a binary integer multiplied by M{10**N} where C{N} is the number of decimal places required by Python. 
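(For example, with the C{point2decimal} subclass defined below, C{Decimal('1.25')} is stored as the integer C{125}.)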
Therefore, in-database multiplication, division, or queries which compare to integers or fixedpointdecimals with a different number of decimal places, will not work. Also, you cannot store, or sum to, fixed point decimals greater than M{(2**63)/(10**N)}. While L{ieee754_double} is handy for representing various floating-point numbers, such as scientific measurements, this class (and the associated Python decimal class) is more appropriate for arithmetic on sums of money. See Python's U{Decimal class} documentation for more information on computerized Decimal math in general. This is currently a private helper superclass because we cannot store additional metadata about column types; maybe we should fix that. @cvar decimalPlaces: the number of points of decimal precision allowed by the storage and retrieval of this class. *Points beyond this number will be silently truncated from values passed into the database*, so be sure to select a value appropriate to your application! """ def __init__(self, **kw): integer.__init__(self, **kw) def infilter(self, pyval, oself, store): if pyval is None: return None if isinstance(pyval, (int, long)): pyval = Decimal(pyval) if isinstance(pyval, Decimal): # Python < 2.5.2 compatibility: # Use to_integral instead of to_integral_value. dbval = int((pyval * 10**self.decimalPlaces).to_integral()) return super(AbstractFixedPointDecimal, self).infilter( dbval, oself, store) else: raise TypeError( "attribute [%s.%s = AbstractFixedPointDecimal(...)] must be " "Decimal instance; not %r" % ( self.classname, self.attrname, type(pyval).__name__)) def outfilter(self, dbval, oself): if dbval is None: return None return Decimal(dbval) / 10**self.decimalPlaces def compare(self, other, sqlop): if isinstance(other, Comparable): if isinstance(other, AbstractFixedPointDecimal): if other.decimalPlaces == self.decimalPlaces: # fall through to default behavior at bottom pass else: raise TypeError( "Can't compare Decimals of varying precisions: " "(%s.%s %s %s.%s)" % ( self.classname, self.attrname, sqlop, other.classname, other.attrname )) else: raise TypeError( "Can't compare Decimals to other things: " "(%s.%s %s %s.%s)" % ( self.classname, self.attrname, sqlop, other.classname, other.attrname )) return super(AbstractFixedPointDecimal, self).compare(other, sqlop) class point1decimal(AbstractFixedPointDecimal): decimalPlaces = 1 class point2decimal(AbstractFixedPointDecimal): decimalPlaces = 2 class point3decimal(AbstractFixedPointDecimal): decimalPlaces = 3 class point4decimal(AbstractFixedPointDecimal): decimalPlaces = 4 class point5decimal(AbstractFixedPointDecimal): decimalPlaces = 5 class point6decimal(AbstractFixedPointDecimal): decimalPlaces = 6 class point7decimal(AbstractFixedPointDecimal): decimalPlaces = 7 class point8decimal(AbstractFixedPointDecimal): decimalPlaces = 8 class point9decimal(AbstractFixedPointDecimal): decimalPlaces = 9 class point10decimal(AbstractFixedPointDecimal): decimalPlaces = 10 class money(point4decimal): """ I am a 4-point precision fixed-point decimal number column type; suggested for representing a quantity of money. (This does not, however, include features such as currency.) """ Axiom-0.6.0/axiom/batch.py0000644000175000017500000011353111224737657015275 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_batch -*- """ Utilities for performing repetitive tasks over potentially large sets of data over an extended period of time. 
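An illustrative sketch (C{Photo} and C{thumbnailer} are hypothetical): C{processor(Photo)} creates an Item type whose instances act as batch processors for C{Photo} items; after C{proc.addReliableListener(thumbnailer)}, every C{Photo} in the store - both pre-existing and newly created - is eventually handed to C{thumbnailer.processItem()}.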
""" import weakref, datetime, os, sys from zope.interface import implements from twisted.python import reflect, failure, log, procutils, util, runtime from twisted.internet import task, defer, reactor, error, protocol from twisted.application import service from epsilon import extime, process, cooperator, modal, juice from axiom import iaxiom, errors as eaxiom, item, attributes from axiom.scheduler import Scheduler, SubScheduler from axiom.upgrade import registerUpgrader, registerDeletionUpgrader from axiom.dependency import installOn VERBOSE = False _processors = weakref.WeakValueDictionary() class _NoWorkUnits(Exception): """ Raised by a _ReliableListener's step() method to indicate it didn't do anything. """ class _ProcessingFailure(Exception): """ Raised when processItem raises any exception. This is never raised directly, but instances of the three subclasses are. """ def __init__(self, reliableListener, workUnit, failure): Exception.__init__(self) self.reliableListener = reliableListener self.workUnit = workUnit self.failure = failure # Get rid of all references this failure is holding so that it doesn't # cause any crazy object leaks. See also the comment in # BatchProcessingService.step's except suite. self.failure.cleanFailure() def mark(self): """ Mark the unit of work as failed in the database and update the listener so as to skip it next time. """ self.reliableListener.lastRun = extime.Time() BatchProcessingError( store=self.reliableListener.store, processor=self.reliableListener.processor, listener=self.reliableListener.listener, item=self.workUnit, error=self.failure.getErrorMessage()) class _ForwardProcessingFailure(_ProcessingFailure): """ An error occurred in a reliable listener while processing items forward from the mark. """ def mark(self): _ProcessingFailure.mark(self) self.reliableListener.forwardMark = self.workUnit.storeID class _BackwardProcessingFailure(_ProcessingFailure): """ An error occurred in a reliable listener while processing items backwards from the mark. """ def mark(self): _ProcessingFailure.mark(self) self.reliableListener.backwardMark = self.workUnit.storeID class _TrackedProcessingFailure(_ProcessingFailure): """ An error occurred in a reliable listener while processing items specially added to the batch run. """ class BatchProcessingError(item.Item): processor = attributes.reference(doc=""" The batch processor which owns this failure. """) listener = attributes.reference(doc=""" The listener which caused this error. """) item = attributes.reference(doc=""" The item which actually failed to be processed. """) error = attributes.bytes(doc=""" The error message which was associated with this failure. """) class _ReliableTracker(item.Item): """ A tracking item for an out-of-sequence item which a reliable listener should be given to process. These are created when L{_ReliableListener.addItem} is called and the specified item is in the range of items which have already been processed. """ processor = attributes.reference(doc=""" The batch processor which owns this tracker. """) listener = attributes.reference(doc=""" The listener which is responsible for this tracker's item. """) item = attributes.reference(doc=""" The item which this is tracking. """) class _ReliableListener(item.Item): processor = attributes.reference(doc=""" The batch processor which owns this listener. """) listener = attributes.reference(doc=""" The item which is actually the listener. 
""") backwardMark = attributes.integer(doc=""" Store ID of the first Item after the next Item to be processed in the backwards direction. Usually, the Store ID of the Item previously processed in the backwards direction. """) forwardMark = attributes.integer(doc=""" Store ID of the first Item before the next Item to be processed in the forwards direction. Usually, the Store ID of the Item previously processed in the forwards direction. """) lastRun = attributes.timestamp(doc=""" Time indicating the last chance given to this listener to do some work. """) style = attributes.integer(doc=""" Either L{iaxiom.LOCAL} or L{iaxiom.REMOTE}. Indicates where the batch processing should occur, in the main process or a subprocess. """) def __repr__(self): return '' % ({iaxiom.REMOTE: 'remote', iaxiom.LOCAL: 'local'}[self.style], self.listener, self.storeID) def addItem(self, item): assert type(item) is self.processor.workUnitType, \ "Adding work unit of type %r to listener for type %r" % ( type(item), self.processor.workUnitType) if item.storeID >= self.backwardMark and item.storeID <= self.forwardMark: _ReliableTracker(store=self.store, listener=self, item=item) def _forwardWork(self, workUnitType): if VERBOSE: log.msg("%r looking forward from %r" % (self, self.forwardMark,)) return self.store.query( workUnitType, workUnitType.storeID > self.forwardMark, sort=workUnitType.storeID.ascending, limit=2) def _backwardWork(self, workUnitType): if VERBOSE: log.msg("%r looking backward from %r" % (self, self.backwardMark,)) if self.backwardMark == 0: return [] return self.store.query( workUnitType, workUnitType.storeID < self.backwardMark, sort=workUnitType.storeID.descending, limit=2) def _extraWork(self): return self.store.query(_ReliableTracker, _ReliableTracker.listener == self, limit=2) def _doOneWork(self, workUnit, failureType): if VERBOSE: log.msg("Processing a unit of work: %r" % (workUnit,)) try: self.listener.processItem(workUnit) except: f = failure.Failure() if VERBOSE: log.msg("Processing failed: %s" % (f.getErrorMessage(),)) log.err(f) raise failureType(self, workUnit, f) def step(self): first = True for workTracker in self._extraWork(): if first: first = False else: return True item = workTracker.item workTracker.deleteFromStore() self._doOneWork(item, _TrackedProcessingFailure) for workUnit in self._forwardWork(self.processor.workUnitType): if first: first = False else: return True self.forwardMark = workUnit.storeID self._doOneWork(workUnit, _ForwardProcessingFailure) for workUnit in self._backwardWork(self.processor.workUnitType): if first: first = False else: return True self.backwardMark = workUnit.storeID self._doOneWork(workUnit, _BackwardProcessingFailure) if first: raise _NoWorkUnits() if VERBOSE: log.msg("%r.step() returning False" % (self,)) return False class _BatchProcessorMixin: def step(self, style=iaxiom.LOCAL, skip=()): now = extime.Time() first = True for listener in self.store.query(_ReliableListener, attributes.AND(_ReliableListener.processor == self, _ReliableListener.style == style, _ReliableListener.listener.notOneOf(skip)), sort=_ReliableListener.lastRun.ascending): if not first: if VERBOSE: log.msg("Found more work to do, returning True from %r.step()" % (self,)) return True listener.lastRun = now try: if listener.step(): if VERBOSE: log.msg("%r.step() reported more work to do, returning True from %r.step()" % (listener, self)) return True except _NoWorkUnits: if VERBOSE: log.msg("%r.step() reported no work units" % (listener,)) else: first = False if VERBOSE: 
log.msg("No listeners left with work, returning False from %r.step()" % (self,)) return False def run(self): """ Try to run one unit of work through one listener. If there are more listeners or more work, reschedule this item to be run again in C{self.busyInterval} milliseconds, otherwise unschedule it. @rtype: L{extime.Time} or C{None} @return: The next time at which to run this item, used by the scheduler for automatically rescheduling, or None if there is no more work to do. """ now = extime.Time() if self.step(): self.scheduled = now + datetime.timedelta(milliseconds=self.busyInterval) else: self.scheduled = None return self.scheduled def timedEventErrorHandler(self, timedEvent, failureObj): failureObj.trap(_ProcessingFailure) log.msg("Batch processing failure") log.err(failureObj.value.failure) failureObj.value.mark() return extime.Time() + datetime.timedelta(milliseconds=self.busyInterval) def addReliableListener(self, listener, style=iaxiom.LOCAL): """ Add the given Item to the set which will be notified of Items available for processing. Note: Each Item is processed synchronously. Adding too many listeners to a single batch processor will cause the L{step} method to block while it sends notification to each listener. @param listener: An Item instance which provides a C{processItem} method. @return: An Item representing L{listener}'s persistent tracking state. """ existing = self.store.findUnique(_ReliableListener, attributes.AND(_ReliableListener.processor == self, _ReliableListener.listener == listener), default=None) if existing is not None: return existing for work in self.store.query(self.workUnitType, sort=self.workUnitType.storeID.descending, limit=1): forwardMark = work.storeID backwardMark = work.storeID + 1 break else: forwardMark = 0 backwardMark = 0 if self.scheduled is None: self.scheduled = extime.Time() iaxiom.IScheduler(self.store).schedule(self, self.scheduled) return _ReliableListener(store=self.store, processor=self, listener=listener, forwardMark=forwardMark, backwardMark=backwardMark, style=style) def removeReliableListener(self, listener): """ Remove a previously added listener. """ self.store.query(_ReliableListener, attributes.AND(_ReliableListener.processor == self, _ReliableListener.listener == listener)).deleteFromStore() self.store.query(BatchProcessingError, attributes.AND(BatchProcessingError.processor == self, BatchProcessingError.listener == listener)).deleteFromStore() def getReliableListeners(self): """ Return an iterable of the listeners which have been added to this batch processor. """ for rellist in self.store.query(_ReliableListener, _ReliableListener.processor == self): yield rellist.listener def getFailedItems(self): """ Return an iterable of two-tuples of listeners which raised an exception from C{processItem} and the item which was passed as the argument to that method. """ for failed in self.store.query(BatchProcessingError, BatchProcessingError.processor == self): yield (failed.listener, failed.item) def itemAdded(self): """ Called to indicate that a new item of the type monitored by this batch processor is being added to the database. If this processor is not already scheduled to run, this will schedule it. 
""" localCount = self.store.query( _ReliableListener, attributes.AND(_ReliableListener.processor == self, _ReliableListener.style == iaxiom.LOCAL), limit=1).count() if localCount and self.scheduled is None: self.scheduled = extime.Time() iaxiom.IScheduler(self.store).schedule(self, self.scheduled) def upgradeProcessor1to2(oldProcessor): """ Batch processors stopped polling at version 2, so they no longer needed the idleInterval attribute. They also gained a scheduled attribute which tracks their interaction with the scheduler. Since they stopped polling, we also set them up as a timed event here to make sure that they don't silently disappear, never to be seen again: running them with the scheduler gives them a chance to figure out what's up and set up whatever other state they need to continue to run. Since this introduces a new dependency of all batch processors on a powerup for the IScheduler, install a Scheduler or a SubScheduler if one is not already present. """ newProcessor = oldProcessor.upgradeVersion( oldProcessor.typeName, 1, 2, busyInterval=oldProcessor.busyInterval) newProcessor.scheduled = extime.Time() s = newProcessor.store sch = iaxiom.IScheduler(s, None) if sch is None: if s.parent is None: # Only site stores have no parents. sch = Scheduler(store=s) else: # Substores get subschedulers. sch = SubScheduler(store=s) installOn(sch, s) # And set it up to run. sch.schedule(newProcessor, newProcessor.scheduled) return newProcessor def processor(forType): """ Create an Axiom Item type which is suitable to use as a batch processor for the given Axiom Item type. Processors created this way depend on a L{iaxiom.IScheduler} powerup on the on which store they are installed. @type forType: L{item.MetaItem} @param forType: The Axiom Item type for which to create a batch processor type. @rtype: L{item.MetaItem} @return: An Axiom Item type suitable for use as a batch processor. If such a type previously existed, it will be returned. Otherwise, a new type is created. """ MILLI = 1000 if forType not in _processors: def __init__(self, *a, **kw): item.Item.__init__(self, *a, **kw) self.store.powerUp(self, iaxiom.IBatchProcessor) attrs = { '__name__': 'Batch_' + forType.__name__, '__module__': forType.__module__, '__init__': __init__, '__repr__': lambda self: '' % (reflect.qual(self.workUnitType), self.storeID), 'schemaVersion': 2, 'workUnitType': forType, 'scheduled': attributes.timestamp(doc=""" The next time at which this processor is scheduled to run. """, default=None), # MAGIC NUMBERS AREN'T THEY WONDERFUL? 'busyInterval': attributes.integer(doc="", default=MILLI / 10), } _processors[forType] = item.MetaItem( attrs['__name__'], (item.Item, _BatchProcessorMixin), attrs) registerUpgrader( upgradeProcessor1to2, _processors[forType].typeName, 1, 2) return _processors[forType] class ProcessUnavailable(Exception): """Indicates the process is not available to perform tasks. This is a transient error. Calling code should handle it by arranging to do the work they planned on doing at a later time. """ class Shutdown(juice.Command): """ Abandon, belay, cancel, cease, close, conclude, cut it out, desist, determine, discontinue, drop it, end, finish, finish up, give over, go amiss, go astray, go wrong, halt, have done with, hold, knock it off, lay off, leave off, miscarry, perorate, quit, refrain, relinquish, renounce, resolve, scrap, scratch, scrub, stay, stop, terminate, wind up. 
""" commandName = "Shutdown" responseType = juice.QuitBox def _childProcTerminated(self, err): self.mode = 'stopped' err = ProcessUnavailable(err) for d in self.waitingForProcess: d.errback(err) del self.waitingForProcess class ProcessController(object): """ Stateful class which tracks a Juice connection to a child process. Communication occurs over stdin and stdout of the child process. The process is launched and restarted as necessary. Failures due to the child process terminating, either unilaterally of by request, are represented as a transient exception class, Mode is one of:: - 'stopped' (no process running or starting) - 'starting' (process begun but not ready for requests) - 'ready' (process ready for requests) - 'stopping' (process being torn down) - 'waiting_ready' (process beginning but will be shut down as soon as it starts up) Transitions are as follows:: getProcess: stopped -> starting: launch process create/save in waitingForStartup/return Deferred starting -> starting: create/save/return Deferred ready -> ready: return saved process stopping: return failing Deferred indicating transient failure waiting_ready: return failing Deferred indicating transient failure stopProcess: stopped -> stopped: return succeeding Deferred starting -> waiting_ready: create Deferred, add transient failure errback handler, return ready -> stopping: call shutdown on process return Deferred which fires when shutdown is done childProcessCreated: starting -> ready: callback saved Deferreds clear saved Deferreds waiting_ready: errback saved Deferred indicating transient failure return _shutdownIndexerProcess() childProcessTerminated: starting -> stopped: errback saved Deferreds indicating transient failure waiting_ready -> stopped: errback saved Deferreds indicating transient failure ready -> stopped: drop reference to process object stopping -> stopped: Callback saved shutdown deferred @ivar process: A reference to the process object. Set in every non-stopped mode. @ivar juice: A reference to the juice protocol. Set in all modes. @ivar connector: A reference to the process protocol. Set in every non-stopped mode. @ivar onProcessStartup: None or a no-argument callable which will be invoked whenever the connection is first established to a newly spawned child process. @ivar onProcessTermination: None or a no-argument callable which will be invoked whenever a Juice connection is lost, except in the case where process shutdown was explicitly requested via stopProcess(). """ __metaclass__ = modal.ModalType initialMode = 'stopped' modeAttribute = 'mode' # A reference to the Twisted process object which corresponds to # the child process we have spawned. Set to a non-None value in # every state except stopped. process = None # A reference to the process protocol object via which we # communicate with the process's stdin and stdout. Set to a # non-None value in every state except stopped. 
connector = None def __init__(self, name, juice, tacPath, onProcessStartup=None, onProcessTermination=None, logPath=None, pidPath=None): self.name = name self.juice = juice self.tacPath = tacPath self.onProcessStartup = onProcessStartup self.onProcessTermination = onProcessTermination if logPath is None: logPath = name + '.log' if pidPath is None: pidPath = name + '.pid' self.logPath = logPath self.pidPath = pidPath def _startProcess(self): executable = sys.executable env = os.environ env['PYTHONPATH'] = os.pathsep.join(sys.path) twistdBinaries = procutils.which("twistd2.4") + procutils.which("twistd") if not twistdBinaries: return defer.fail(RuntimeError("Couldn't find twistd to start subprocess")) twistd = twistdBinaries[0] setsid = procutils.which("setsid") self.connector = JuiceConnector(self.juice, self) args = [ sys.executable, twistd, '--logfile=%s' % (self.logPath,)] if not runtime.platform.isWindows(): args.append('--pidfile=%s' % (self.pidPath,)) args.extend(['-noy', self.tacPath]) if setsid: args = ['setsid'] + args executable = setsid[0] self.process = process.spawnProcess( self.connector, executable, tuple(args), env=env) class stopped(modal.mode): def getProcess(self): self.mode = 'starting' self.waitingForProcess = [] self._startProcess() # Mode has changed, this will call some other # implementation of getProcess. return self.getProcess() def stopProcess(self): return defer.succeed(None) class starting(modal.mode): def getProcess(self): d = defer.Deferred() self.waitingForProcess.append(d) return d def stopProcess(self): def eb(err): err.trap(ProcessUnavailable) d = defer.Deferred().addErrback(eb) self.waitingForProcess.append(d) self.mode = 'waiting_ready' return d def childProcessCreated(self): self.mode = 'ready' if self.onProcessStartup is not None: self.onProcessStartup() for d in self.waitingForProcess: d.callback(self.juice) del self.waitingForProcess def childProcessTerminated(self, reason): _childProcTerminated(self, reason) if self.onProcessTermination is not None: self.onProcessTermination() class ready(modal.mode): def getProcess(self): return defer.succeed(self.juice) def stopProcess(self): self.mode = 'stopping' self.onShutdown = defer.Deferred() Shutdown().do(self.juice) return self.onShutdown def childProcessTerminated(self, reason): self.mode = 'stopped' self.process = self.connector = None if self.onProcessTermination is not None: self.onProcessTermination() class stopping(modal.mode): def getProcess(self): return defer.fail(ProcessUnavailable("Shutting down")) def stopProcess(self): return self.onShutdown def childProcessTerminated(self, reason): self.mode = 'stopped' self.process = self.connector = None self.onShutdown.callback(None) class waiting_ready(modal.mode): def getProcess(self): return defer.fail(ProcessUnavailable("Shutting down")) def childProcessCreated(self): # This will put us into the stopped state - no big deal, # we are going into the ready state as soon as it returns. _childProcTerminated(self, RuntimeError("Shutting down")) # Dip into the ready mode for ever so brief an instant so # that we can shut ourselves down. 
self.mode = 'ready' return self.stopProcess() def childProcessTerminated(self, reason): _childProcTerminated(self, reason) if self.onProcessTermination is not None: self.onProcessTermination() class JuiceConnector(protocol.ProcessProtocol): def __init__(self, proto, controller): self.juice = proto self.controller = controller def connectionMade(self): log.msg("Subprocess started.") self.juice.makeConnection(self) self.controller.childProcessCreated() # Transport disconnecting = False def write(self, data): self.transport.write(data) def writeSequence(self, data): self.transport.writeSequence(data) def loseConnection(self): self.transport.loseConnection() def getPeer(self): return ('omfg what are you talking about',) def getHost(self): return ('seriously it is a process this makes no sense',) def inConnectionLost(self): log.msg("Standard in closed") protocol.ProcessProtocol.inConnectionLost(self) def outConnectionLost(self): log.msg("Standard out closed") protocol.ProcessProtocol.outConnectionLost(self) def errConnectionLost(self): log.msg("Standard err closed") protocol.ProcessProtocol.errConnectionLost(self) def outReceived(self, data): self.juice.dataReceived(data) def errReceived(self, data): log.msg("Received stderr from subprocess: " + repr(data)) def processEnded(self, status): log.msg("Process ended") self.juice.connectionLost(status) self.controller.childProcessTerminated(status) class JuiceChild(juice.Juice): """ Protocol class which runs in the child process This just defines one behavior on top of a regular juice protocol: the shutdown command, which drops the connection and stops the reactor. """ shutdown = False def connectionLost(self, reason): juice.Juice.connectionLost(self, reason) if self.shutdown: reactor.stop() def command_SHUTDOWN(self): log.msg("Shutdown message received, goodbye.") self.shutdown = True return {} command_SHUTDOWN.command = Shutdown class SetStore(juice.Command): """ Specify the location of the site store. """ commandName = 'Set-Store' arguments = [('storepath', juice.Path())] class SuspendProcessor(juice.Command): """ Prevent a particular reliable listener from receiving any notifications until a L{ResumeProcessor} command is sent or the batch process is restarted. """ commandName = 'Suspend-Processor' arguments = [('storepath', juice.Path()), ('storeid', juice.Integer())] class ResumeProcessor(juice.Command): """ Cause a particular reliable listener to begin receiving notifications again. """ commandName = 'Resume-Processor' arguments = [('storepath', juice.Path()), ('storeid', juice.Integer())] class CallItemMethod(juice.Command): """ Invoke a particular method of a particular item. """ commandName = 'Call-Item-Method' arguments = [('storepath', juice.Path()), ('storeid', juice.Integer()), ('method', juice.String())] class BatchProcessingControllerService(service.Service): """ Controls starting, stopping, and passing messages to the system process in charge of remote batch processing. 
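For example (C{someItem.someMethod} being hypothetical): adapting the site store with C{iaxiom.IBatchService(siteStore)} returns this service, and C{.call(someItem.someMethod)} then arranges for C{someItem.someMethod()} to run in the child process.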
""" def __init__(self, store): self.store = store self.setName("Batch Processing Controller") def startService(self): service.Service.startService(self) tacPath = util.sibpath(__file__, "batch.tac") proto = BatchProcessingProtocol() rundir = self.store.dbdir.child("run") logdir = rundir.child("logs") for d in rundir, logdir: try: d.createDirectory() except OSError: pass self.batchController = ProcessController( "batch", proto, tacPath, self._setStore, self._restartProcess, logdir.child("batch.log").path, rundir.child("batch.pid").path) def _setStore(self): return SetStore(storepath=self.store.dbdir).do(self.batchController.juice) def _restartProcess(self): reactor.callLater(1.0, self.batchController.getProcess) def stopService(self): service.Service.stopService(self) d = self.batchController.stopProcess() d.addErrback(lambda err: err.trap(error.ProcessDone)) return d def call(self, itemMethod): """ Invoke the given bound item method in the batch process. Return a Deferred which fires when the method has been invoked. """ item = itemMethod.im_self method = itemMethod.im_func.func_name return self.batchController.getProcess().addCallback( CallItemMethod(storepath=item.store.dbdir, storeid=item.storeID, method=method).do) def suspend(self, storepath, storeID): return self.batchController.getProcess().addCallback( SuspendProcessor(storepath=storepath, storeid=storeID).do) def resume(self, storepath, storeID): return self.batchController.getProcess().addCallback( ResumeProcessor(storepath=storepath, storeid=storeID).do) class _SubStoreBatchChannel(object): """ SubStore adapter for passing messages to the batch processing system process. SubStores are adaptable to L{iaxiom.IBatchService} via this adapter. """ implements(iaxiom.IBatchService) def __init__(self, substore): self.storepath = substore.dbdir self.service = iaxiom.IBatchService(substore.parent) def call(self, itemMethod): return self.service.call(itemMethod) def suspend(self, storeID): return self.service.suspend(self.storepath, storeID) def resume(self, storeID): return self.service.resume(self.storepath, storeID) def storeBatchServiceSpecialCase(st, pups): if st.parent is not None: return _SubStoreBatchChannel(st) return service.IService(st).getServiceNamed("Batch Processing Controller") class BatchProcessingProtocol(JuiceChild): siteStore = None def __init__(self, service=None, issueGreeting=False): juice.Juice.__init__(self, issueGreeting) self.storepaths = [] if service is not None: service.cooperator = cooperator.Cooperator() self.service = service def connectionLost(self, reason): # In the child process, we are a server. In the child process, we # don't want to keep running after we can't talk to the client anymore. 
if self.isServer: reactor.stop() def command_SET_STORE(self, storepath): from axiom import store assert self.siteStore is None self.siteStore = store.Store(storepath, debug=False) self.subStores = {} self.pollCall = task.LoopingCall(self._pollSubStores) self.pollCall.start(10.0) return {} command_SET_STORE.command = SetStore def command_SUSPEND_PROCESSOR(self, storepath, storeid): return self.subStores[storepath.path].suspend(storeid).addCallback(lambda ign: {}) command_SUSPEND_PROCESSOR.command = SuspendProcessor def command_RESUME_PROCESSOR(self, storepath, storeid): return self.subStores[storepath.path].resume(storeid).addCallback(lambda ign: {}) command_RESUME_PROCESSOR.command = ResumeProcessor def command_CALL_ITEM_METHOD(self, storepath, storeid, method): return self.subStores[storepath.path].call(storeid, method).addCallback(lambda ign: {}) command_CALL_ITEM_METHOD.command = CallItemMethod def _pollSubStores(self): from axiom import store, substore # Any service which has encountered an error will have logged it and # then stopped. Prune those here, so that they are noticed as missing # below and re-added. for path, svc in self.subStores.items(): if not svc.running: del self.subStores[path] try: paths = set([p.path for p in self.siteStore.query(substore.SubStore).getColumn("storepath")]) except eaxiom.SQLError, e: # Generally, database is locked. log.msg("SubStore query failed with SQLError: %r" % (e,)) except: # WTF? log.msg("SubStore query failed with bad error:") log.err() else: for removed in set(self.subStores) - paths: self.subStores[removed].disownServiceParent() del self.subStores[removed] if VERBOSE: log.msg("Removed SubStore " + removed) for added in paths - set(self.subStores): try: s = store.Store(added, debug=False) except eaxiom.SQLError, e: # Generally, database is locked. log.msg("Opening sub-Store failed with SQLError: %r" % (e,)) except: log.msg("Opening sub-Store failed with bad error:") log.err() else: self.subStores[added] = BatchProcessingService(s, style=iaxiom.REMOTE) self.subStores[added].setServiceParent(self.service) if VERBOSE: log.msg("Added SubStore " + added) class BatchProcessingService(service.Service): """ Steps over the L{iaxiom.IBatchProcessor} powerups for a single L{axiom.store.Store}. """ def __init__(self, store, style=iaxiom.LOCAL): self.store = store self.style = style self.suspended = [] def suspend(self, storeID): item = self.store.getItemByID(storeID) self.suspended.append(item) return item.suspend() def resume(self, storeID): item = self.store.getItemByID(storeID) self.suspended.remove(item) return item.resume() def call(self, storeID, methodName): return defer.maybeDeferred(getattr(self.store.getItemByID(storeID), methodName)) def items(self): return self.store.powerupsFor(iaxiom.IBatchProcessor) def processWhileRunning(self): """ Run tasks until stopService is called. 
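    Between steps, this sleeps 0.1 seconds when the last pass reported more work pending and 10 seconds when it found nothing to do (the delay constants in the loop below).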
""" work = self.step() for result, more in work: yield result if not self.running: break if more: delay = 0.1 else: delay = 10.0 yield task.deferLater(reactor, delay, lambda: None) def step(self): while True: items = list(self.items()) if VERBOSE: log.msg("Found %d processors for %s" % (len(items), self.store)) ran = False more = False while items: ran = True item = items.pop() if VERBOSE: log.msg("Stepping processor %r (suspended is %r)" % (item, self.suspended)) try: itemHasMore = item.store.transact(item.step, style=self.style, skip=self.suspended) except _ProcessingFailure, e: log.msg("%r failed while processing %r:" % (e.reliableListener, e.workUnit)) log.err(e.failure) e.mark() # _Fuck_. /Fuck/. If user-code in or below (*fuck*) # item.step creates a Failure on any future iteration # (-Fuck-) of this loop, it will get a reference to this # exception instance, since it's in locals and Failures # extract and save locals (Aaarrrrggg). Get rid of this so # that doesn't happen. See also the definition of # _ProcessingFailure.__init__. e = None else: if itemHasMore: more = True yield None, bool(more or items) if not ran: yield None, more def startService(self): service.Service.startService(self) self.parent.cooperator.coiterate(self.processWhileRunning()) def stopService(self): service.Service.stopService(self) self.store.close() class BatchManholePowerup(item.Item): """ Previously, an L{IConchUser} powerup. This class is only still defined for schema compatibility. Any instances of it will be deleted by an upgrader. See #1001. """ schemaVersion = 2 unused = attributes.integer( doc="Satisfy Axiom requirement for at least one attribute") registerDeletionUpgrader(BatchManholePowerup, 1, 2) Axiom-0.6.0/axiom/batch.tac0000644000175000017500000000120110424160712015364 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_batch -*- """ Application configuration for the batch sub-process. This process reads commands and sends responses via stdio using the JUICE protocol. When it's not doing that, it queries various databases for work to do, and then does it. The databases which it queries can be controlled by sending it messages. """ from twisted.application import service from twisted.internet import stdio from axiom import batch application = service.Application("Batch Processing App") svc = service.MultiService() svc.setServiceParent(application) stdio.StandardIO(batch.BatchProcessingProtocol(svc, True)) Axiom-0.6.0/axiom/dependency.py0000644000175000017500000002440011050455516016315 0ustar exarkunexarkun# Copright 2008 Divmod, Inc. See LICENSE file for details. # -*- test-case-name: axiom.test.test_dependency -*- """ A dependency management system for items. """ import sys, itertools from zope.interface.advice import addClassAdvisor from epsilon.structlike import record from axiom.item import Item from axiom.attributes import reference, boolean, AND from axiom.errors import ItemNotFound, DependencyError, UnsatisfiedRequirement #There is probably a cleaner way to do this. _globalDependencyMap = {} def dependentsOf(cls): deps = _globalDependencyMap.get(cls, None) if deps is None: return [] else: return [d[0] for d in deps] ##Totally ripping off z.i def dependsOn(itemType, itemCustomizer=None, doc='', indexed=True, whenDeleted=reference.NULLIFY): """ This function behaves like L{axiom.attributes.reference} but with an extra behaviour: when this item is installed (via L{axiom.dependency.installOn} on a target item, the type named here will be instantiated and installed on the target as well. 
For example:: class Foo(Item): counter = integer() thingIDependOn = dependsOn(Baz, lambda baz: baz.setup()) @param itemType: The Item class to instantiate and install. @param itemCustomizer: A callable that accepts the item installed as a dependency as its first argument. It will be called only if an item is created to satisfy this dependency. @return: An L{axiom.attributes.reference} instance. """ frame = sys._getframe(1) locals = frame.f_locals # Try to make sure we were called from a class def. if (locals is frame.f_globals) or ('__module__' not in locals): raise TypeError("dependsOn can be used only from a class definition.") ref = reference(reftype=itemType, doc=doc, indexed=indexed, allowNone=True, whenDeleted=whenDeleted) if "__dependsOn_advice_data__" not in locals: addClassAdvisor(_dependsOn_advice) locals.setdefault('__dependsOn_advice_data__', []).append( (itemType, itemCustomizer, ref)) return ref def _dependsOn_advice(cls): if cls in _globalDependencyMap: print "Double advising of %s. dependency map from first time: %s" % ( cls, _globalDependencyMap[cls]) #bail if we end up here twice, somehow return cls for itemType, itemCustomizer, ref in cls.__dict__[ '__dependsOn_advice_data__']: classDependsOn(cls, itemType, itemCustomizer, ref) del cls.__dependsOn_advice_data__ return cls def classDependsOn(cls, itemType, itemCustomizer, ref): _globalDependencyMap.setdefault(cls, []).append( (itemType, itemCustomizer, ref)) class _DependencyConnector(Item): """ I am a connector between installed items and their targets. """ installee = reference(doc="The item installed.") target = reference(doc="The item installed upon.") explicitlyInstalled = boolean(doc="Whether this item was installed" "explicitly (and thus whether or not it" "should be automatically uninstalled when" "nothing depends on it)") def installOn(self, target): """ Install this object on the target along with any powerup interfaces it declares. Also track that the object now depends on the target, and the object was explicitly installed (and therefore should not be uninstalled by subsequent uninstallation operations unless it is explicitly removed). """ _installOn(self, target, True) def _installOn(self, target, __explicitlyInstalled=False): depBlob = _globalDependencyMap.get(self.__class__, []) dependencies, itemCustomizers, refs = (map(list, zip(*depBlob)) or ([], [], [])) #See if any of our dependencies have been installed already for dc in self.store.query(_DependencyConnector, _DependencyConnector.target == target): if dc.installee.__class__ in dependencies: i = dependencies.index(dc.installee.__class__) refs[i].__set__(self, dc.installee) del dependencies[i], itemCustomizers[i], refs[i] if (dc.installee.__class__ == self.__class__ and self.__class__ in set( itertools.chain([blob[0][0] for blob in _globalDependencyMap.values()]))): #Somebody got here before we did... let's punt raise DependencyError("An instance of %r is already " "installed on %r." % (self.__class__, target)) #The rest we'll install for i, cls in enumerate(dependencies): it = cls(store=self.store) if itemCustomizers[i] is not None: itemCustomizers[i](it) _installOn(it, target, False) refs[i].__set__(self, it) #And now the connector for our own dependency. dc = self.store.findUnique( _DependencyConnector, AND(_DependencyConnector.target==target, _DependencyConnector.installee==self, _DependencyConnector.explicitlyInstalled==__explicitlyInstalled), None) assert dc is None, "Dependency connector already exists, wtf are you doing?" 
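        # Record the installation; the explicitlyInstalled flag stored on the connector is what later lets uninstallFrom/installedUniqueRequirements garbage-collect automatically-installed dependencies while leaving pinned ones alone.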
_DependencyConnector(store=self.store, target=target, installee=self, explicitlyInstalled=__explicitlyInstalled) target.powerUp(self) callback = getattr(self, "installed", None) if callback is not None: callback() def uninstallFrom(self, target): """ Remove this object from the target, as well as any dependencies that it automatically installed which were not explicitly "pinned" by calling "install", and raising an exception if anything still depends on this. """ #did this class powerup on any interfaces? powerdown if so. target.powerDown(self) for dc in self.store.query(_DependencyConnector, _DependencyConnector.target==target): if dc.installee is self: dc.deleteFromStore() for item in installedUniqueRequirements(self, target): uninstallFrom(item, target) callback = getattr(self, "uninstalled", None) if callback is not None: callback() def installedOn(self): """ If this item is installed on another item, return the install target. Otherwise return None. """ try: return self.store.findUnique(_DependencyConnector, _DependencyConnector.installee == self ).target except ItemNotFound: return None def installedDependents(self, target): """ Return an iterable of things installed on the target that require this item. """ for dc in self.store.query(_DependencyConnector, _DependencyConnector.target == target): depends = dependentsOf(dc.installee.__class__) if self.__class__ in depends: yield dc.installee def installedUniqueRequirements(self, target): """ Return an iterable of things installed on the target that this item requires and are not required by anything else. """ myDepends = dependentsOf(self.__class__) #XXX optimize? for dc in self.store.query(_DependencyConnector, _DependencyConnector.target==target): if dc.installee is self: #we're checking all the others not ourself continue depends = dependentsOf(dc.installee.__class__) if self.__class__ in depends: raise DependencyError( "%r cannot be uninstalled from %r, " "%r still depends on it" % (self, target, dc.installee)) for cls in myDepends[:]: #If one of my dependencies is required by somebody #else, leave it alone if cls in depends: myDepends.remove(cls) for dc in self.store.query(_DependencyConnector, _DependencyConnector.target==target): if (dc.installee.__class__ in myDepends and not dc.explicitlyInstalled): yield dc.installee def installedRequirements(self, target): """ Return an iterable of things installed on the target that this item requires. """ myDepends = dependentsOf(self.__class__) for dc in self.store.query(_DependencyConnector, _DependencyConnector.target == target): if dc.installee.__class__ in myDepends: yield dc.installee def onlyInstallPowerups(self, target): """ Deprecated - L{Item.powerUp} now has this functionality. """ target.powerUp(self) class requiresFromSite( record('powerupInterface defaultFactory siteDefaultFactory', defaultFactory=None, siteDefaultFactory=None)): """ A read-only descriptor that will return the site store's powerup for a given item. @ivar powerupInterface: an L{Interface} describing the powerup that the site store should be adapted to. @ivar defaultFactory: a 1-argument callable that takes the site store and returns a value for this descriptor. This is invoked in cases where the site store does not provide a default factory of its own, and this descriptor is retrieved from an item in a store with a parent. @ivar siteDefaultFactory: a 1-argument callable that takes the site store and returns a value for this descriptor. 
This is invoked in cases where this descriptor is retrieved from an item in a store without a parent. """ def _invokeFactory(self, defaultFactory, siteStore): if defaultFactory is None: raise UnsatisfiedRequirement() return defaultFactory(siteStore) def __get__(self, oself, type=None): """ Retrieve the value of this dependency from the site store. """ siteStore = oself.store.parent if siteStore is not None: pi = self.powerupInterface(siteStore, None) if pi is None: pi = self._invokeFactory(self.defaultFactory, siteStore) else: pi = self._invokeFactory(self.siteDefaultFactory, oself.store) return pi Axiom-0.6.0/axiom/errors.py0000644000175000017500000001072111127421367015516 0ustar exarkunexarkun# -*- test-case-name: axiom.test -*- from twisted.cred.error import UnauthorizedLogin class TimeoutError(Exception): """ A low-level SQL operation timed out. @ivar statement: The SQL statement which timed out. @ivar timeout: The timeout, in seconds, which was exceeded. @ivar underlying: The backend exception which signaled this, or None. """ def __init__(self, statement, timeout, underlying): Exception.__init__(self, statement, timeout, underlying) self.statement = statement self.timeout = timeout self.underlying = underlying class BadCredentials(UnauthorizedLogin): pass class NoSuchUser(UnauthorizedLogin): pass class MissingDomainPart(NoSuchUser): """ Raised when a login is attempted with a username which consists of only a local part. For example, "testuser" instead of "testuser@example.com". """ class DuplicateUser(Exception): pass class CannotOpenStore(RuntimeError): """ There is a problem such that the store cannot be opened. """ class NoUpgradePathAvailable(CannotOpenStore): """ No upgrade path is available, so the store cannot be opened. """ class NoCrossStoreReferences(AttributeError): """ References are not allowed between items within different Stores. """ class SQLError(RuntimeError): """ Axiom internally generated some bad SQL. """ def __init__(self, sql, args, underlying): RuntimeError.__init__(self, sql, args, underlying) self.sql, self.args, self.underlying = self.args def __str__(self): return "<SQLError: %r(%r) caused %s: %s>" % ( self.sql, self.args, self.underlying.__class__, self.underlying) class TableAlreadyExists(SQLError): """ Axiom internally created a table at the same time as another database. (User code should not need to catch this exception.) """ class UnknownItemType(Exception): """ Can't load an item: it's of a type that I don't see anywhere in Python. """ class SQLWarning(Warning): """ Axiom internally generated some CREATE TABLE SQL that ... probably wasn't bad """ class TableCreationConcurrencyError(RuntimeError): """ Woah, this is really bad. If you can get this please tell us how. """ class DuplicateUniqueItem(KeyError): """ Found 2 or more of an item which is supposed to be unique. """ class ItemNotFound(KeyError): """ Did not find even 1 of an item which was supposed to exist. """ class ItemClassesOnly(TypeError): """ An object was passed to a method that wasn't a subclass of Item. """ class ChangeRejected(Exception): """ Raised when an attempt is made to change the database at a time when database changes are disallowed for reasons of consistency. This is raised when an application-level callback (for example, committed) attempts to change database state. """ class DependencyError(Exception): """ Raised when an item can't be installed or uninstalled.
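    For example, this is raised when a second instance of a type is installed on a target which already has one, or when uninstalling an item that another installed item still depends on.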
""" class DeletionDisallowed(ValueError): """ Raised when an attempt is made to delete an item that is referred to by reference attributes with whenDeleted == DISALLOW. """ class DataIntegrityError(RuntimeError): """ Data integrity seems to have been lost. """ class BrokenReference(DataIntegrityError): """ A reference to a nonexistent item was detected when this should be impossible. """ class UpgraderRecursion(RuntimeError): """ Upgraders are not allowed to recurse. """ class ItemUpgradeError(RuntimeError): """ Attempting to upgrade an Item resulted in an error. @ivar originalFailure: The failure that caused the item upgrade to fail @ivar storeID: Store ID of the item that failed to upgrade @ivar oldType: The type of the item being upgraded @ivar newType: The type the item should've been upgraded to """ def __init__(self, originalFailure, storeID, oldType, newType): RuntimeError.__init__(self, originalFailure, storeID, oldType, newType) self.originalFailure = originalFailure self.storeID = storeID self.oldType = oldType self.newType = newType class UnsatisfiedRequirement(AttributeError): """ A requirement described by a L{axiom.dependency.requiresFromSite} was not satisfied by the database, and could not be satisfied automatically at runtime by a default factory. """ Axiom-0.6.0/axiom/iaxiom.py0000644000175000017500000002363311224737657015510 0ustar exarkunexarkun from zope.interface import Interface, Attribute class IStatEvent(Interface): """ Marker for a log message that is useful as a statistic. Log messages with 'interface' set to this class will be made available to external observers. This is useful for tracking the rate of events such as page views. """ class IAtomicFile(Interface): def __init__(tempname, destdir): """Create a new atomic file. The file will exist temporarily at C{tempname} and be relocated to C{destdir} when it is closed. """ def tell(): """Return the current offset into the file, in bytes. """ def write(bytes): """Write some bytes to this file. """ def close(callback): """Close this file. Move it to its final location. @param callback: A no-argument callable which will be invoked when this file is ready to be moved to its final location. It must return the segment of the path relative to per-user storage of the owner of this file. Alternatively, a string with semantics the same as those previously described for the return value of the callable. @rtype: C{axiom.store.StoreRelativePath} @return: A Deferred which fires with the full path to the file when it has been closed, or which fails if there is some error closing the file. """ def abort(): """Give up on this file. Discard its contents. """ class IAxiomaticCommand(Interface): """ Subcommand for 'axiomatic' and 'tell-axiom' command line programs. Should subclass twisted.python.usage.Options and provide a command to run. '.parent' attribute will be set to an object with a getStore method. """ name = Attribute(""" """) description = Attribute(""" """) class IBeneficiary(Interface): """ Interface to adapt to when looking for an appropriate application-level object to install powerups on. """ def powerUp(implementor, interface): """ Install a powerup on this object. There is not necessarily any inverse powerupsFor on a beneficiary, although there may be; installations may be forwarded to a different implementation object, or deferred. """ class IPowerupIndirector(Interface): """ Implement this interface if you want to change what is returned from powerupsFor for a particular interface. 
""" def indirect(interface): """ When an item which implements IPowerupIndirector is returned from a powerupsFor query, this method will be called on it to give it the opportunity to return something other than itself from powerupsFor. @param interface: the interface passed to powerupsFor @type interface: L{zope.interface.Interface} """ class IScheduler(Interface): """ An interface for scheduling tasks. Quite often the store will be adaptable to this; in any Mantissa application, for example; so it is reasonable to assume that it is if your application needs to schedule timed events or queue tasks. """ def schedule(runnable, when): """ @param runnable: any Item with a 'run' method. @param when: a Time instance describing when the runnable's run() method will be called. See extime.Time's documentation for more details. """ class IQuery(Interface): """ An object that represents a query that can be performed against a database. """ limit = Attribute( """ An integer representing the maximum number of rows to be returned from this query, or None, if the query is unlimited. """) store = Attribute( """ The Axiom store that this query will return results from. """) def __iter__(): """ Retrieve an iterator for the results of this query. The query is performed whenever this is called. """ def count(): """ Return the number of results in this query. NOTE: In most cases, this will have to load all of the rows in this query. It is therefore very slow and should generally be considered discouraged. Call with caution! """ def cloneQuery(limit): """ Create a similar-but-not-identical copy of this query with certain attributes changed. (Currently this only supports the manipulation of the "limit" parameter, but it is the intent that with a richer query-introspection interface, this signature could be expanded to support many different attributes.) @param limit: an integer, representing the maximum number of rows that this query should return. @return: an L{IQuery} provider with the new limit. """ class IColumn(Interface): """ An object that represents a column in the database. """ def getShortColumnName(store): """ @rtype: C{str} @return: Just the name of this column. """ def getColumnName(store): """ @rtype: C{str} @return: The fully qualified name of this object as a column within the database, eg, C{"main_database.some_table.[this_column]"}. """ def fullyQualifiedName(): """ @rtype: C{str} @return: The fully qualfied name of this object as an attribute in Python code, eg, C{myproject.mymodule.MyClass.myAttribute}. If this attribute is represented by an actual Python code object, it will be a dot-separated sequence of Python identifiers; otherwise, it will contain invalid identifier characters other than '.'. """ def __get__(row): """ @param row: an item that has this column. @type row: L{axiom.item.Item} @return: The value of the column described by this object, for the given row. @rtype: depends on the underlying type of the column. """ class IOrdering(Interface): """ An object suitable for passing to the 'sort' argument of a query method. """ def orderColumns(): """ Return a list of two-tuples of IColumn providers and either C{'ASC'} or C{'DESC'} defining this ordering. """ class IComparison(Interface): """ An object that represents an in-database comparison. A predicate that may apply to certain items in a store. Passed as an argument to attributes.AND, .OR, and Store.query(...) """ def getInvolvedTables(): """ Return a sequence of L{Item} subclasses which are referenced by this comparison. 
A class may appear at most once. """ def getQuery(store): """ Return an SQL string with ?-style bind parameter syntax thingies. """ def getArgs(store): """ Return a sequence of arguments suitable for use to satisfy the bind parameters in the result of L{getQuery}. """ class IReliableListener(Interface): """ Receives notification of the existence of Items of a particular type. L{IReliableListener} providers are given to L{IBatchProcessor.addReliableListener} and will then have L{processItem} called with items handled by that processor. """ def processItem(item): """ Callback notifying this listener of the existence of the given item. """ def suspend(): """ Invoked when notification for this listener is being temporarily suspended. This should clean up any ephemeral resources held by this listener and generally prepare to not do anything for a while. """ def resume(): """ Invoked when notification for this listener is being resumed. Any actions taken by L{suspend} may be reversed by this method. """ LOCAL, REMOTE = range(2) class IBatchProcessor(Interface): def addReliableListener(listener, style=LOCAL): """ Add the given Item to the set which will be notified of Items available for processing. Note: Each Item is processed synchronously. Adding too many listeners to a single batch processor will cause the L{step} method to block while it sends notification to each listener. @type listener: L{IReliableListener} @param listener: The item to which listened-for items will be passed for processing. """ def removeReliableListener(listener): """ Remove a previously added listener. """ def getReliableListeners(): """ Return an iterable of the listeners which have been added to this batch processor. """ class IBatchService(Interface): """ Object which allows minimal communication with L{IReliableListener} providers which are running remotely (that is, with the L{REMOTE} style). """ def suspend(storeID): """ @type storeID: C{int} @param storeID: The storeID of the listener to suspend. @rtype: L{twisted.internet.defer.Deferred} @return: A Deferred which fires when the listener has been suspended. """ def resume(storeID): """ @type storeID: C{int} @param storeID: The storeID of the listener to resume. @rtype: L{twisted.internet.defer.Deferred} @return: A Deferred which fires when the listener has been resumed. """ class IVersion(Interface): """ Object with version information for a package that creates Axiom items, most likely a L{twisted.python.versions.Version}. Used to track which versions of a package have been used to load a store. """ package = Attribute(""" Name of a Python package. """) major = Attribute(""" Major version number. """) minor = Attribute(""" Minor version number. """) micro = Attribute(""" Micro version number.
""") Axiom-0.6.0/axiom/item.py0000644000175000017500000012151611127007037015137 0ustar exarkunexarkun# -*- test-case-name: axiom.test -*- __metaclass__ = type import gc from zope.interface import implements, Interface from inspect import getabsfile from weakref import WeakValueDictionary from twisted.python import log from twisted.python.reflect import qual, namedAny from twisted.python.util import unsignedID from twisted.python.util import mergeFunctionMetadata from twisted.application.service import IService, IServiceCollection, MultiService from axiom import slotmachine, _schema, iaxiom from axiom.errors import ChangeRejected, DeletionDisallowed from axiom.iaxiom import IColumn, IPowerupIndirector from axiom.attributes import ( SQLAttribute, _ComparisonOperatorMuxer, _MatchingOperationMuxer, _OrderingMixin, _ContainableMixin, Comparable, compare, inmemory, reference, text, integer, AND, _cascadingDeletes, _disallows) _typeNameToMostRecentClass = WeakValueDictionary() def normalize(qualName): """ Turn a fully-qualified Python name into a string usable as part of a table name. """ return qualName.lower().replace('.', '_') class NoInheritance(RuntimeError): """ Inheritance is as-yet unsupported by XAtop. """ class NotInStore(RuntimeError): """ """ class CantInstantiateItem(RuntimeError): """You can't instantiate Item directly. Make a subclass. """ class MetaItem(slotmachine.SchemaMetaMachine): """Simple metaclass for Item that adds Item (and its subclasses) to _typeNameToMostRecentClass mapping. """ def __new__(meta, name, bases, dictionary): T = slotmachine.SchemaMetaMachine.__new__(meta, name, bases, dictionary) if T.__name__ == 'Item' and T.__module__ == __name__: return T T.__already_inherited__ += 1 if T.__already_inherited__ >= 2: raise NoInheritance("already inherited from item once: " "in-database inheritance not yet supported") if T.typeName is None: T.typeName = normalize(qual(T)) if T.schemaVersion is None: T.schemaVersion = 1 if T.typeName in _typeNameToMostRecentClass: # Let's try not to gc.collect() every time. gc.collect() if T.typeName in _typeNameToMostRecentClass: if T.__legacy__: return T otherT = _typeNameToMostRecentClass[T.typeName] if (otherT.__name__ == T.__name__ and getabsfile(T) == getabsfile(otherT) and T.__module__ != otherT.__module__): if len(T.__module__) < len(otherT.__module__): relmod = T.__module__ else: relmod = otherT.__module__ raise RuntimeError( "Use absolute imports; relative import" " detected for type %r (imported from %r)" % ( T.typeName, relmod)) raise RuntimeError("2 definitions of axiom typename %r: %r %r" % ( T.typeName, T, _typeNameToMostRecentClass[T.typeName])) _typeNameToMostRecentClass[T.typeName] = T return T def __cmp__(self, other): """ Ensure stable sorting between Item classes. This provides determinism in SQL generation, which is beneficial for debugging and performance purposes. """ if isinstance(other, MetaItem): return cmp((self.typeName, self.schemaVersion), (other.typeName, other.schemaVersion)) return NotImplemented def noop(): pass class _StoreIDComparer(Comparable): """ See Comparable's docstring for the explanation of the requirements of my implementation. 
""" implements(IColumn) def __init__(self, type): self.type = type def __repr__(self): return '' def fullyQualifiedName(self): # XXX: this is an example of silly redundancy, this really ought to be # refactored to work like any other attribute (including being # explicitly covered in the schema, which has other good qualities like # allowing tables to be VACUUM'd without destroying oid stability and # every storeID reference ever. --glyph return qual(self.type)+'.storeID' # attributes required by ColumnComparer def infilter(self, pyval, oself, store): return pyval def outfilter(self, dbval, oself): return dbval def getShortColumnName(self, store): return store.getShortColumnName(self) def getColumnName(self, store): return store.getColumnName(self) def __get__(self, item, type=None): if item is None: return self else: return getattr(item, 'storeID') class _SpecialStoreIDAttribute(slotmachine.SetOnce): """ Because storeID is special (it's unique, it determines a row's cache identity, it's immutable, etc) we don't use a regular SQLAttribute to represent it - but it still needs to be compared with other SQL attributes, as it is in fact represented by the 'oid' database column. I implement set-once semantics to enforce immutability, but delegate comparison operations to _StoreIDComparer. """ def __get__(self, oself, type=None): if type is not None and oself is None: if type._storeIDComparer is None: # Reuse the same instance so that the store can use it # as a key for various caching, like any other attributes. type._storeIDComparer = _StoreIDComparer(type) return type._storeIDComparer return super(_SpecialStoreIDAttribute, self).__get__(oself, type) def serviceSpecialCase(item, pups): if item._axiom_service is not None: return item._axiom_service svc = MultiService() for subsvc in pups: subsvc.setServiceParent(svc) item._axiom_service = svc return svc class Empowered(object): """ An object which can have powerups. @type store: L{axiom.store.Store} @ivar store: Persistence object to which powerups can be added for later retrieval. @type aggregateInterfaces: C{dict} @ivar aggregateInterfaces: Mapping from interface classes to callables which will be used to produce corresponding powerups. The callables will be invoked with two arguments, the L{Empowered} for which powerups are being loaded and with a list of powerups found in C{store}. The return value is the powerup. These are used only by the callable interface adaption API, not C{powerupsFor}. """ aggregateInterfaces = { IService: serviceSpecialCase, IServiceCollection: serviceSpecialCase} def inMemoryPowerUp(self, powerup, interface): """ Install an arbitrary object as a powerup on an item or store. Powerups installed using this method will only exist as long as this object remains in memory. They will also take precedence over powerups installed with L{powerUp}. @param interface: a zope interface """ self._inMemoryPowerups[interface] = powerup def powerUp(self, powerup, interface=None, priority=0): """ Installs a powerup (e.g. plugin) on an item or store. Powerups will be returned in an iterator when queried for using the 'powerupsFor' method. Normally they will be returned in order of installation [this may change in future versions, so please don't depend on it]. Higher priorities are returned first. If you have something that should run before "normal" powerups, pass POWERUP_BEFORE; if you have something that should run after, pass POWERUP_AFTER. 
We suggest not depending too heavily on order of execution of your powerups, but if finer-grained control is necessary you may pass any integer. Normal (unspecified) priority is zero. Powerups will only be installed once on a given item. If you install a powerup for a given interface with priority 1, then again with priority 30, the powerup will be adjusted to priority 30 but future calls to powerupsFor will still only return that powerup once. If no interface or priority is specified, and the class of the powerup has a "powerupInterfaces" attribute (containing either a sequence of interfaces, or a sequence of (interface, priority) tuples), this object will be powered up with the powerup object on those interfaces. If no interface or priority is specified and the powerup has a "__getPowerupInterfaces__" method, it will be called with an iterable of (interface, priority) tuples, collected from the "powerupInterfaces" attribute described above. The iterable of (interface, priority) tuples it returns will then be installed. @param powerup: an Item that implements C{interface} (if specified) @param interface: a zope interface, or None @param priority: An int; preferably either POWERUP_BEFORE, POWERUP_AFTER, or unspecified. @raise TypeError: raises if interface is IPowerupIndirector You may not install a powerup for IPowerupIndirector because that would be nonsensical. """ if interface is None: for iface, priority in powerup._getPowerupInterfaces(): self.powerUp(powerup, iface, priority) elif interface is IPowerupIndirector: raise TypeError( "You cannot install a powerup for IPowerupIndirector: " + powerup) else: forc = self.store.findOrCreate(_PowerupConnector, item=self, interface=unicode(qual(interface)), powerup=powerup) forc.priority = priority def powerDown(self, powerup, interface=None): """ Remove a powerup. If no interface is specified, and the type of the object being installed has a "powerupInterfaces" attribute (containing either a sequence of interfaces, or a sequence of (interface, priority) tuples), the target will be powered down with this object on those interfaces. If this object has a "__getPowerupInterfaces__" method, it will be called with an iterable of (interface, priority) tuples. The iterable of (interface, priority) tuples it returns will then be uninstalled. (Note particularly that if powerups are added to or removed from the collection described above between calls to powerUp and powerDown, more or fewer powerups may be removed than were installed.) """ if interface is None: for interface, priority in powerup._getPowerupInterfaces(): self.powerDown(powerup, interface) else: for cable in self.store.query(_PowerupConnector, AND(_PowerupConnector.item == self, _PowerupConnector.interface == unicode(qual(interface)), _PowerupConnector.powerup == powerup)): cable.deleteFromStore() return raise ValueError("Not powered up for %r with %r" % (interface, powerup)) def __conform__(self, interface): """ For 'normal' interfaces, returns the first powerup found when doing self.powerupsFor(interface). Certain interfaces are special - IService from twisted.application being the main special case - and will be aggregated according to special rules. The full list of such interfaces is present in the 'aggregateInterfaces' class attribute. """ if (self.store is None # Don't bother doing a *query* if we're not # even stored in a store yet or interface is IPowerupIndirector): # you can't do a query for # IPowerupIndirector, that # would just start an infinite # loop.
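            # Returning None from __conform__ simply means "no adapter here".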
return pups = self.powerupsFor(interface) agg = self.aggregateInterfaces if interface in agg: return agg[interface](self, pups) for p in pups: return p def powerupsFor(self, interface): """ Returns powerups installed using C{powerUp}, in order of descending priority. Powerups found to have been deleted, either during the course of this powerupsFor iteration, during an upgrader, or previously, will not be returned. """ inMemoryPowerup = self._inMemoryPowerups.get(interface, None) if inMemoryPowerup is not None: yield inMemoryPowerup name = unicode(qual(interface), 'ascii') for cable in self.store.query( _PowerupConnector, AND(_PowerupConnector.interface == name, _PowerupConnector.item == self), sort=_PowerupConnector.priority.descending): pup = cable.powerup if pup is None: # this powerup was probably deleted during an upgrader. cable.deleteFromStore() else: indirector = IPowerupIndirector(pup, None) if indirector is not None: yield indirector.indirect(interface) else: yield pup def interfacesFor(self, powerup): """ Return an iterator of the interfaces for which the given powerup is installed on this object. This is not implemented for in-memory powerups. It will probably fail in an unpredictable, implementation-dependent way if used on one. """ pc = _PowerupConnector for iface in self.store.query(pc, AND(pc.item == self, pc.powerup == powerup)).getColumn('interface'): yield namedAny(iface) def _getPowerupInterfaces(self): """ Collect powerup interfaces this object declares that it can be installed on. """ powerupInterfaces = getattr(self.__class__, "powerupInterfaces", ()) pifs = [] for x in powerupInterfaces: if isinstance(x, type(Interface)): #just an interface pifs.append((x, 0)) else: #an interface and a priority pifs.append(x) m = getattr(self, "__getPowerupInterfaces__", None) if m is not None: pifs = m(pifs) try: pifs = [(i, p) for (i, p) in pifs] except ValueError: raise ValueError("return value from %r.__getPowerupInterfaces__" " not an iterable of 2-tuples" % (self,)) return pifs def transacted(func): """ Return a callable which will invoke C{func} in a transaction using the C{store} attribute of the first parameter passed to it. Typically this is used to create Item methods which are automatically run in a transaction. The attributes of the returned callable will resemble those of C{func} as closely as L{twisted.python.util.mergeFunctionMetadata} can make them. """ def transactionified(item, *a, **kw): return item.store.transact(func, item, *a, **kw) return mergeFunctionMetadata(func, transactionified) def dependentItems(store, tableClass, comparisonFactory): """ Collect all the items that should be deleted when an item or items of a particular item type are deleted. @param tableClass: An L{Item} subclass. @param comparisonFactory: A one-argument callable taking an attribute and returning an L{iaxiom.IComparison} describing the items to collect. @return: An iterable of items to delete. """ for cascadingAttr in (_cascadingDeletes.get(tableClass, []) + _cascadingDeletes.get(None, [])): for cascadedItem in store.query(cascadingAttr.type, comparisonFactory(cascadingAttr)): yield cascadedItem def allowDeletion(store, tableClass, comparisonFactory): """ Returns a C{bool} indicating whether deletion of an item or items of a particular item type should be allowed to proceed. @param tableClass: An L{Item} subclass. @param comparisonFactory: A one-argument callable taking an attribute and returning an L{iaxiom.IComparison} describing the items to collect.
@return: A C{bool} indicating whether deletion should be allowed. """ for cascadingAttr in (_disallows.get(tableClass, []) + _disallows.get(None, [])): for cascadedItem in store.query(cascadingAttr.type, comparisonFactory(cascadingAttr), limit=1): return False return True class Item(Empowered, slotmachine._Strict): # Python-Special Attributes __metaclass__ = MetaItem # Axiom-Special Attributes __dirty__ = inmemory() __legacy__ = False __already_inherited__ = 0 # Private attributes. __store = inmemory() # underlying reference to the store. __everInserted = inmemory() # has this object ever been inserted into the # database? __justCreated = inmemory() # was this object just created, i.e. is there # no committed database representation of it # yet __deleting = inmemory() # has this been marked for deletion at # checkpoint __deletingObject = inmemory() # being marked for deletion at checkpoint, # are we also deleting the central object row # (True: as in an actual delete) or are we # simply deleting the data row (False: as in # part of an upgrade) storeID = _SpecialStoreIDAttribute(default=None) _storeIDComparer = None _axiom_service = inmemory() # A mapping from interfaces to in-memory powerups. _inMemoryPowerups = inmemory() def _currentlyValidAsReferentFor(self, store): """ Is this object currently valid as a reference? Objects which will be deleted in this transaction, or objects which are not in the same store are not valid. See attributes.reference.__get__. """ if store is None: # If your store is None, you can refer to whoever you want. I'm in # a store but it doesn't matter that you're not. return True if self.store is not store: return False if self.__deletingObject: return False return True def _schemaPrepareInsert(self, store): """ Prepare each attribute in my schema for insertion into a given store, either by upgrade or by creation. This makes sure all references point to this store and all relative paths point to this store's files directory. """ for name, atr in self.getSchema(): atr.prepareInsert(self, store) def store(): def get(self): return self.__store def set(self, store): if self.__store is not None: raise AttributeError( "Store already set - can't move between stores") if store._rejectChanges: raise ChangeRejected() self._schemaPrepareInsert(store) self.__store = store oid = self.storeID = self.store.executeSchemaSQL( _schema.CREATE_OBJECT, [self.store.getTypeID(type(self))]) if not self.__legacy__: store.objectCache.cache(oid, self) if store.autocommit: log.msg(interface=iaxiom.IStatEvent, name='database', stat_autocommits=1) self.checkpoint() else: self.touch() self.activate() self.stored() return get, set, """ A reference to a Store; when set for the first time, inserts this object into that store. Cannot be set twice; once inserted, objects are 'stuck' to a particular store and must be copied by creating a new Item. """ store = property(*store()) def __repr__(self): """ Return a nice string representation of the Item which contains some information about each of its attributes. """ L = [self.__name__] L.append('(') A = [] for nam, atr in sorted(self.getSchema()): V = atr.reprFor(self) A.append('%s=%s' % (nam, V)) A.append('storeID=' + str(self.storeID)) L.append(', '.join(A)) L.append(')') L.append('@0x%X' % unsignedID(self)) return ''.join(L) def __subinit__(self, **kw): """ Initializer called regardless of whether this object was created by instantiation or loading from the database. 
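        This is the common path shared by C{__init__} (for brand-new items) and C{existingInStore} (for items loaded from an existing database row).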
""" self._axiom_service = None self._inMemoryPowerups = {} self.__dirty__ = {} to__store = kw.pop('__store', None) to__everInserted = kw.pop('__everInserted', False) to__justUpgraded = kw.pop('__justUpgraded', False) self.__store = to__store self.__everInserted = to__everInserted self.__deletingObject = False self.__deleting = False tostore = kw.pop('store',None) if not self.__everInserted: for (name, attr) in self.getSchema(): if name not in kw: kw[name] = attr.computeDefault() for k, v in kw.iteritems(): setattr(self, k, v) if tostore != None: if to__justUpgraded: # we can't just set the store, because that allocates an ID. # we do still need to do all the attribute prep, make sure # references refer to this store, paths are adjusted to point # to this store's static offset, etc. self._schemaPrepareInsert(tostore) self.__store = tostore # However, setting the store would normally cache this item as # well, so we need to cache it here - unless this is actually a # dummy class which isn't real! In that case don't. if not self.__legacy__: tostore.objectCache.cache(self.storeID, self) if tostore.autocommit: self.checkpoint() else: self.store = tostore def __init__(self, **kw): """ Create a new Item. This is called on an item *only* when it is being created for the first time, not when it is loaded from the database. The 'activate()' hook is called every time an item is loaded from the database, as well as the first time that an item is inserted into the store. This will be inside __init__ if you pass a 'store' keyword argument to an Item's constructor. This takes an arbitrary set of keyword arguments, which will be set as attributes on the created item. Subclasses of Item must honor this signature. """ if type(self) is Item: raise CantInstantiateItem() self.__justCreated = True self.__subinit__(**kw) def __finalizer__(self): return noop def existingInStore(cls, store, storeID, attrs): """Create and return a new instance from a row from the store.""" self = cls.__new__(cls) self.__justCreated = False self.__subinit__(__store=store, storeID=storeID, __everInserted=True) schema = self.getSchema() assert len(schema) == len(attrs), "invalid number of attributes" for data, (name, attr) in zip(attrs, schema): attr.loaded(self, data) self.activate() return self existingInStore = classmethod(existingInStore) def activate(self): """The object was loaded from the store. """ def getSchema(cls): """ return all persistent class attributes """ schema = [] for name, atr in cls.__attributes__: atr = atr.__get__(None, cls) if isinstance(atr, SQLAttribute): schema.append((name, atr)) cls.getSchema = staticmethod(lambda schema=schema: schema) return schema getSchema = classmethod(getSchema) def persistentValues(self): """ Return a dictionary of all attributes which will be/have been/are being stored in the database. """ return dict((k, getattr(self, k)) for (k, attr) in self.getSchema()) def touch(self): # xxx what if self.store is None: return self.store.changed(self) def revert(self): if self.__justCreated: # The SQL revert has already been taken care of. 
if not self.__legacy__: self.store.objectCache.uncache(self.storeID, self) return self.__dirty__.clear() dbattrs = self.store.querySQL( self._baseSelectSQL(self.store), [self.storeID])[0] for data, (name, atr) in zip(dbattrs, self.getSchema()): atr.loaded(self, data) self.__deleting = False self.__deletingObject = False def deleted(self): """User-definable callback that is invoked when an object is well and truly gone from the database; the transaction which deleted it has been committed. """ def stored(self): """ User-definable callback that is invoked when an object is placed into a Store for the very first time. If an Item is created with a store, this will be invoked I{after} C{activate}. """ def committed(self): """ Called after the database is brought into a consistent state with this object. """ if self.__deleting: self.deleted() if not self.__legacy__: self.store.objectCache.uncache(self.storeID, self) self.__store = None self.__justCreated = False def checkpoint(self): """ Update the database to reflect in-memory changes made to this item; for example, to make it show up in store.query() calls where it is now valid, but was not the last time it was persisted to the database. This is called automatically when in 'autocommit mode' (i.e. not in a transaction) and at the end of each transaction for every object that has been changed. """ if self.store is None: raise NotInStore("You can't checkpoint %r: not in a store" % (self,)) if self.__deleting: if not self.__everInserted: # don't issue duplicate SQL and crap; we were created, then # destroyed immediately. return self.store.executeSQL(self._baseDeleteSQL(self.store), [self.storeID]) # re-using OIDs plays havoc with the cache, and with other things # as well. We need to make sure that we leave a placeholder row at # the end of the table. if self.__deletingObject: # Mark this object as dead. self.store.executeSchemaSQL(_schema.CHANGE_TYPE, [-1, self.storeID]) # Can't do this any more: # self.store.executeSchemaSQL(_schema.DELETE_OBJECT, [self.storeID]) # TODO: need to measure the performance impact of this, then do # it to make sure things are in fact deleted: # self.store.executeSchemaSQL(_schema.APP_VACUUM) else: assert self.__legacy__ # we're done... if self.store.autocommit: self.committed() return if self.__everInserted: # case 1: we've been inserted before, either previously in this # transaction or we were loaded from the db if not self.__dirty__: # we might have been checkpointed twice within the same # transaction; just don't do anything. return self.store.executeSQL(*self._updateSQL()) else: # case 2: we are in the middle of creating the object, we've never # been inserted into the db before schemaAttrs = self.getSchema() insertArgs = [self.storeID] for (ignoredName, attrObj) in schemaAttrs: attrObjDuplicate, attributeValue = self.__dirty__[attrObj.attrname] # assert attrObjDuplicate is attrObj insertArgs.append(attributeValue) # XXX this isn't atomic, gross. self.store.executeSQL(self._baseInsertSQL(self.store), insertArgs) self.__everInserted = True # In case 1, we're dirty but we did an update, synchronizing the # database, in case 2, we haven't been created but we issue an insert. # In either case, the code in attributes.py sets the attribute *as well # as* populating __dirty__, so we clear out dirty and we keep the same # value, knowing it's the same as what's in the db. 
self.__dirty__.clear() if self.store.autocommit: self.committed() def upgradeVersion(self, typename, oldversion, newversion, **kw): # right now there is only ever one acceptable series of arguments here # but it is useful to pass them anyway to make sure the code is # functioning as expected assert typename == self.typeName, '%r != %r' % (typename, self.typeName) assert oldversion == self.schemaVersion key = typename, newversion T = None if key in _legacyTypes: T = _legacyTypes[key] elif typename in _typeNameToMostRecentClass: mostRecent = _typeNameToMostRecentClass[typename] if mostRecent.schemaVersion == newversion: T = mostRecent if T is None: raise RuntimeError("don't know about type/version pair %s:%d" % ( typename, newversion)) newTypeID = self.store.getTypeID(T) # call first to make sure the table # exists for doInsert below new = T(store=self.store, __justUpgraded=True, storeID=self.storeID, **kw) new.touch() new.activate() self.store.executeSchemaSQL(_schema.CHANGE_TYPE, [newTypeID, self.storeID]) self.deleteFromStore(False) return new def deleteFromStore(self, deleteObject=True): # go grab dependent stuff if deleteObject: if not allowDeletion(self.store, self.__class__, lambda attr: attr == self): raise DeletionDisallowed( 'Cannot delete item; ' 'has referents with whenDeleted == reference.DISALLOW') for dependent in dependentItems(self.store, self.__class__, lambda attr: attr == self): dependent.deleteFromStore() self.touch() self.__deleting = True self.__deletingObject = deleteObject if self.store.autocommit: self.checkpoint() # You may specify schemaVersion and typeName in subclasses schemaVersion = None typeName = None ###### SQL generation ###### def _baseSelectSQL(cls, st): if cls not in st.typeToSelectSQLCache: st.typeToSelectSQLCache[cls] = ' '.join(['SELECT * FROM', st.getTableName(cls), 'WHERE', st.getShortColumnName(cls.storeID), '= ?' ]) return st.typeToSelectSQLCache[cls] _baseSelectSQL = classmethod(_baseSelectSQL) def _baseInsertSQL(cls, st): if cls not in st.typeToInsertSQLCache: attrs = list(cls.getSchema()) qs = ', '.join((['?']*(len(attrs)+1))) st.typeToInsertSQLCache[cls] = ( 'INSERT INTO '+ st.getTableName(cls) + ' (' + ', '.join( [ st.getShortColumnName(cls.storeID) ] + [ st.getShortColumnName(a[1]) for a in attrs]) + ') VALUES (' + qs + ')') return st.typeToInsertSQLCache[cls] _baseInsertSQL = classmethod(_baseInsertSQL) def _baseDeleteSQL(cls, st): if cls not in st.typeToDeleteSQLCache: st.typeToDeleteSQLCache[cls] = ' '.join(['DELETE FROM', st.getTableName(cls), 'WHERE', st.getShortColumnName(cls.storeID), '= ? ' ]) return st.typeToDeleteSQLCache[cls] _baseDeleteSQL = classmethod(_baseDeleteSQL) def _updateSQL(self): # XXX no point in caching for every possible combination of attribute # values - probably. check out how prepared statements are used in # python sometime. dirty = self.__dirty__.items() if not dirty: raise RuntimeError("Non-dirty item trying to generate SQL.") dirty.sort() dirtyColumns = [] dirtyValues = [] for dirtyAttrName, (dirtyAttribute, dirtyValue) in dirty: dirtyColumns.append(self.store.getShortColumnName(dirtyAttribute)) dirtyValues.append(dirtyValue) stmt = ' '.join([ 'UPDATE', self.store.getTableName(self.__class__), 'SET', ', '.join(['%s = ?'] * len(dirty)) % tuple(dirtyColumns), 'WHERE ', self.store.getShortColumnName(type(self).storeID), ' = ?']) dirtyValues.append(self.storeID) return stmt, dirtyValues def getTableName(cls, store): """ Retrieve a string naming the database table associated with this item class. 
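        The exact form of the name is an implementation detail of the store; for an ordinary item class it is derived from the class's C{typeName} and schema version.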
""" return store.getTableName(cls) getTableName = classmethod(getTableName) def getTableAlias(cls, store, currentAliases): return None getTableAlias = classmethod(getTableAlias) class _PlaceholderColumn(_ContainableMixin, _ComparisonOperatorMuxer, _MatchingOperationMuxer, _OrderingMixin): """ Wrapper for columns from a L{Placeholder} which provides a fully qualified name built with a table alias name instead of the underlying column's real table name. """ implements(IColumn) def __init__(self, placeholder, column): self.type = placeholder self.column = column def __repr__(self): return '' % (self.column,) def __get__(self, inst): return self.column.__get__(inst) def fullyQualifiedName(self): return self.column.fullyQualifiedName() + '.' % ( self.type._placeholderCount,) def compare(self, other, op): return compare(self, other, op) def getShortColumnName(self, store): return self.column.getShortColumnName(store) def getColumnName(self, store): assert self.type._placeholderTableAlias is not None, ( "Placeholder.getTableAlias() must be called " "before Placeholder.attribute.getColumnName()") return '%s.%s' % (self.type._placeholderTableAlias, self.column.getShortColumnName(store)) def infilter(self, pyval, oself, store): return self.column.infilter(pyval, oself, store) def outfilter(self, dbval, oself): return self.column.outfilter(dbval, oself) _placeholderCount = 0 class Placeholder(object): """ Wrap an existing L{Item} type to provide a different name for it. This can be used to join a table against itself which is useful for flattening normalized data. For example, given a schema defined like this:: class Tag(Item): taggedObject = reference() tagName = text() class SomethingElse(Item): ... It might be useful to construct a query for instances of SomethingElse which have been tagged both with C{"foo"} and C{"bar"}:: t1 = Placeholder(Tag) t2 = Placeholder(Tag) store.query(SomethingElse, AND(t1.taggedObject == SomethingElse.storeID, t1.tagName == u"foo", t2.taggedObject == SomethingElse.storeID, t2.tagName == u"bar")) """ _placeholderTableAlias = None def __init__(self, itemClass): global _placeholderCount self._placeholderItemClass = itemClass self._placeholderCount = _placeholderCount + 1 _placeholderCount += 1 self.existingInStore = self._placeholderItemClass.existingInStore def __cmp__(self, other): """ Provide a deterministic sort order between Placeholder instances. Those instantiated first will compare as less than than instantiated later. """ if isinstance(other, Placeholder): return cmp(self._placeholderCount, other._placeholderCount) return NotImplemented def __getattr__(self, name): if name == 'storeID' or name in dict(self._placeholderItemClass.getSchema()): return _PlaceholderColumn(self, getattr(self._placeholderItemClass, name)) raise AttributeError(name) def getSchema(self): # In a MultipleItemQuery, the same table can appear more than # once in the "SELECT ..." part of the query, determined by # getSchema(). In this case, the correct placeholder names # need to be used. 
        schema = []
        for (name, atr) in self._placeholderItemClass.getSchema():
            schema.append((
                    name,
                    _PlaceholderColumn(
                        self,
                        getattr(self._placeholderItemClass, name))))
        return schema

    def getTableName(self, store):
        return self._placeholderItemClass.getTableName(store)

    def getTableAlias(self, store, currentAliases):
        if self._placeholderTableAlias is None:
            self._placeholderTableAlias = 'placeholder_' + str(len(currentAliases))
        return self._placeholderTableAlias

_legacyTypes = {}               # map (typeName, schemaVersion) to dummy class

def declareLegacyItem(typeName, schemaVersion, attributes, dummyBases=()):
    """
    Generate a dummy subclass of Item that will have the given attributes,
    and the base Item methods, but no methods of its own.  This is for use
    with upgrading.

    @param typeName: a string, the Axiom TypeName to have attributes for.

    @param schemaVersion: an int, the (old) version of the schema this is a
    proxy for.

    @param attributes: a dict mapping {columnName: attr instance} describing
    the schema of C{typeName} at C{schemaVersion}.

    @param dummyBases: a sequence of 4-tuples of (baseTypeName,
    baseSchemaVersion, baseAttributes, baseBases) representing the dummy bases
    of this legacy class.
    """
    if (typeName, schemaVersion) in _legacyTypes:
        return _legacyTypes[typeName, schemaVersion]
    if dummyBases:
        realBases = [declareLegacyItem(*A) for A in dummyBases]
    else:
        realBases = (Item,)
    attributes = attributes.copy()
    attributes['__module__'] = 'item_dummy'
    attributes['__legacy__'] = True
    attributes['typeName'] = typeName
    attributes['schemaVersion'] = schemaVersion
    result = type(str('DummyItem<%s,%d>' % (typeName, schemaVersion)),
                  realBases,
                  attributes)
    assert result is not None, 'wtf, %r' % (type,)
    _legacyTypes[(typeName, schemaVersion)] = result
    return result

class _PowerupConnector(Item):
    """
    I am a connector between the store and a powerup.
    """
    typeName = 'axiom_powerup_connector'

    powerup = reference()
    item = reference()
    interface = text()
    priority = integer()

POWERUP_BEFORE = 1              # Priority for 'high' priority powerups.
POWERUP_AFTER = -1              # Priority for 'low' priority powerups.
Axiom-0.6.0/axiom/listversions.py0000644000175000017500000001126511053557573016751 0ustar  exarkunexarkun# -*- test-case-name: axiom.test.test_listversions -*-

from zope.interface import classProvides

from twisted import plugin
from twisted.python import usage, versions
from axiom import iaxiom, item, attributes, plugins
from axiom.scripts import axiomatic
from epsilon.extime import Time

class ListVersions(usage.Options, axiomatic.AxiomaticSubCommandMixin):
    """
    Command for listing the version history of a store.
    """
    classProvides(plugin.IPlugin, iaxiom.IAxiomaticCommand)

    name = "list-version"
    description = "Display software package version history."

    def postOptions(self):
        for line in listVersionHistory(self.parent.getStore()):
            print line

class SystemVersion(item.Item):
    """
    Represents a set of software package versions which, taken together,
    comprise a "system version" of the software that can have affected
    the contents of a Store.

    By recording the changes of these versions in the store itself we can
    better reconstruct its history later.
    """
    creation = attributes.timestamp(
        doc="When this system version set was recorded.",
        allowNone=False)

    def __repr__(self):
        return '<SystemVersion %s>' % (self.creation,)

    def longWindedRepr(self):
        """
        @return: A string representation of this SystemVersion suitable for
        display to the user.
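
        Each record appears on its own line, tab-indented after the first;
        a sketch with purely illustrative values::

            <SystemVersion 2009-11-26 ...>
                <SoftwareVersion axiom 0.6.0>
                <SoftwareVersion epsilon 0.6.0>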
""" return '\n\t'.join( [repr(self)] + [repr(sv) for sv in self.store.query( SoftwareVersion, SoftwareVersion.systemVersion == self)]) class SoftwareVersion(item.Item): """ An Item subclass to map L{twisted.python.versions.Version} objects. """ systemVersion = attributes.reference( doc="The system version this package version was observed in.", allowNone=False) package = attributes.text(doc="The software package.", allowNone=False) version = attributes.text(doc="The version string of the software.", allowNone=False) major = attributes.integer(doc='Major version number.', allowNone=False) minor = attributes.integer(doc='Minor version number.', allowNone=False) micro = attributes.integer(doc='Micro version number.', allowNone=False) def asVersion(self): """ Convert the version data in this item to a L{twisted.python.versions.Version}. """ return versions.Version(self.package, self.major, self.minor, self.micro) def __repr__(self): return '' % (self.package, self.version) def makeSoftwareVersion(store, version, systemVersion): """ Return the SoftwareVersion object from store corresponding to the version object, creating it if it doesn't already exist. """ return store.findOrCreate(SoftwareVersion, systemVersion=systemVersion, package=unicode(version.package), version=unicode(version.short()), major=version.major, minor=version.minor, micro=version.micro) def listVersionHistory(store): """ List the software package version history of store. """ q = store.query(SystemVersion, sort=SystemVersion.creation.descending) return [sv.longWindedRepr() for sv in q] def getSystemVersions(getPlugins=plugin.getPlugins): """ Collect all the version plugins and extract their L{Version} objects. """ return list(getPlugins(iaxiom.IVersion, plugins)) def checkSystemVersion(s, versions=None): """ Check if the current version is different from the previously recorded version. If it is, or if there is no previously recorded version, create a version matching the current config. """ if versions is None: versions = getSystemVersions() currentVersionMap = dict([(v.package, v) for v in versions]) mostRecentSystemVersion = s.findFirst(SystemVersion, sort=SystemVersion.creation.descending) mostRecentVersionMap = dict([(v.package, v.asVersion()) for v in s.query(SoftwareVersion, (SoftwareVersion.systemVersion == mostRecentSystemVersion))]) if mostRecentVersionMap != currentVersionMap: currentSystemVersion = SystemVersion(store=s, creation=Time()) for v in currentVersionMap.itervalues(): makeSoftwareVersion(s, v, currentSystemVersion) Axiom-0.6.0/axiom/queryutil.py0000644000175000017500000001164011224366113016241 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_queryutil -*- import operator from axiom.attributes import AND, OR def contains(startAttribute, endAttribute, value): """ Return an L{axiom.iaxiom.IComparison} (an object that can be passed as the 'comparison' argument to Store.query/.sum/.count) which will constrain a query against 2 attributes for ranges which contain the given argument. The range is half-open. """ return AND( startAttribute <= value, value < endAttribute) def overlapping(startAttribute, # X endAttribute, # Y startValue, # A endValue, # B ): """ Return an L{axiom.iaxiom.IComparison} (an object that can be passed as the 'comparison' argument to Store.query/.sum/.count) which will constrain a query against 2 attributes for ranges which overlap with the given arguments. 
For a database with Items of class O which represent values in this configuration:: X Y (a) (b) |-------------------| (c) (d) |--------| (e) (f) |--------| (g) (h) |---| (i) (j) |------| (k) (l) |-------------------------------------| (a) (l) |-----------------------------| (c) (b) |------------------------| (c) (a) |----| (b) (l) |---------| The query:: myStore.query( O, findOverlapping(O.X, O.Y, a, b)) Will return a generator of Items of class O which represent segments a-b, c-d, e-f, k-l, a-l, c-b, c-a and b-l, but NOT segments g-h or i-j. (NOTE: If you want to pass attributes of different classes for startAttribute and endAttribute, read the implementation of this method to discover the additional join clauses required. This may be eliminated some day so for now, consider this method undefined over multiple classes.) In the database where this query is run, for an item N, all values of N.startAttribute must be less than N.endAttribute. startValue must be less than endValue. """ assert startValue <= endValue return OR( AND(startAttribute >= startValue, startAttribute <= endValue), AND(endAttribute >= startValue, endAttribute <= endValue), AND(startAttribute <= startValue, endAttribute >= endValue) ) def _tupleCompare(tuple1, ineq, tuple2, eq=lambda a,b: (a==b), ander=AND, orer=OR): """ Compare two 'in-database tuples'. Useful when sorting by a compound key and slicing into the middle of that query. """ orholder = [] for limit in range(len(tuple1)): eqconstraint = [ eq(elem1, elem2) for elem1, elem2 in zip(tuple1, tuple2)[:limit]] ineqconstraint = ineq(tuple1[limit], tuple2[limit]) orholder.append(ander(*(eqconstraint + [ineqconstraint]))) return orer(*orholder) def _tupleLessThan(tuple1, tuple2): return _tupleCompare(tuple1, operator.lt, tuple2) def _tupleGreaterThan(tuple1, tuple2): return _tupleCompare(tuple1, operator.gt, tuple2) class AttributeTuple(object): def __init__(self, *attributes): self.attributes = attributes def __iter__(self): return iter(self.attributes) def __eq__(self, other): if not isinstance(other, (AttributeTuple, tuple, list)): return NotImplemented return AND(*[ myAttr == otherAttr for (myAttr, otherAttr) in zip(self, other)]) def __ne__(self, other): if not isinstance(other, (AttributeTuple, tuple, list)): return NotImplemented return OR(*[ myAttr != otherAttr for (myAttr, otherAttr) in zip(self, other)]) def __gt__(self, other): if not isinstance(other, (AttributeTuple, tuple, list)): return NotImplemented return _tupleGreaterThan(tuple(iter(self)), other) def __lt__(self, other): if not isinstance(other, (AttributeTuple, tuple, list)): return NotImplemented return _tupleLessThan(tuple(iter(self)), other) def __ge__(self, other): if not isinstance(other, (AttributeTuple, tuple, list)): return NotImplemented return OR(self > other, self == other) def __le__(self, other): if not isinstance(other, (AttributeTuple, tuple, list)): return NotImplemented return OR(self < other, self == other) Axiom-0.6.0/axiom/scheduler.py0000644000175000017500000003735011224737657016201 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_scheduler -*- import warnings from zope.interface import implements from twisted.internet import reactor from twisted.application.service import IService, Service from twisted.python import log, failure from epsilon.extime import Time from axiom.iaxiom import IScheduler from axiom.item import Item, declareLegacyItem from axiom.attributes import AND, timestamp, reference, integer, inmemory, bytes from axiom.dependency import uninstallFrom from 
axiom.upgrade import registerUpgrader from axiom.substore import SubStore VERBOSE = False class TimedEventFailureLog(Item): typeName = 'timed_event_failure_log' schemaVersion = 1 desiredTime = timestamp() actualTime = timestamp() runnable = reference() traceback = bytes() class TimedEvent(Item): typeName = 'timed_event' schemaVersion = 1 time = timestamp(indexed=True) runnable = reference() running = inmemory(doc='True if this event is currently running.') def activate(self): self.running = False def _rescheduleFromRun(self, newTime): """ Schedule this event to be run at the indicated time, or if the indicated time is None, delete this event. """ if newTime is None: self.deleteFromStore() else: self.time = newTime def invokeRunnable(self): """ Run my runnable, and reschedule or delete myself based on its result. Must be run in a transaction. """ runnable = self.runnable if runnable is None: self.deleteFromStore() else: try: self.running = True newTime = runnable.run() finally: self.running = False self._rescheduleFromRun(newTime) def handleError(self, now, failureObj): """ An error occurred running my runnable. Check my runnable for an error-handling method called 'timedEventErrorHandler' that will take the given failure as an argument, and execute that if available: otherwise, create a TimedEventFailureLog with information about what happened to this event. Must be run in a transaction. """ errorHandler = getattr(self.runnable, 'timedEventErrorHandler', None) if errorHandler is not None: self._rescheduleFromRun(errorHandler(self, failureObj)) else: self._defaultErrorHandler(now, failureObj) def _defaultErrorHandler(self, now, failureObj): TimedEventFailureLog(store=self.store, desiredTime=self.time, actualTime=now, runnable=self.runnable, traceback=failureObj.getTraceback()) self.deleteFromStore() class _WackyControlFlow(Exception): def __init__(self, eventObject, failureObject): Exception.__init__(self, "User code failed during timed event") self.eventObject = eventObject self.failureObject = failureObject MAX_WORK_PER_TICK = 10 class SchedulerMixin: def _oneTick(self, now): theEvent = self._getNextEvent(now) if theEvent is None: return False try: theEvent.invokeRunnable() except: raise _WackyControlFlow(theEvent, failure.Failure()) self.lastEventAt = now return True def _getNextEvent(self, now): # o/` gonna party like it's 1984 o/` theEventL = list(self.store.query(TimedEvent, TimedEvent.time <= now, sort=TimedEvent.time.ascending, limit=1)) if theEventL: return theEventL[0] def tick(self): now = self.now() self.nextEventAt = None workBeingDone = True workUnitsPerformed = 0 errors = 0 while workBeingDone and workUnitsPerformed < MAX_WORK_PER_TICK: try: workBeingDone = self.store.transact(self._oneTick, now) except _WackyControlFlow, wcf: self.store.transact(wcf.eventObject.handleError, now, wcf.failureObject) log.err(wcf.failureObject) errors += 1 workBeingDone = True if workBeingDone: workUnitsPerformed += 1 x = list(self.store.query(TimedEvent, sort=TimedEvent.time.ascending, limit=1)) if x: self._transientSchedule(x[0].time, now) if errors or VERBOSE: log.msg("The scheduler ran %(eventCount)s events%(errors)s." 
% dict( eventCount=workUnitsPerformed, errors=(errors and (" (with %d errors)" % (errors,))) or '')) def schedule(self, runnable, when): TimedEvent(store=self.store, time=when, runnable=runnable) self._transientSchedule(when, self.now()) def reschedule(self, runnable, fromWhen, toWhen): for evt in self.store.query(TimedEvent, AND(TimedEvent.time == fromWhen, TimedEvent.runnable == runnable)): evt.time = toWhen self._transientSchedule(toWhen, self.now()) break else: raise ValueError("%r is not scheduled to run at %r" % (runnable, fromWhen)) def unscheduleFirst(self, runnable): """ Remove from given item from the schedule. If runnable is scheduled to run multiple times, only the temporally first is removed. """ for evt in self.store.query(TimedEvent, TimedEvent.runnable == runnable, sort=TimedEvent.time.ascending): evt.deleteFromStore() break def unscheduleAll(self, runnable): for evt in self.store.query(TimedEvent, TimedEvent.runnable == runnable): evt.deleteFromStore() def scheduledTimes(self, runnable): """ Return an iterable of the times at which the given item is scheduled to run. """ events = self.store.query( TimedEvent, TimedEvent.runnable == runnable) return (event.time for event in events if not event.running) _EPSILON = 1e-20 # A very small amount of time. class _SiteScheduler(object, Service, SchedulerMixin): """ Adapter from a site store to L{IScheduler}. """ implements(IScheduler) timer = None callLater = reactor.callLater now = Time def __init__(self, store): self.store = store def startService(self): """ Start calling persistent timed events whose time has come. """ super(_SiteScheduler, self).startService() self._transientSchedule(self.now(), self.now()) def stopService(self): """ Stop calling persistent timed events. """ super(_SiteScheduler, self).stopService() if self.timer is not None: self.timer.cancel() self.timer = None def tick(self): self.timer = None return super(_SiteScheduler, self).tick() def _transientSchedule(self, when, now): """ If the service is currently running, schedule a tick to happen no later than C{when}. @param when: The time at which to tick. @type when: L{epsilon.extime.Time} @param now: The current time. @type now: L{epsilon.extime.Time} """ if not self.running: return if self.timer is not None: if self.timer.getTime() < when.asPOSIXTimestamp(): return self.timer.cancel() delay = when.asPOSIXTimestamp() - now.asPOSIXTimestamp() # reactor.callLater allows only positive delay values. The scheduler # may want to have scheduled things in the past and that's OK, since we # are dealing with Time() instances it's impossible to predict what # they are relative to the current time from user code anyway. delay = max(_EPSILON, delay) self.timer = self.callLater(delay, self.tick) self.nextEventAt = when class _UserScheduler(object, Service, SchedulerMixin): """ Adapter from a non-site store to L{IScheduler}. """ implements(IScheduler) def __init__(self, store): self.store = store def now(self): """ Report the current time, as reported by the parent's scheduler. """ return IScheduler(self.store.parent).now() def _transientSchedule(self, when, now): """ If this service's store is attached to its parent, ask the parent to schedule this substore to tick at the given time. @param when: The time at which to tick. @type when: L{epsilon.extime.Time} @param now: Present for signature compatibility with L{_SiteScheduler._transientSchedule}, but ignored otherwise. 
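
        This works by finding or creating the L{_SubSchedulerParentHook}
        for this substore in the parent store and asking it to schedule
        itself to run no later than C{when}.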
""" if self.store.parent is not None: subStore = self.store.parent.getItemByID(self.store.idInParent) hook = self.store.parent.findOrCreate( _SubSchedulerParentHook, subStore=subStore) hook._schedule(when) def migrateDown(self): """ Remove the components in the site store for this SubScheduler. """ subStore = self.store.parent.getItemByID(self.store.idInParent) ssph = self.store.parent.findUnique( _SubSchedulerParentHook, _SubSchedulerParentHook.subStore == subStore, default=None) if ssph is not None: te = self.store.parent.findUnique(TimedEvent, TimedEvent.runnable == ssph, default=None) if te is not None: te.deleteFromStore() ssph.deleteFromStore() def migrateUp(self): """ Recreate the hooks in the site store to trigger this SubScheduler. """ te = self.store.findFirst(TimedEvent, sort=TimedEvent.time.descending) if te is not None: self._transientSchedule(te.time, None) class _SchedulerCompatMixin(object): """ Backwards compatibility helper for L{Scheduler} and L{SubScheduler}. This mixin provides all the attributes from L{IScheduler}, but provides them by adapting the L{Store} the item is in to L{IScheduler} and getting them from the resulting object. Primarily in support of test code, it also supports rebinding those attributes by rebinding them on the L{IScheduler} powerup. @see: L{IScheduler} """ implements(IScheduler) def forwardToReal(name): def get(self): return getattr(IScheduler(self.store), name) def set(self, value): setattr(IScheduler(self.store), name, value) return property(get, set) now = forwardToReal("now") tick = forwardToReal("tick") schedule = forwardToReal("schedule") reschedule = forwardToReal("reschedule") unschedule = forwardToReal("unschedule") unscheduleAll = forwardToReal("unscheduleAll") scheduledTimes = forwardToReal("scheduledTimes") def activate(self): """ Whenever L{Scheduler} or L{SubScheduler} is created, either newly or when loaded from a database, emit a deprecation warning referring people to L{IScheduler}. """ # This is unfortunate. Perhaps it is the best thing which works (it is # the first I found). -exarkun if '_axiom_memory_dummy' in vars(self): stacklevel = 7 else: stacklevel = 5 warnings.warn( self.__class__.__name__ + " is deprecated since Axiom 0.5.32. " "Just adapt stores to IScheduler.", category=PendingDeprecationWarning, stacklevel=stacklevel) class Scheduler(Item, _SchedulerCompatMixin): """ Track and execute persistent timed events for a I{site} store. This is deprecated and present only for backwards compatibility. Adapt the store to L{IScheduler} instead. """ implements(IService) typeName = 'axiom_scheduler' schemaVersion = 2 dummy = integer() def activate(self): _SchedulerCompatMixin.activate(self) def setServiceParent(self, parent): """ L{Scheduler} is no longer an L{IService}, but still provides this method as a no-op in case an instance which was still an L{IService} powerup is loaded (in which case it will be used like a service once). """ declareLegacyItem( Scheduler.typeName, 1, dict(eventsRun=integer(default=0), lastEventAt=timestamp(), nextEventAt=timestamp())) def scheduler1to2(old): new = old.upgradeVersion(Scheduler.typeName, 1, 2) new.store.powerDown(new, IService) new.store.powerDown(new, IScheduler) return new registerUpgrader(scheduler1to2, Scheduler.typeName, 1, 2) class _SubSchedulerParentHook(Item): schemaVersion = 4 typeName = 'axiom_subscheduler_parent_hook' subStore = reference( doc=""" The L{SubStore} for which this scheduling hook exists. 
""", reftype=SubStore) def run(self): """ Tick our C{subStore}'s L{SubScheduler}. """ IScheduler(self.subStore).tick() def _schedule(self, when): """ Ensure that this hook is scheduled to run at or before C{when}. """ sched = IScheduler(self.store) for scheduledAt in sched.scheduledTimes(self): if when < scheduledAt: sched.reschedule(self, scheduledAt, when) break else: sched.schedule(self, when) def upgradeParentHook1to2(oldHook): """ Add the scheduler attribute to the given L{_SubSchedulerParentHook}. """ newHook = oldHook.upgradeVersion( oldHook.typeName, 1, 2, loginAccount=oldHook.loginAccount, scheduledAt=oldHook.scheduledAt, scheduler=oldHook.store.findFirst(Scheduler)) return newHook registerUpgrader(upgradeParentHook1to2, _SubSchedulerParentHook.typeName, 1, 2) declareLegacyItem( _SubSchedulerParentHook.typeName, 2, dict(loginAccount=reference(), scheduledAt=timestamp(default=None), scheduler=reference())) def upgradeParentHook2to3(old): """ Copy the C{loginAccount} attribute, but drop the others. """ return old.upgradeVersion( old.typeName, 2, 3, loginAccount=old.loginAccount) registerUpgrader(upgradeParentHook2to3, _SubSchedulerParentHook.typeName, 2, 3) declareLegacyItem( _SubSchedulerParentHook.typeName, 3, dict(loginAccount=reference(), scheduler=reference())) def upgradeParentHook3to4(old): """ Copy C{loginAccount} to C{subStore} and remove the installation marker. """ new = old.upgradeVersion( old.typeName, 3, 4, subStore=old.loginAccount) uninstallFrom(new, new.store) return new registerUpgrader(upgradeParentHook3to4, _SubSchedulerParentHook.typeName, 3, 4) class SubScheduler(Item, _SchedulerCompatMixin): """ Track and execute persistent timed events for a substore. This is deprecated and present only for backwards compatibility. Adapt the store to L{IScheduler} instead. """ schemaVersion = 2 typeName = 'axiom_subscheduler' dummy = integer() def activate(self): _SchedulerCompatMixin.activate(self) def subscheduler1to2(old): new = old.upgradeVersion(SubScheduler.typeName, 1, 2) try: new.store.powerDown(new, IScheduler) except ValueError: # Someone might have created a SubScheduler but failed to power it # up. Fine. pass return new registerUpgrader(subscheduler1to2, SubScheduler.typeName, 1, 2) Axiom-0.6.0/axiom/sequence.py0000644000175000017500000001355710420010677016015 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_sequence -*- from axiom.item import Item from axiom.attributes import reference, integer, AND class _ListItem(Item): typeName = 'list_item' schemaVersion = 1 _index = integer() _value = reference() _container = reference() class List(Item): typeName = 'list' schemaVersion = 1 length = integer(default=0) def __init__(self, *args, **kw): super(List, self).__init__(**kw) if args: self.extend(args[0]) def _queryListItems(self): return self.store.query(_ListItem, _ListItem._container == self) def _getListItem(self, index): return list(self.store.query(_ListItem, AND(_ListItem._container == self, _ListItem._index == index)))[0] def _delListItem(self, index, resetIndexes=True): for li in self.store.query(_ListItem, AND(_ListItem._container == self, _ListItem._index == index)): li.deleteFromStore(deleteObject=True) break def _fixIndex(self, index, truncate=False): """ @param truncate: If true, negative indices which go past the beginning of the list will be evaluated as zero. 
For example:: >>> L = List([1,2,3,4,5]) >>> len(L) 5 >>> L._fixIndex(-9, truncate=True) 0 """ assert not isinstance(index, slice), 'slices are not supported (yet)' if index < 0: index += self.length if index < 0: if not truncate: raise IndexError('stored List index out of range') else: index = 0 return index def __getitem__(self, index): index = self._fixIndex(index) return self._getListItem(index)._value def __setitem__(self, index, value): index = self._fixIndex(index) self._getListItem(index)._value = value def __add__(self, other): return list(self) + list(other) def __radd__(self, other): return list(other) + list(self) def __mul__(self, other): return list(self) * other def __rmul__(self, other): return other * list(self) def index(self, other, start=0, maximum=None): if maximum is None: maximum = len(self) for pos in range(start, maximum): if pos >= len(self): break if self[pos] == other: return pos raise ValueError, 'List.index(x): %r not in List' % other def __len__(self): return self.length def __delitem__(self, index): assert not isinstance(index, slice), 'slices are not supported (yet)' self._getListItem(index).deleteFromStore() if index < self.length - 1: for item in self.store.query(_ListItem, AND( _ListItem._container == self, _ListItem._index > index)): item._index -= 1 self.length -= 1 def __contains__(self, value): return bool(self.count(value)) def append(self, value): """ @type value: L{axiom.item.Item} @param value: Must be stored in the same L{Store} as this L{List} instance. """ # XXX: Should List.append(unstoredItem) automatically store the item? self.insert(self.length, value) def extend(self, other): for item in iter(other): self.append(item) def insert(self, index, value): index = self._fixIndex(index, truncate=True) # If we do List(length=5).insert(50, x), we don't want # x's _ListItem._index to actually be 50. index = min(index, self.length) # This uses list() in case our contents change halfway through. # But does that _really_ work? for li in list(self.store.query(_ListItem, AND(_ListItem._container == self, _ListItem._index >= index))): # XXX: The performance of this operation probably sucks # compared to what it would be with an UPDATE. li._index += 1 _ListItem(store=self.store, _value=value, _container=self, _index=index) self.length += 1 def pop(self, index=None): if index is None: index = self.length - 1 index = self._fixIndex(index) x = self[index] del self[index] return x def remove(self, value): del self[self.index(value)] def reverse(self): # XXX: Also needs to be an atomic action. length = 0 for li in list(self.store.query(_ListItem, _ListItem._container == self, sort=_ListItem._index.desc)): li._index = length length += 1 self.length = length def sort(self, *args): # We want to sort by value, not sort by _ListItem. We could # accomplish this by having _ListItem.__cmp__ do something # with self._value, but that seemed wrong. This was easier. values = [li._value for li in self._queryListItems()] values.sort(*args) index = 0 for li in self._queryListItems(): # XXX: Well, can it? 
assert index < len(values), \ '_ListItems were added during a sort (can this happen?)' li._index = index li._value = values[index] index += 1 def count(self, value): return self.store.count(_ListItem, AND( _ListItem._container == self, _ListItem._value == value)) Axiom-0.6.0/axiom/slotmachine.py0000644000175000017500000001344311203025675016511 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_slotmachine -*- hyper = super _NOSLOT = object() class Allowed(object): """ An attribute that's allowed to be set. """ def __init__(self, name, default=_NOSLOT): self.name = name self.default = default def __get__(self, oself, otype=None): if otype is not None and oself is None: return self if self.name in oself.__dict__: return oself.__dict__[self.name] if self.default is not _NOSLOT: return self.default raise AttributeError("%r object did not have attribute %r" %(oself.__class__.__name__, self.name)) def __delete__(self, oself): if self.name not in oself.__dict__: # Returning rather than raising here because that's what # member_descriptor does, and Axiom relies upon that behavior. ## raise AttributeError('%r object has no attribute %r' % ## (oself.__class__.__name__, self.name)) return del oself.__dict__[self.name] def __set__(self, oself, value): oself.__dict__[self.name] = value class _SlotMetaMachine(type): def __new__(meta, name, bases, dictionary): dictionary['__name__'] = name slots = list(meta.determineSchema(dictionary)) for slot in slots: for base in bases: defval = getattr(base, slot, _NOSLOT) if defval is not _NOSLOT: break dictionary[slot] = Allowed(slot, defval) nt = type.__new__(meta, name, bases, dictionary) return nt def determineSchema(meta, dictionary): return dictionary.get("slots", []) determineSchema = classmethod(determineSchema) class DescriptorWithDefault(object): def __init__(self, default, original): self.original = original self.default = default def __get__(self, oself, type=None): if type is not None: if oself is None: return self.default return getattr(oself, self.original, self.default) def __set__(self, oself, value): setattr(oself, self.original, value) def __delete__(self, oself): delattr(oself, self.original) class Attribute(object): def __init__(self, doc=''): self.doc = doc def requiredSlots(self, modname, classname, attrname): self.name = attrname yield attrname def __get__(self, oself, type=None): assert oself is None, "%s: should be masked" % (self.name,) return self _RAISE = object() class SetOnce(Attribute): def __init__(self, doc='', default=_RAISE): Attribute.__init__(self) if default is _RAISE: self.default = () else: self.default = (default,) def requiredSlots(self, modname, classname, attrname): self.name = attrname t = self.trueattr = ('_' + self.name) yield t def __set__(self, iself, value): if not hasattr(iself, self.trueattr): setattr(iself, self.trueattr, value) else: raise AttributeError('%s.%s may only be set once' % ( type(iself).__name__, self.name)) def __get__(self, iself, type=None): if type is not None and iself is None: return self return getattr(iself, self.trueattr, *self.default) class SchemaMetaMachine(_SlotMetaMachine): def determineSchema(meta, dictionary): attrs = dictionary['__attributes__'] = [] name = dictionary['__name__'] moduleName = dictionary['__module__'] dictitems = dictionary.items() dictitems.sort() for k, v in dictitems: if isinstance(v, Attribute): attrs.append((k, v)) for slot in v.requiredSlots(moduleName, name, k): if slot == k: del dictionary[k] yield slot determineSchema = classmethod(determineSchema) 
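
# A minimal sketch of how these attributes behave on the classes defined
# below (the class and attribute names here are hypothetical, for
# illustration only):
#
#     class Ticket(SchemaMachine):
#         issuer = SetOnce(default=None)
#
#     t = Ticket()
#     t.issuer = 'alice'    # stored in the shadow slot '_issuer'
#     t.issuer = 'bob'      # raises AttributeError: may only be set once
#     t.other = 'x'         # raises AttributeError: rejected by _Strict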
class _Strict(object): """ I disallow all attributes from being set that do not have an explicit data descriptor. """ def __setattr__(self, name, value): """ Like PyObject_GenericSetAttr, but call descriptors only. """ try: allowed = type(self).__dict__['_Strict__setattr__allowed'] except KeyError: allowed = type(self)._Strict__setattr__allowed = {} for cls in type(self).__mro__: for attrName, slot in cls.__dict__.iteritems(): if attrName in allowed: # It was found earlier in the mro, overriding # whatever this is. Ignore it and move on. continue setter = getattr(slot, '__set__', _NOSLOT) if setter is not _NOSLOT: # It is a data descriptor, so remember the setter # for it in the cache. allowed[attrName] = setter else: # It is something else, so remember None for it in # the cache to indicate it cannot have its value # set. allowed[attrName] = None try: setter = allowed[name] except KeyError: pass else: if setter is not None: setter(self, value) return # It wasn't found in the setter cache or it was found to be None, # indicating a non-data descriptor which cannot be set. raise AttributeError( "%r can't set attribute %r" % (self.__class__.__name__, name)) class SchemaMachine(_Strict): __metaclass__ = SchemaMetaMachine class SlotMachine(_Strict): __metaclass__ = _SlotMetaMachine Axiom-0.6.0/axiom/store.py0000644000175000017500000025625611224737657015367 0ustar exarkunexarkun# Copyright 2008 Divmod, Inc. See LICENSE for details # -*- test-case-name: axiom.test -*- """ This module holds the Axiom Store class and related classes, such as queries. """ from epsilon import hotfix hotfix.require('twisted', 'filepath_copyTo') import time, os, itertools, warnings, sys, operator, weakref from zope.interface import implements from twisted.python import log from twisted.python.failure import Failure from twisted.python import filepath from twisted.internet import defer from twisted.python.reflect import namedAny from twisted.python.util import unsignedID from twisted.application.service import IService, IServiceCollection from epsilon.pending import PendingEvent from epsilon.cooperator import SchedulingService from axiom import _schema, attributes, upgrade, _fincache, iaxiom, errors from axiom import item from axiom._pysqlite2 import Connection from axiom.item import \ _typeNameToMostRecentClass, declareLegacyItem, \ _legacyTypes, Empowered, serviceSpecialCase, _StoreIDComparer IN_MEMORY_DATABASE = ':memory:' # The special storeID used to mark the store itself as the target of a # reference. STORE_SELF_ID = -1 tempCounter = itertools.count() # A mapping from MetaItem instances to precomputed structures describing the # indexes necessary for those MetaItems. Avoiding recomputing this speeds up # opening stores significantly. _requiredTableIndexes = weakref.WeakKeyDictionary() # A mapping from MetaItem instances to precomputed structures describing the # known in-memory schema for those MetaItems. Avoiding recomputing this speeds # up opening stores significantly. _inMemorySchemaCache = weakref.WeakKeyDictionary() class NoEmptyItems(Exception): """You must define some attributes on every item. """ def _mkdirIfNotExists(dirname): if os.path.isdir(dirname): return False os.makedirs(dirname) return True class AtomicFile(file): """I am a file which is moved from temporary to permanent storage when it is closed. After I'm closed, I will have a 'finalpath' property saying where I went. """ implements(iaxiom.IAtomicFile) def __init__(self, tempname, destpath): """ Create an AtomicFile. 
(Note: AtomicFiles can only be opened in write-binary mode.) @param tempname: The filename to open for temporary storage. @param destpath: The filename to move this file to when .close() is called. """ self._destpath = destpath file.__init__(self, tempname, 'w+b') def close(self): """ Close this file and commit it to its permanent location. @return: a Deferred which fires when the file has been moved (and backed up to tertiary storage, if necessary). """ now = time.time() try: file.close(self) _mkdirIfNotExists(self._destpath.dirname()) self.finalpath = self._destpath os.rename(self.name, self.finalpath.path) os.utime(self.finalpath.path, (now, now)) except: return defer.fail() return defer.succeed(self.finalpath) def abort(self): os.unlink(self.name) _noItem = object() # tag for optional argument to getItemByID # default def storeServiceSpecialCase(st, pups): """ Adapt a store to L{IServiceCollection}. @param st: The L{Store} to adapt. @param pups: A list of L{IServiceCollection} powerups on C{st}. @return: An L{IServiceCollection} which has all of C{pups} as children. """ if st.parent is not None: # If for some bizarre reason we're starting a substore's service, let's # just assume that its parent is running its upgraders, rather than # risk starting the upgrader run twice. (XXX: it *IS* possible to # figure out whether we need to or not, I just doubt this will ever # even happen in practice -- fix here if it does) return serviceSpecialCase(st, pups) if st._axiom_service is not None: # not new, don't add twice. return st._axiom_service collection = serviceSpecialCase(st, pups) st._upgradeService.setServiceParent(collection) if st.dbdir is not None: from axiom import batch batcher = batch.BatchProcessingControllerService(st) batcher.setServiceParent(collection) return collection def _typeIsTotallyUnknown(typename, version): return ((typename not in _typeNameToMostRecentClass) and ((typename, version) not in _legacyTypes)) class BaseQuery: """ This is the abstract base implementation of query logic shared between item and attribute queries. Note: as this is an abstract class, it doesn't *actually* implement IQuery, but all its subclasses must, so it is declared to. Don't instantiate it directly. """ # XXX: need a better convention for this sort of # abstract-but-provide-most-of-a-base-implementation thing. -glyph # How about not putting the implements(iaxiom.IQuery) here, but on # subclasses instead? -exarkun implements(iaxiom.IQuery) def __init__(self, store, tableClass, comparison=None, limit=None, offset=None, sort=None): """ Create a generic object-oriented interface to SQL, used to implement Store.query. @param store: the store that this query is within. @param tableClass: a subclass of L{Item}. @param comparison: an implementor of L{iaxiom.IComparison} @param limit: an L{int} that limits the number of results that will be queried for, or None to indicate that all results should be returned. @param offset: an L{int} that specifies the offset within the query results to begin iterating from, or None to indicate that we should start at 0. @param sort: A sort order object. Obtained by doing C{YourItemClass.yourAttribute.ascending} or C{.descending}. 
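
        Queries are normally built via L{Store.query} rather than by
        instantiating this class directly; a sketch, assuming a hypothetical
        Item subclass C{Foo} with an integer attribute C{bar}::

            store.query(Foo, Foo.bar > 6, limit=10, sort=Foo.bar.descending)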
""" self.store = store self.tableClass = tableClass self.comparison = comparison self.limit = limit self.offset = offset self.sort = iaxiom.IOrdering(sort) tables = self._involvedTables() self._computeFromClause(tables) _cloneAttributes = 'store tableClass comparison limit offset sort'.split() # IQuery def cloneQuery(self, limit=_noItem): clonekw = {} for attr in self._cloneAttributes: clonekw[attr] = getattr(self, attr) if limit is not _noItem: clonekw['limit'] = limit return self.__class__(**clonekw) def __repr__(self): return self.__class__.__name__ + '(' + ', '.join([ repr(self.store), repr(self.tableClass), repr(self.comparison), repr(self.limit), repr(self.offset), repr(self.sort)]) + ')' def explain(self): """ A debugging API, exposing SQLite's I{EXPLAIN} statement. While this is not a private method, you also probably don't have any use for it unless you understand U{SQLite opcodes} very well. Once you do, it can be handy to call this interactively to get a sense of the complexity of a query. @return: a list, the first element of which is a L{str} (the SQL statement which will be run), and the remainder of which is 3-tuples resulting from the I{EXPLAIN} of that statement. """ return ([self._sqlAndArgs('SELECT', self._queryTarget)[0]] + self._runQuery('EXPLAIN SELECT', self._queryTarget)) def _involvedTables(self): """ Return a list of tables involved in this query, first checking that no required tables (those in the query target) have been omitted from the comparison. """ # SQL and arguments if self.comparison is not None: tables = self.comparison.getInvolvedTables() self.args = self.comparison.getArgs(self.store) else: tables = [self.tableClass] self.args = [] if self.tableClass not in tables: raise ValueError( "Comparison omits required reference to result type") return tables def _computeFromClause(self, tables): """ Generate the SQL string which follows the "FROM" string and before the "WHERE" string in the final SQL statement. """ tableAliases = [] self.fromClauseParts = [] for table in tables: # The indirect calls to store.getTableName() will create the tables # if needed. (XXX That's bad, actually. They should get created # some other way if necessary. -exarkun) tableName = table.getTableName(self.store) tableAlias = table.getTableAlias(self.store, tuple(tableAliases)) if tableAlias is None: self.fromClauseParts.append(tableName) else: tableAliases.append(tableAlias) self.fromClauseParts.append('%s AS %s' % (tableName, tableAlias)) self.sortClauseParts = [] for attr, direction in self.sort.orderColumns(): assert direction in ('ASC', 'DESC'), "%r not in ASC,DESC" % (direction,) if attr.type not in tables: raise ValueError( "Ordering references type excluded from comparison") self.sortClauseParts.append( '%s %s' % (attr.getColumnName(self.store), direction)) def _sqlAndArgs(self, verb, subject): limitClause = [] if self.limit is not None: # XXX LIMIT and OFFSET used to be using ?, but they started # generating syntax errors in places where generating the whole SQL # statement does not. 
this smells like a bug in sqlite's parser to # me, but I don't know my SQL syntax standards well enough to be # sure -glyph if not isinstance(self.limit, (int, long)): raise TypeError("limit must be an integer: %r" % (self.limit,)) limitClause.append('LIMIT') limitClause.append(str(self.limit)) if self.offset is not None: if not isinstance(self.offset, (int, long)): raise TypeError("offset must be an integer: %r" % (self.offset,)) limitClause.append('OFFSET') limitClause.append(str(self.offset)) else: assert self.offset is None, 'Offset specified without limit' sqlParts = [verb, subject] if self.fromClauseParts: sqlParts.extend(['FROM', ', '.join(self.fromClauseParts)]) if self.comparison is not None: sqlParts.extend(['WHERE', self.comparison.getQuery(self.store)]) if self.sortClauseParts: sqlParts.extend(['ORDER BY', ', '.join(self.sortClauseParts)]) if limitClause: sqlParts.append(' '.join(limitClause)) sqlstr = ' '.join(sqlParts) return (sqlstr, self.args) def _runQuery(self, verb, subject): # XXX ideally this should be creating an SQL cursor and iterating # through that so we don't have to load the whole query into memory, # but right now Store's interface to SQL is all through one cursor. # I'm not sure how to do this and preserve the chokepoint so that we # can do, e.g. transaction fallbacks. t = time.time() if not self.store.autocommit: self.store.checkpoint() sqlstr, sqlargs = self._sqlAndArgs(verb, subject) sqlResults = self.store.querySQL(sqlstr, sqlargs) cs = self.locateCallSite() log.msg(interface=iaxiom.IStatEvent, querySite=cs, queryTime=time.time() - t, querySQL=sqlstr) return sqlResults def locateCallSite(self): i = 3 frame = sys._getframe(i) while frame.f_code.co_filename == __file__: #let's not get stuck in findOrCreate, etc i += 1 frame = sys._getframe(i) return (frame.f_code.co_filename, frame.f_lineno) def _selectStuff(self, verb='SELECT'): """ Return a generator which yields the massaged results of this query with a particular SQL verb. For an attribute query, massaged results are of the type of that attribute. For an item query, they are items of the type the query is supposed to return. @param verb: a str containing the SQL verb to execute. This really must be some variant of 'SELECT', the only two currently implemented being 'SELECT' and 'SELECT DISTINCT'. """ sqlResults = self._runQuery(verb, self._queryTarget) for row in sqlResults: yield self._massageData(row) def _massageData(self, row): """ Subclasses must override this method to 'massage' the data received from the database, converting it from data direct from the database into Python objects of the appropriate form. @param row: a tuple of some kind, representing an element of data returned from a call to sqlite. """ raise NotImplementedError() def distinct(self): """ Call this method if you want to avoid repeated results from a query. You can call this on either an attribute or item query. 
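
        Duplicate elimination happens in the database itself (the query is
        issued with C{SELECT DISTINCT} rather than C{SELECT}), not by
        filtering rows in Python.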
For example, on an attribute query:: X(store=s, value=1, name=u'foo') X(store=s, value=1, name=u'bar') X(store=s, value=2, name=u'baz') X(store=s, value=3, name=u'qux') list(s.query(X).getColumn('value')) => [1, 1, 2, 3] list(s.query(X).getColumn('value').distinct()) => [1, 2, 3] You can also use distinct queries to eliminate duplicate results from joining two Item types together in a query, like so:: x = X(store=s, value=1, name=u'hello') Y(store=s, other=x, ident=u'a') Y(store=s, other=x, ident=u'b') Y(store=s, other=x, ident=u'b+') list(s.query(X, AND(Y.other == X.storeID, Y.ident.startswith(u'b')))) => [X(name=u'hello', value=1, storeID=1)@..., X(name=u'hello', value=1, storeID=1)@...] list(s.query(X, AND(Y.other == X.storeID, Y.ident.startswith(u'b'))).distinct()) => [X(name=u'hello', value=1, storeID=1)@...] @return: an L{iaxiom.IQuery} provider whose values are distinct. """ return _DistinctQuery(self) def __iter__(self): """ Iterate the results of this query. """ return self._selectStuff('SELECT') _selfiter = None def next(self): """ This method is deprecated, a holdover from when queries were iterators, rather than iterables. @return: one element of massaged data. """ if self._selfiter is None: warnings.warn( "Calling 'next' directly on a query is deprecated. " "Perhaps you want to use iter(query).next(), or something " "more expressive like store.findFirst or store.findOrCreate?", DeprecationWarning, stacklevel=2) self._selfiter = self.__iter__() return self._selfiter.next() class _FakeItemForFilter: __legacy__ = False def __init__(self, store): self.store = store def _isColumnUnique(col): """ Determine if an IColumn provider is unique. @param col: an L{IColumn} provider @return: True if the IColumn provider is unique, False otherwise. """ return isinstance(col, _StoreIDComparer) class ItemQuery(BaseQuery): """ This class is a query whose results will be Item instances. This is the type always returned from L{Store.query}. """ def __init__(self, *a, **k): """ Create an ItemQuery. This is typically done via L{Store.query}. """ BaseQuery.__init__(self, *a, **k) self._queryTarget = ( self.tableClass.storeID.getColumnName(self.store) + ', ' + ( ', '.join( [attrobj.getColumnName(self.store) for name, attrobj in self.tableClass.getSchema() ]))) def paginate(self, pagesize=20): """ Split up the work of gathering a result set into multiple smaller 'pages', allowing very large queries to be iterated without blocking for long periods of time. While simply iterating C{paginate()} is very similar to iterating a query directly, using this method allows the work to obtain the results to be performed on demand, over a series of different transaction. @param pagesize: the number of results gather in each chunk of work. (This is mostly for testing paginate's implementation.) @type pagesize: L{int} @return: an iterable which yields all the results of this query. """ sort = self.sort oc = list(sort.orderColumns()) if not oc: # You can't have an unsorted pagination. sort = self.tableClass.storeID.ascending oc = list(sort.orderColumns()) if len(oc) != 1: raise RuntimeError("%d-column sorts not supported yet with paginate" %(len(oc),)) sortColumn = oc[0][0] if oc[0][1] == 'ASC': sortOp = operator.gt else: sortOp = operator.lt if _isColumnUnique(sortColumn): # This is the easy case. There is never a tie to be broken, so we # can just remember our last value and yield from there. Right now # this only happens when the column is a storeID, but hopefully in # the future we will have more of this. 
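            # (No tiebreaker is needed here: with a unique sort column no
            # two rows compare equal on the sort key, so resuming from the
            # last seen value can neither skip nor repeat rows.)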
tiebreaker = None else: tiebreaker = self.tableClass.storeID tied = lambda a, b: (sortColumn.__get__(a) == sortColumn.__get__(b)) def _AND(a, b): if a is None: return b return attributes.AND(a, b) results = list(self.store.query(self.tableClass, self.comparison, sort=sort, limit=pagesize + 1)) while results: if len(results) == 1: # XXX TODO: reject 0 pagesize. If the length of the result set # is 1, there's no next result to test for a tie with, so we # must be at the end, and we should just yield the result and finish. yield results[0] return for resultidx in range(len(results) - 1): # check for a tie. result = results[resultidx] nextResult = results[resultidx + 1] if tied(result, nextResult): # Yield any ties first, in the appropriate order. lastTieBreaker = tiebreaker.__get__(result) # Note that this query is _NOT_ limited: currently large ties # will generate arbitrarily large amounts of work. trq = self.store.query( self.tableClass, _AND(self.comparison, sortColumn == sortColumn.__get__(result))) tiedResults = list(trq) tiedResults.sort(key=lambda rslt: (sortColumn.__get__(result), tiebreaker.__get__(result))) for result in tiedResults: yield result # re-start the query here ('result' is set to the # appropriate value by the inner loop) break else: yield result lastSortValue = sortColumn.__get__(result) # hooray namespace pollution results = list(self.store.query( self.tableClass, _AND(self.comparison, sortOp(sortColumn, sortColumn.__get__(result))), sort=sort, limit=pagesize + 1)) def _massageData(self, row): """ Convert a row into an Item instance by loading cached items or creating new ones based on query results. @param row: an n-tuple, where n is the number of columns specified by my item type. @return: an instance of the type specified by this query. """ result = self.store._loadedItem(self.tableClass, row[0], row[1:]) assert result.store is not None, "result %r has funky store" % (result,) return result def getColumn(self, attributeName, raw=False): """ Get an L{iaxiom.IQuery} whose results will be values of a single attribute rather than an Item. @param attributeName: a L{str}, the name of a Python attribute, that describes a column on the Item subclass that this query was specified for. @return: an L{AttributeQuery} for the column described by the attribute named L{attributeName} on the item class that this query's results will be instances of. """ # XXX: 'raw' is undocumented because I think it's completely unused, # and it's definitely untested. It should probably be removed when # someone has the time. -glyph # Quotient POP3 server uses it. Not that it shouldn't be removed. # ;) -exarkun attr = getattr(self.tableClass, attributeName) return AttributeQuery(self.store, self.tableClass, self.comparison, self.limit, self.offset, self.sort, attr, raw) def count(self): rslt = self._runQuery( 'SELECT', 'COUNT(' + self.tableClass.storeID.getColumnName(self.store) + ')') assert len(rslt) == 1, 'more than one result: %r' % (rslt,) return rslt[0][0] or 0 def deleteFromStore(self): """ Delete all the Items which are found by this query. """ #We can do this the fast way or the slow way. # If there's a 'deleted' callback on the Item type or 'deleteFromStore' # is overridden, we have to do it the slow way. 
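        # The slow way instantiates each matched item so the overridden
        # methods actually run; the fast way, in the 'else' branch below,
        # checks reference constraints and then issues a single DELETE.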
deletedOverridden = ( self.tableClass.deleted.im_func is not item.Item.deleted.im_func) deleteFromStoreOverridden = ( self.tableClass.deleteFromStore.im_func is not item.Item.deleteFromStore.im_func) if deletedOverridden or deleteFromStoreOverridden: for it in self: it.deleteFromStore() else: # Find other item types whose instances need to be deleted # when items of the type in this query are deleted, and # remove them from the store. def itemsToDelete(attr): return attr.oneOf(self.getColumn("storeID")) if not item.allowDeletion(self.store, self.tableClass, itemsToDelete): raise errors.DeletionDisallowed( 'Cannot delete item; ' 'has referents with whenDeleted == reference.DISALLOW') for it in item.dependentItems(self.store, self.tableClass, itemsToDelete): it.deleteFromStore() # actually run the DELETE for the items in this query. self._runQuery('DELETE', "") class MultipleItemQuery(BaseQuery): """ A query that returns tuples of Items from a join. """ def __init__(self, *a, **k): """ Create a MultipleItemQuery. This is typically done via L{Store.query}. """ BaseQuery.__init__(self, *a, **k) # Just in case it's some other kind of iterable. self.tableClass = tuple(self.tableClass) if len(self.tableClass) == 0: raise ValueError("Multiple item queries must have " "at least one table class") targets = [] # Later when we massage data out, we need to slice the row. # This records the slice lengths. self.schemaLengths = [] # self.tableClass is a tuple of Item classes. for tableClass in self.tableClass: schema = tableClass.getSchema() # The extra 1 is oid self.schemaLengths.append(len(schema) + 1) targets.append( tableClass.storeID.getColumnName(self.store) + ', ' + ( ', '.join( [attrobj.getColumnName(self.store) for name, attrobj in schema ]))) self._queryTarget = ', '.join(targets) def _involvedTables(self): """ Return a list of tables involved in this query, first checking that no required tables (those in the query target) have been omitted from the comparison. """ # SQL and arguments if self.comparison is not None: tables = self.comparison.getInvolvedTables() self.args = self.comparison.getArgs(self.store) else: tables = list(self.tableClass) self.args = [] for tableClass in self.tableClass: if tableClass not in tables: raise ValueError( "Comparison omits required reference to result type %s" % tableClass.typeName) return tables def _massageData(self, row): """ Convert a row into a tuple of Item instances, by slicing it according to the number of columns for each instance, and then proceeding as for ItemQuery._massageData. @param row: an n-tuple, where n is the total number of columns specified by all the item types in this query. @return: a tuple of instances of the types specified by this query. """ offset = 0 resultBits = [] for i, tableClass in enumerate(self.tableClass): numAttrs = self.schemaLengths[i] result = self.store._loadedItem(self.tableClass[i], row[offset], row[offset+1:offset+numAttrs]) assert result.store is not None, "result %r has funky store" % (result,) resultBits.append(result) offset += numAttrs return tuple(resultBits) def count(self): """ Count the number of distinct results of the wrapped query. @return: an L{int} representing the number of distinct results. 
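
        The counting is pushed into the database by wrapping the join in a
        subselect, roughly C{SELECT COUNT(*) FROM (SELECT ...)}.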
""" if not self.store.autocommit: self.store.checkpoint() target = ', '.join([ tableClass.storeID.getColumnName(self.store) for tableClass in self.tableClass ]) sql, args = self._sqlAndArgs('SELECT', target) sql = 'SELECT COUNT(*) FROM (' + sql + ')' result = self.store.querySQL(sql, args) assert len(result) == 1, 'more than one result: %r' % (result,) return result[0][0] or 0 def distinct(self): """ @return: an L{iaxiom.IQuery} provider whose values are distinct. """ return _MultipleItemDistinctQuery(self) class _DistinctQuery(object): """ A query for results excluding duplicates. Results from this query depend on the query it was initialized with. """ implements(iaxiom.IQuery) def __init__(self, query): """ Create a distinct query, based on another query. @param query: an instance of a L{BaseQuery} subclass. Note: an IQuery provider is not sufficient, this class relies on implementation details of L{BaseQuery}. """ self.query = query self.store = query.store self.limit = query.limit def cloneQuery(self, limit=_noItem): """ Clone the original query which this distinct query wraps, and return a new wrapper around that clone. """ newq = self.query.cloneQuery(limit=limit) return self.__class__(newq) def __iter__(self): """ Iterate the distinct results of the wrapped query. @return: a generator which yields distinct values from its delegate query, whether they are items or attributes. """ return self.query._selectStuff('SELECT DISTINCT') def count(self): """ Count the number of distinct results of the wrapped query. @return: an L{int} representing the number of distinct results. """ if not self.query.store.autocommit: self.query.store.checkpoint() sql, args = self.query._sqlAndArgs( 'SELECT DISTINCT', self.query.tableClass.storeID.getColumnName(self.query.store)) sql = 'SELECT COUNT(*) FROM (' + sql + ')' result = self.query.store.querySQL(sql, args) assert len(result) == 1, 'more than one result: %r' % (result,) return result[0][0] or 0 class _MultipleItemDistinctQuery(_DistinctQuery): """ Distinct query based on a MultipleItemQuery. """ def count(self): """ Count the number of distinct results of the wrapped query. @return: an L{int} representing the number of distinct results. """ if not self.query.store.autocommit: self.query.store.checkpoint() target = ', '.join([ tableClass.storeID.getColumnName(self.query.store) for tableClass in self.query.tableClass ]) sql, args = self.query._sqlAndArgs( 'SELECT DISTINCT', target) sql = 'SELECT COUNT(*) FROM (' + sql + ')' result = self.query.store.querySQL(sql, args) assert len(result) == 1, 'more than one result: %r' % (result,) return result[0][0] or 0 _noDefault = object() class AttributeQuery(BaseQuery): """ A query for the value of a single attribute from an item class, so as to load only a single value rather than an instantiating an entire item when the value is all that is needed. """ def __init__(self, store, tableClass, comparison=None, limit=None, offset=None, sort=None, attribute=None, raw=False): BaseQuery.__init__(self, store, tableClass, comparison, limit, offset, sort) self.attribute = attribute self.raw = raw self._queryTarget = attribute.getColumnName(self.store) _cloneAttributes = BaseQuery._cloneAttributes + 'attribute raw'.split() def _massageData(self, row): """ Convert a raw database row to the type described by an attribute. For example, convert a database integer into an L{extime.Time} instance for an L{attributes.timestamp} attribute. @param row: a 1-tuple, containing the in-database value from my attribute. 
@return: a value of the type described by my attribute. """ if self.raw: return row[0] return self.attribute.outfilter(row[0], _FakeItemForFilter(self.store)) def count(self): """ @return: the number of non-None values of this attribute specified by this query. """ rslt = self._runQuery('SELECT', 'COUNT(%s)' % (self._queryTarget,)) or [(0,)] assert len(rslt) == 1, 'more than one result: %r' % (rslt,) return rslt[0][0] def sum(self): """ Return the sum of all the values returned by this query. If no results are specified, return None. Note: for non-numeric column types the result of this method will be nonsensical. @return: a number or None. """ res = self._runQuery('SELECT', 'SUM(%s)' % (self._queryTarget,)) or [(0,)] assert len(res) == 1, "more than one result: %r" % (res,) dbval = res[0][0] or 0 return self.attribute.outfilter(dbval, _FakeItemForFilter(self.store)) def average(self): """ Return the average value (as defined by the AVG implementation in the database) of the values specified by this query. Note: for non-numeric column types the result of this method will be nonsensical. @return: a L{float} representing the 'average' value of this column. """ rslt = self._runQuery('SELECT', 'AVG(%s)' % (self._queryTarget,)) or [(0,)] assert len(rslt) == 1, 'more than one result: %r' % (rslt,) return rslt[0][0] def max(self, default=_noDefault): return self._functionOnTarget('MAX', default) def min(self, default=_noDefault): return self._functionOnTarget('MIN', default) def _functionOnTarget(self, which, default): rslt = self._runQuery('SELECT', '%s(%s)' % (which, self._queryTarget,)) or [(None,)] assert len(rslt) == 1, 'more than one result: %r' % (rslt,) dbval = rslt[0][0] if dbval is None: if default is _noDefault: raise ValueError, '%s() on table with no items'%(which) else: return default return self.attribute.outfilter(dbval, _FakeItemForFilter(self.store)) def _storeBatchServiceSpecialCase(*args, **kwargs): """ Trivial wrapper around L{batch.storeBatchServiceSpecialCase} to delay the import of axiom.batch, which imports the reactor, which we do not want as a side-effect of importing L{axiom.store} (as this would preclude selecting a reactor after importing this module; see #2864). """ from axiom import batch return batch.storeBatchServiceSpecialCase(*args, **kwargs) def _schedulerServiceSpecialCase(empowered, pups): """ This function creates (or returns a previously created) L{IScheduler} powerup. If L{IScheduler} powerups were found on C{empowered}, the first of those is given priority. Otherwise, a site L{Store} or a user L{Store} will have any pre-existing L{IScheduler} powerup associated with them (on the hackish cache attribute C{_schedulerService}) returned, or a new one created if none exists already. """ from axiom.scheduler import _SiteScheduler, _UserScheduler # Give precedence to anything found in the store for pup in pups: return pup # If the empowered is a store, construct a scheduler for it. if isinstance(empowered, Store): if getattr(empowered, '_schedulerService', None) is None: if empowered.parent is None: sched = _SiteScheduler(empowered) else: sched = _UserScheduler(empowered) sched.setServiceParent(IService(empowered)) empowered._schedulerService = sched return empowered._schedulerService return None class Store(Empowered): """ I am a database that Axiom Items can be stored in. Store an item in me by setting its 'store' attribute to be me. 
I can be created one of two ways:: Store() # Create an in-memory database Store("/path/to/file.axiom") # create an on-disk database in the # directory /path/to/file.axiom @ivar typeToTableNameCache: a dictionary mapping Item subclass type objects to the fully-qualified sqlite table name where items of that type are stored. This cache is generated from the saved schema metadata when this store is opened and updated when schema changes from other store objects (such as in other processes) are detected. @cvar __legacy__: an L{Item} may refer to a L{Store} via a L{reference}, and this attribute tells the item reference system that the store itself is not an old version of an item; i.e. it does not need to have its upgraders invoked. @cvar storeID: an L{Item} may refer to a L{Store} via a L{reference}, and this attribute tells the item reference system that the L{Store} has a special ID to use (which is never allocated to any item). """ aggregateInterfaces = { IService: storeServiceSpecialCase, IServiceCollection: storeServiceSpecialCase, iaxiom.IBatchService: _storeBatchServiceSpecialCase, iaxiom.IScheduler: _schedulerServiceSpecialCase} implements(iaxiom.IBeneficiary) transaction = None # set of objects changed in the current transaction touched = None # set of objects changed since the last checkpoint databaseName = 'main' # can differ if database is attached to another # database. dbdir = None # FilePath to the Axiom database directory, or None for # in-memory Stores. filesdir = None # FilePath to the filesystem-storage subdirectory of the # database directory, or None for in-memory Stores. store = property(lambda self: self) # I have a 'store' attribute because I # am 'stored' within myself; this is # also for references to use. # Counter indicating things are going on which disallow changes to the # database. Callbacks dispatched to application code while this is # non-zero will reject database changes with a ChangeRejected exception. _rejectChanges = 0 # The following method and attributes are the ad-hoc interface required as # targets of attributes.reference attributes. (In other words, the store # is a little bit like a fake item.) These should probably eventually be # on an interface somewhere, and be better named. def _currentlyValidAsReferentFor(self, store): """ Check to see if this store is currently valid as a target of a reference from an item in the given L{Store}. This is true iff the given L{Store} is this L{Store}. @param store: the store that the referring item is present in. @type store: L{Store} """ if store is self: return True else: return False __legacy__ = False storeID = STORE_SELF_ID def __init__(self, dbdir=None, filesdir=None, debug=False, parent=None, idInParent=None): """ Create a store. @param dbdir: A L{FilePath} to (or name of) an existing Axiom directory, or directory that does not exist yet which will be created as this Store is instantiated. If unspecified, this database will be kept in memory. @param filesdir: A L{FilePath} to (or name of) a directory to keep files in for in-memory stores. An exception will be raised if both this attribute and C{dbdir} are specified. @param debug: set to True if this Store should print out every SQL statement it sends to SQLite. @param parent: (internal) If this is opened using an L{axiom.substore.Substore}, a reference to its parent. @param idInParent: (internal) If this is opened using an L{axiom.substore.Substore}, the storeID of the item within its parent which opened it.
@raises: C{ValueError} if both C{dbdir} and C{filesdir} are specified. """ if parent is not None or idInParent is not None: assert parent is not None assert idInParent is not None self.parent = parent self.idInParent = idInParent self.debug = debug self.autocommit = True self.queryTimes = [] self.execTimes = [] self._inMemoryPowerups = {} self._attachedChildren = {} # database name => child store object self.statementCache = {} # non-normalized => normalized qmark SQL # statements self.activeTables = {} # tables which have had items added/removed # this run self.objectCache = _fincache.FinalizingCache() self.tableQueries = {} # map typename: query string w/ storeID # parameter. a typename is a persistent # database handle for what we'll call a 'FQPN', # i.e. arg to namedAny. self.typenameAndVersionToID = {} # map database-persistent typename and # version to an oid in the types table self.typeToInsertSQLCache = {} self.typeToSelectSQLCache = {} self.typeToDeleteSQLCache = {} self.typeToTableNameCache = {} self.attrToColumnNameCache = {} self._upgradeManager = upgrade._StoreUpgrade(self) self._axiom_service = None if self.parent is None: self._upgradeService = SchedulingService() else: # Substores should hook into their parent, since they shouldn't # expect to have their own substore service started. self._upgradeService = self.parent._upgradeService # OK! Everything that can be set up without touching the filesystem # has been done. Let's get ready to open the actual database... _initialOpenFailure = None if dbdir is None: self._initdb(IN_MEMORY_DATABASE) self._initSchema() self._memorySubstores = [] if filesdir is not None: if not isinstance(filesdir, filepath.FilePath): filesdir = filepath.FilePath(filesdir) self.filesdir = filesdir if not self.filesdir.isdir(): self.filesdir.makedirs() self.filesdir.child("temp").createDirectory() else: if filesdir is not None: raise ValueError("Only one of dbdir and filesdir" " may be specified") if not isinstance(dbdir, filepath.FilePath): dbdir = filepath.FilePath(dbdir) # required subdirs: files, temp, run # datafile: db.sqlite self.dbdir = dbdir self.filesdir = self.dbdir.child('files') if not dbdir.isdir(): tempdbdir = dbdir.temporarySibling() tempdbdir.makedirs() # maaaaaaaybe this is a bad idea, we # probably shouldn't be doing this # automatically. for child in ('files', 'temp', 'run'): tempdbdir.child(child).createDirectory() self._initdb(tempdbdir.child('db.sqlite').path) self._initSchema() self.close(_report=False) try: tempdbdir.moveTo(dbdir) except: _initialOpenFailure = Failure() try: self._initdb(dbdir.child('db.sqlite').path) except: if _initialOpenFailure is not None: log.msg("Failed to initialize axiom database." " Possible cause of error: ") log.err(_initialOpenFailure) raise self.transact(self._startup) # _startup may have found some things which we must now upgrade. if self._upgradeManager.upgradesPending: # Automatically upgrade when possible. 
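# The PendingEvent created below is fired once the upgrade iterator
# handed to the upgrade service has run to completion;
# whenFullyUpgraded() returns Deferreds which wait on it.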
self._upgradeComplete = PendingEvent() d = self._upgradeService.addIterator(self._upgradeManager.upgradeEverything()) def logUpgradeFailure(aFailure): if aFailure.check(errors.ItemUpgradeError): log.err(aFailure.value.originalFailure, 'Item upgrade error') log.err(aFailure, "upgrading %r failed" % (self,)) return aFailure d.addErrback(logUpgradeFailure) def finishHim(resultOrFailure): self._upgradeComplete.callback(resultOrFailure) self._upgradeComplete = None d.addBoth(finishHim) else: self._upgradeComplete = None log.msg( interface=iaxiom.IStatEvent, store_opened=self.dbdir is not None and self.dbdir.path or '') _childCounter = 0 def _attachChild(self, child): "attach a child database, returning an identifier for it" self._childCounter += 1 databaseName = 'child_db_%d' % (self._childCounter,) self._attachedChildren[databaseName] = child # ATTACH DATABASE statements can't use bind parameters, blech. self.executeSQL("ATTACH DATABASE '%s' AS %s" % ( child.dbdir.child('db.sqlite').path, databaseName,)) return databaseName attachedToParent = False def attachToParent(self): assert self.parent is not None, 'must have a parent to attach' assert self.transaction is None, "can't attach within a transaction" self.close() self.attachedToParent = True self.databaseName = self.parent._attachChild(self) self.connection = self.parent.connection self.cursor = self.parent.cursor # def detachFromParent(self): # pass def _initSchema(self): # No point in even attempting to transactionalize this: # every single statement is a CREATE TABLE or a CREATE # INDEX and those commit transactions silently anyway. for stmt in _schema.BASE_SCHEMA: self.executeSchemaSQL(stmt) def _startup(self): """ Called during __init__. Check consistency of schema in database with classes in memory. Load all Python modules for stored items, and load version information for the upgrader service to run later. """ typesToCheck = [] for oid, module, typename, version in self.querySchemaSQL(_schema.ALL_TYPES): if self.debug: print print 'SCHEMA:', oid, module, typename, version if typename not in _typeNameToMostRecentClass: try: namedAny(module) except ValueError, err: raise ImportError('cannot find module ' + module, str(err)) self.typenameAndVersionToID[typename, version] = oid # Can't call this until typenameAndVersionToID is populated, since this # depends on building a reverse map of that. persistedSchema = self._loadTypeSchema() # Now that we have persistedSchema, loop over everything again and # prepare old types. for (typename, version), typeID in self.typenameAndVersionToID.iteritems(): cls = _typeNameToMostRecentClass.get(typename) if cls is not None: if version != cls.schemaVersion: typesToCheck.append( self._prepareOldVersionOf( typename, version, persistedSchema)) else: typesToCheck.append(cls) for cls in typesToCheck: self._checkTypeSchemaConsistency(cls, persistedSchema) # Schema is consistent! Now, if I forgot to create any indexes last # time I saw this table, do it now... extantIndexes = self._loadExistingIndexes() for cls in typesToCheck: self._createIndexesFor(cls, extantIndexes) self._upgradeManager.checkUpgradePaths() def _loadExistingIndexes(self): """ Return a C{set} of the SQL indexes which already exist in the underlying database. It is important to load all of this information at once (as opposed to using many CREATE INDEX IF NOT EXISTS statements or many CREATE INDEX statements and handling the errors) to minimize the cost of opening a store.
Loading all the indexes at once is much faster than doing pretty much anything that involves doing something once per required index. """ # Totally SQLite-specific: look up what indexes exist already in # sqlite_master so we can skip trying to create them (which can be # really slow). return set( name for (name,) in self.querySchemaSQL( "SELECT name FROM *DATABASE*.sqlite_master " "WHERE type = 'index'")) def _initdb(self, dbfname): self.connection = Connection.fromDatabaseName(dbfname) self.cursor = self.connection.cursor() def __repr__(self): d = self.dbdir if d is None: d = '(in memory)' else: d = repr(d) return '<Store %s@0x%x>' % (d, unsignedID(self)) def findOrCreate(self, userItemClass, __ifnew=None, **attrs): """ Usage:: s.findOrCreate(userItemClass [, function] [, x=1, y=2, ...]) Example:: class YourItemType(Item): a = integer() b = text() c = integer() def f(x): print x, "-- it's new!" s.findOrCreate(YourItemType, f, a=1, b=u'2') Search for an item with columns in the database that match the passed set of keyword arguments, returning the first match if one is found, creating one with the given attributes if not. Takes an optional positional argument function to call on the new item if it is new. """ andargs = [] for k, v in attrs.iteritems(): col = getattr(userItemClass, k) andargs.append(col == v) if len(andargs) == 0: cond = [] elif len(andargs) == 1: cond = [andargs[0]] else: cond = [attributes.AND(*andargs)] for result in self.query(userItemClass, *cond): return result newItem = userItemClass(store=self, **attrs) if __ifnew is not None: __ifnew(newItem) return newItem def newFilePath(self, *path): p = self.filesdir for subdir in path: p = p.child(subdir) return p def newTemporaryFilePath(self, *path): p = self.dbdir.child('temp') for subdir in path: p = p.child(subdir) return p def newFile(self, *path): """ Open a new file somewhere in this Store's file area. @param path: a sequence of path segments. @return: an L{AtomicFile}. """ assert len(path) > 0, "newFile requires a nonzero number of segments" if self.dbdir is None: if self.filesdir is None: raise RuntimeError("This in-memory store has no file directory") else: tmpbase = self.filesdir else: tmpbase = self.dbdir tmpname = tmpbase.child('temp').child(str(tempCounter.next()) + ".tmp") return AtomicFile(tmpname.path, self.newFilePath(*path)) def newDirectory(self, *path): p = self.filesdir for subdir in path: p = p.child(subdir) return p def _loadTypeSchema(self): """ Load all of the stored schema information for all types known by this store. It's important to load everything all at once (rather than loading the schema for each type separately as it is needed) to keep store opening fast. A single query with many results is much faster than many queries with a few results each. @return: A dict with two-tuples of item type name and schema version as keys and lists of five-tuples of attribute schema information for that type. The elements of the five-tuple are:: - a string giving the name of the Python attribute - a string giving the SQL type - a boolean indicating whether the attribute is indexed - the Python attribute type object (eg, axiom.attributes.integer) - a string giving documentation for the attribute """ # Oops, need an index going the other way. This only happens once per # store open, and it's based on data queried from the store, so there # doesn't seem to be any broader way to cache and re-use the result.
# However, if we keyed the resulting dict on the database typeID rather # than (typeName, schemaVersion), we wouldn't need the information this # dict gives us. That would mean changing the callers of this function # to use typeID instead of that tuple, which may be possible. Probably # only represents a very tiny possible speedup. typeIDToNameAndVersion = {} for key, value in self.typenameAndVersionToID.iteritems(): typeIDToNameAndVersion[value] = key # Indexing attribute, ordering by it, and getting rid of row_offset # from the schema and the sorted() here doesn't seem to be any faster # than doing this. persistedSchema = sorted(self.querySchemaSQL( "SELECT attribute, type_id, sqltype, indexed, " "pythontype, docstring FROM *DATABASE*.axiom_attributes ")) # This is trivially (but measurably!) faster than getattr(attributes, # pythontype). getAttribute = attributes.__dict__.__getitem__ result = {} for (attribute, typeID, sqltype, indexed, pythontype, docstring) in persistedSchema: key = typeIDToNameAndVersion[typeID] if key not in result: result[key] = [] result[key].append(( attribute, sqltype, indexed, getAttribute(pythontype), docstring)) return result def _checkTypeSchemaConsistency(self, actualType, onDiskSchema): """ Called for all known types at database startup: make sure that what we know (in memory) about this type agrees with what is stored about this type in the database. @param actualType: A L{MetaItem} instance which is associated with a table in this store. The schema it defines in memory will be checked against the schema known in the database to ensure they agree. @param onDiskSchema: A mapping from L{MetaItem} instances (such as C{actualType}) to the schema known in the database and associated with C{actualType}. @raise RuntimeError: if the schema defined by C{actualType} does not match the database-present schema given in C{onDiskSchema} or if C{onDiskSchema} contains a newer version of the schema associated with C{actualType} than C{actualType} represents. """ # make sure that both the runtime and the database both know about this # type; if they don't both know, we can't check that their views are # consistent try: inMemorySchema = _inMemorySchemaCache[actualType] except KeyError: inMemorySchema = _inMemorySchemaCache[actualType] = [ (storedAttribute.attrname, storedAttribute.sqltype) for (name, storedAttribute) in actualType.getSchema()] key = (actualType.typeName, actualType.schemaVersion) persistedSchema = [(storedAttribute[0], storedAttribute[1]) for storedAttribute in onDiskSchema[key]] if inMemorySchema != persistedSchema: raise RuntimeError( "Schema mismatch on already-loaded %r <%r> object version %d: %r != %r" % (actualType, actualType.typeName, actualType.schemaVersion, onDiskSchema, inMemorySchema)) if actualType.__legacy__: return if (key[0], key[1] + 1) in onDiskSchema: raise RuntimeError( "Greater versions of database %r objects in the DB than in memory" % (actualType.typeName,)) # finally find old versions of the data and prepare to upgrade it. def _prepareOldVersionOf(self, typename, version, persistedSchema): """ Note that this database contains old versions of a particular type. Create the appropriate dummy item subclass and queue the type to be upgraded. @param typename: The I{typeName} associated with the schema for which to create a dummy item class. @param version: The I{schemaVersion} of the old version of the schema for which to create a dummy item class. 
@param persistedSchema: A mapping giving information about all schemas stored in the database, used to create the attributes of the dummy item class. """ appropriateSchema = persistedSchema[typename, version] # create actual attribute objects dummyAttributes = {} for (attribute, sqlType, indexed, pythontype, docstring) in appropriateSchema: atr = pythontype(indexed=indexed, doc=docstring) dummyAttributes[attribute] = atr dummyBases = [] oldType = declareLegacyItem( typename, version, dummyAttributes, dummyBases) self._upgradeManager.queueTypeUpgrade(oldType) return oldType def whenFullyUpgraded(self): """ Return a Deferred which fires when this Store has been fully upgraded. """ if self._upgradeComplete is not None: return self._upgradeComplete.deferred() else: return defer.succeed(None) def getOldVersionOf(self, typename, version): return _legacyTypes[typename, version] # grab the schema for that version # look up upgraders which push it forward def findUnique(self, tableClass, comparison=None, default=_noItem): """ Find an Item in the database which should be unique. If it is found, return it. If it is not found, return 'default' if it was passed, otherwise raise L{errors.ItemNotFound}. If more than one item is found, raise L{errors.DuplicateUniqueItem}. @param comparison: implementor of L{iaxiom.IComparison}. @param default: value to use if the item is not found. """ results = list(self.query(tableClass, comparison, limit=2)) lr = len(results) if lr == 0: if default is _noItem: raise errors.ItemNotFound(comparison) else: return default elif lr == 2: raise errors.DuplicateUniqueItem(comparison, results) elif lr == 1: return results[0] else: raise AssertionError("limit=2 database query returned 3+ results: ", comparison, results) def findFirst(self, tableClass, comparison=None, offset=None, sort=None, default=None): """ Usage:: s.findFirst(tableClass [, query arguments except 'limit']) Example:: class YourItemType(Item): a = integer() b = text() c = integer() ... it = s.findFirst(YourItemType, AND(YourItemType.a == 1, YourItemType.b == u'2'), sort=YourItemType.c.descending) Search for an item with columns in the database that match the passed comparison, offset and sort, returning the first match if one is found, or the passed default (None if none is passed) if one is not found. """ limit = 1 for item in self.query(tableClass, comparison, limit, offset, sort): return item return default def query(self, tableClass, comparison=None, limit=None, offset=None, sort=None): """ Return a generator of instances of C{tableClass}, or tuples of instances if C{tableClass} is a tuple of classes. Examples:: fastCars = s.query(Vehicle, axiom.attributes.AND( Vehicle.wheels == 4, Vehicle.maxKPH > 200), limit=100, sort=Vehicle.maxKPH.descending) quotesByClient = s.query( (Client, Quote), axiom.attributes.AND( Client.active == True, Quote.client == Client.storeID, Quote.created >= someDate), limit=10, sort=(Client.name.ascending, Quote.created.descending)) @param tableClass: a subclass of Item to look for instances of, or a tuple of subclasses. @param comparison: a provider of L{IComparison}, or None, to match all items available in the store. If tableClass is a tuple, then the comparison must refer to all Item subclasses in that tuple, and specify the relationships between them. @param limit: an int to limit the total length of the results, or None for all available results. @param offset: an int to specify a starting point within the available results, or None to start at 0. 
@param sort: an L{ISort}, something that comes from an SQLAttribute's 'ascending' or 'descending' attribute. @return: an L{ItemQuery} object, which is an iterable of Items or tuples of Items, according to tableClass. """ if isinstance(tableClass, tuple): queryClass = MultipleItemQuery else: queryClass = ItemQuery return queryClass(self, tableClass, comparison, limit, offset, sort) def sum(self, summableAttribute, *a, **k): args = (self, summableAttribute.type) + a return AttributeQuery(attribute=summableAttribute, *args, **k).sum() def count(self, *a, **k): return self.query(*a, **k).count() def batchInsert(self, itemType, itemAttributes, dataRows): """ Create multiple items in the store without loading corresponding Python objects into memory. The items' C{stored} callbacks will not be called. Example:: myData = [(37, u"Fred", u"Wichita"), (28, u"Jim", u"Fresno"), (43, u"Betty", u"Dubuque")] myStore.batchInsert(FooItem, [FooItem.age, FooItem.name, FooItem.city], myData) @param itemType: an Item subclass to create instances of. @param itemAttributes: an iterable of attributes on the Item subclass. @param dataRows: an iterable of iterables, each the same length as C{itemAttributes} and containing data corresponding to each attribute in it. @return: None. """ class FakeItem: pass _NEEDS_DEFAULT = object() # token for lookup failure fakeOSelf = FakeItem() fakeOSelf.store = self sql = itemType._baseInsertSQL(self) indices = {} schema = [attr for (name, attr) in itemType.getSchema()] for i, attr in enumerate(itemAttributes): indices[attr] = i for row in dataRows: oid = self.store.executeSchemaSQL( _schema.CREATE_OBJECT, [self.store.getTypeID(itemType)]) insertArgs = [oid] for attr in schema: i = indices.get(attr, _NEEDS_DEFAULT) if i is _NEEDS_DEFAULT: pyval = attr.default else: pyval = row[i] dbval = attr._convertPyval(fakeOSelf, pyval) insertArgs.append(dbval) self.executeSQL(sql, insertArgs) def _loadedItem(self, itemClass, storeID, attrs): if self.objectCache.has(storeID): result = self.objectCache.get(storeID) # XXX do checks on consistency between attrs and DB object, maybe? else: result = itemClass.existingInStore(self, storeID, attrs) if not result.__legacy__: self.objectCache.cache(storeID, result) return result def changed(self, item): """ An item in this store was changed. Add it to the current transaction's list of changed items, if a transaction is currently underway, or raise an exception if this L{Store} is currently in a state which does not allow changes. """ if self._rejectChanges: raise errors.ChangeRejected() if self.transaction is not None: self.transaction.add(item) self.touched.add(item) def checkpoint(self): self._rejectChanges += 1 try: for item in self.touched: # XXX: it should be possible here, using various clever hacks, to # automatically optimize functionally identical statements into # executemany. item.checkpoint() self.touched.clear() finally: self._rejectChanges -= 1 executedThisTransaction = None tablesCreatedThisTransaction = None def transact(self, f, *a, **k): """ Execute C{f(*a, **k)} in the context of a database transaction. Any changes made to this L{Store} by C{f} will be committed when C{f} returns. If C{f} raises an exception, those changes will be reverted instead. If a transaction is already in progress (in this thread - ie, if a frame executing L{Store.transact} is already on the call stack), this will B{not} start a nested transaction.
Changes will not be committed until the existing transaction completes, and an exception raised by C{f} will not revert changes made by C{f}. You probably don't want to ever call this if another transaction is in progress. @return: Whatever C{f(*a, **kw)} returns. @raise: Whatever C{f(*a, **kw)} raises, or a database exception. """ if self.transaction is not None: return f(*a, **k) if self.attachedToParent: return self.parent.transact(f, *a, **k) try: self._begin() try: result = f(*a, **k) self.checkpoint() except: exc = Failure() try: self.revert() except: log.err(exc) raise raise else: self._commit() return result finally: self._cleanupTxnState() # The following three methods are necessary... # - in PySQLite: because PySQLite has some buggy transaction handling which # makes it impossible to issue explicit BEGIN statements - which we # _need_ to do to provide guarantees for read/write transactions. def _begin(self): if self.debug: print '<'*10, 'BEGIN', '>'*10 self.cursor.execute("BEGIN IMMEDIATE TRANSACTION") self._setupTxnState() def _setupTxnState(self): self.executedThisTransaction = [] self.tablesCreatedThisTransaction = [] if self.attachedToParent: self.transaction = self.parent.transaction self.touched = self.parent.touched else: self.transaction = set() self.touched = set() self.autocommit = False for sub in self._attachedChildren.values(): sub._setupTxnState() def _commit(self): if self.debug: print '*'*10, 'COMMIT', '*'*10 # self.connection.commit() self.cursor.execute("COMMIT") log.msg(interface=iaxiom.IStatEvent, stat_commits=1) self._postCommitHook() def _postCommitHook(self): self._rejectChanges += 1 try: for committed in self.transaction: committed.committed() finally: self._rejectChanges -= 1 def _rollback(self): if self.debug: print '>'*10, 'ROLLBACK', '<'*10 # self.connection.rollback() self.cursor.execute("ROLLBACK") log.msg(interface=iaxiom.IStatEvent, stat_rollbacks=1) def revert(self): self._rollback() self._inMemoryRollback() def _inMemoryRollback(self): self._rejectChanges += 1 try: for item in self.transaction: item.revert() finally: self._rejectChanges -= 1 self.transaction.clear() for tableClass in self.tablesCreatedThisTransaction: del self.typenameAndVersionToID[tableClass.typeName, tableClass.schemaVersion] # Clear all cache related to this table for cache in (self.typeToInsertSQLCache, self.typeToDeleteSQLCache, self.typeToSelectSQLCache, self.typeToTableNameCache) : if tableClass in cache: del cache[tableClass] if tableClass.storeID in self.attrToColumnNameCache: del self.attrToColumnNameCache[tableClass.storeID] for name, attr in tableClass.getSchema(): if attr in self.attrToColumnNameCache: del self.attrToColumnNameCache[attr] for sub in self._attachedChildren.values(): sub._inMemoryRollback() def _cleanupTxnState(self): self.autocommit = True self.transaction = None self.touched = None self.executedThisTransaction = None self.tablesCreatedThisTransaction = [] for sub in self._attachedChildren.values(): sub._cleanupTxnState() def close(self, _report=True): self.cursor.close() self.cursor = self.connection = None if self.debug and _report: if not self.queryTimes: print 'no queries' else: print 'query:', self.avgms(self.queryTimes) if not self.execTimes: print 'no execs' else: print 'exec:', self.avgms(self.execTimes) def avgms(self, l): return 'count: %d avg: %dus' % (len(l), int( (sum(l)/len(l)) * 1000000.),) def _indexNameOf(self, tableClass, attrname): """ Return the unqualified (ie, no database name) name of the given attribute of the given table. 
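For example (an illustrative schema, not one defined in this module): a type named 'foo' at schema version 2, with attrname ('bar', 'baz'), yields 'axiomidx_foo_v2_bar_baz'.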
@type tableClass: L{MetaItem} @param tableClass: The Python class associated with a table in the database. @param attrname: A sequence of the names of the columns of the indicated table which will be included in the named index. @return: A C{str} giving the name of the index which will index the given attributes of the given table. """ return "axiomidx_%s_v%d_%s" % (tableClass.typeName, tableClass.schemaVersion, '_'.join(attrname)) def _tableNameFor(self, typename, version): return "%s.item_%s_v%d" % (self.databaseName, typename, version) def getTableName(self, tableClass): """ Retrieve the fully qualified name of the table holding items of a particular class in this store. If the table does not exist in the database, it will be created as a side-effect. @param tableClass: an Item subclass @raises axiom.errors.ItemClassesOnly: if an object other than a subclass of Item is passed. @return: a string """ if not (isinstance(tableClass, type) and issubclass(tableClass, item.Item)): raise errors.ItemClassesOnly("Only subclasses of Item have table names.") if tableClass not in self.typeToTableNameCache: self.typeToTableNameCache[tableClass] = self._tableNameFor(tableClass.typeName, tableClass.schemaVersion) # make sure the table exists self.getTypeID(tableClass) return self.typeToTableNameCache[tableClass] def getShortColumnName(self, attribute): """ Retrieve the column name for a particular attribute in this store. The attribute must be bound to an Item subclass (its type must be valid). If the underlying table does not exist in the database, it will be created as a side-effect. @param attribute: an attribute bound to an Item subclass @return: a string XXX: The current implementation does not really match the description, which is actually more restrictive. But it will be true soon, so I guess it is ok for now. The reason is that this method is used during table creation. """ if isinstance(attribute, _StoreIDComparer): return 'oid' return '[' + attribute.attrname + ']' def getColumnName(self, attribute): """ Retrieve the fully qualified column name for a particular attribute in this store. The attribute must be bound to an Item subclass (its type must be valid). If the underlying table does not exist in the database, it will be created as a side-effect. @param attribute: an attribute bound to an Item subclass @return: a string """ if attribute not in self.attrToColumnNameCache: self.attrToColumnNameCache[attribute] = '.'.join( (self.getTableName(attribute.type), self.getShortColumnName(attribute))) return self.attrToColumnNameCache[attribute] def getTypeID(self, tableClass): """ Retrieve the typeID associated with a particular table in the in-database schema for this Store. A typeID is an opaque integer representing the Item subclass, and the associated table in this Store's SQLite database. @param tableClass: a subclass of Item @return: an integer """ key = (tableClass.typeName, tableClass.schemaVersion) if key in self.typenameAndVersionToID: return self.typenameAndVersionToID[key] return self.transact(self._maybeCreateTable, tableClass, key) def _maybeCreateTable(self, tableClass, key): """ A type ID has been requested for an Item subclass whose table was not present when this Store was opened. Attempt to create the table, and if that fails because another Store object (perhaps in another process) has created the table, re-read the schema. When that's done, return the typeID. This method is internal to the implementation of getTypeID. It must be run in a transaction.
@param tableClass: an Item subclass @param key: a 2-tuple of the tableClass's typeName and schemaVersion @return: a typeID for the table; a new one if no table exists, or the existing one if the table was created by another Store object referencing this database. """ sqlstr = [] sqlarg = [] # needs to be calculated including version tableName = self._tableNameFor(tableClass.typeName, tableClass.schemaVersion) sqlstr.append("CREATE TABLE %s (" % tableName) for nam, atr in tableClass.getSchema(): # it's a stored attribute sqlarg.append("\n%s %s" % (atr.getShortColumnName(self), atr.sqltype)) if len(sqlarg) == 0: # XXX should be raised way earlier, in the class definition or something raise NoEmptyItems("%r did not define any attributes" % (tableClass,)) sqlstr.append(', '.join(sqlarg)) sqlstr.append(')') try: self.createSQL(''.join(sqlstr)) except errors.TableAlreadyExists: # Although we don't have a memory of this table from the last time # we called "_startup()", another process has updated the schema # since then. self._startup() return self.typenameAndVersionToID[key] typeID = self.executeSchemaSQL(_schema.CREATE_TYPE, [tableClass.typeName, tableClass.__module__, tableClass.schemaVersion]) self.typenameAndVersionToID[key] = typeID if self.tablesCreatedThisTransaction is not None: self.tablesCreatedThisTransaction.append(tableClass) # We can pass () for extantIndexes here because since the table didn't # exist for tableClass, none of its indexes could have either. # Whatever checks _createIndexesFor will make would give the same # result against the actual set of existing indexes as they will # against (). self._createIndexesFor(tableClass, ()) for n, (name, storedAttribute) in enumerate(tableClass.getSchema()): self.executeSchemaSQL( _schema.ADD_SCHEMA_ATTRIBUTE, [typeID, n, storedAttribute.indexed, storedAttribute.sqltype, storedAttribute.allowNone, storedAttribute.attrname, storedAttribute.doc, storedAttribute.__class__.__name__]) # XXX probably need something better for pythontype eventually, # when we figure out a good way to do user-defined attributes or we # start parameterizing references. return typeID def _createIndexesFor(self, tableClass, extantIndexes): """ Create any indexes which don't exist and are required by the schema defined by C{tableClass}. @param tableClass: A L{MetaItem} instance which may define a schema which includes indexes. @param extantIndexes: A container (anything which can be the right-hand argument to the C{in} operator) which contains the unqualified names of all indexes which already exist in the underlying database and do not need to be created. """ try: indexes = _requiredTableIndexes[tableClass] except KeyError: indexes = set() for nam, atr in tableClass.getSchema(): if atr.indexed: indexes.add(((atr.getShortColumnName(self),), (atr.attrname,))) for compound in atr.compoundIndexes: indexes.add((tuple(inatr.getShortColumnName(self) for inatr in compound), tuple(inatr.attrname for inatr in compound))) _requiredTableIndexes[tableClass] = indexes # _ZOMFG_ SQL is such a piece of _shit_: you can't fully qualify the # table name in CREATE INDEX statements because the _INDEX_ is fully # qualified! 
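# To illustrate with made-up names: an indexed attribute 'bar' on a
# type 'foo' at version 1, in the 'main' database, produces:
#     CREATE INDEX main.axiomidx_foo_v1_bar ON item_foo_v1([bar])
# i.e. the index name carries the database qualifier, but the table
# name must not.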
indexColumnPrefix = '.'.join(self.getTableName(tableClass).split(".")[1:]) for (indexColumns, indexAttrs) in indexes: nameOfIndex = self._indexNameOf(tableClass, indexAttrs) if nameOfIndex in extantIndexes: continue csql = 'CREATE INDEX %s.%s ON %s(%s)' % ( self.databaseName, nameOfIndex, indexColumnPrefix, ', '.join(indexColumns)) self.createSQL(csql) def getTableQuery(self, typename, version): if (typename, version) not in self.tableQueries: query = 'SELECT * FROM %s WHERE oid = ?' % ( self._tableNameFor(typename, version), ) self.tableQueries[typename, version] = query return self.tableQueries[typename, version] def getItemByID(self, storeID, default=_noItem, autoUpgrade=True): """ Retrieve an item by its storeID, and return it. Note: most of the failure modes of this method are catastrophic and should not be handled by application code. The only one that application programmers should be concerned with is KeyError. They are listed for educational purposes. @param storeID: an L{int} which identifies an item within this store. @param default: if passed, return this value rather than raising in the case where no Item is found. @raise TypeError: if storeID is not an integer. @raise UnknownItemType: if the storeID refers to an item row in the database, but the corresponding type information is not available to Python. @raise RuntimeError: if the found item's class version is higher than the current application is aware of. (In other words, if you have upgraded a database to a new schema and then attempt to open it with a previous version of the code.) @raise KeyError: if no item corresponded to the given storeID. @return: an Item, or the given default, if it was passed and no row corresponding to the given storeID can be located in the database. """ if not isinstance(storeID, (int, long)): raise TypeError("storeID *must* be an int or long, not %r" % ( type(storeID).__name__,)) if storeID == STORE_SELF_ID: return self if self.objectCache.has(storeID): return self.objectCache.get(storeID) log.msg(interface=iaxiom.IStatEvent, stat_cache_misses=1, key=storeID) results = self.querySchemaSQL(_schema.TYPEOF_QUERY, [storeID]) assert (len(results) in [1, 0]),\ "Database panic: more than one result for TYPEOF!" if results: typename, module, version = results[0] # for the moment we're going to assume no inheritance attrs = self.querySQL(self.getTableQuery(typename, version), [storeID]) if len(attrs) != 1: if default is _noItem: raise errors.ItemNotFound("No results for known-to-be-good object") return default attrs = attrs[0] useMostRecent = False moreRecentAvailable = False # The schema may have changed since the last time I saw the # database. Let's look to see if this is suspiciously broken... if _typeIsTotallyUnknown(typename, version): # Another process may have created it - let's re-up the schema # and see what we get. self._startup() # OK, all the modules have been loaded now, everything # verified. if _typeIsTotallyUnknown(typename, version): # If there is STILL no inkling of it anywhere, we are # almost certainly boned. Let's tell the user in a # structured way, at least.
raise errors.UnknownItemType( "cannot load unknown schema/version pair: %r %r - id: %r" % (typename, version, storeID)) if typename in _typeNameToMostRecentClass: moreRecentAvailable = True mostRecent = _typeNameToMostRecentClass[typename] if mostRecent.schemaVersion < version: raise RuntimeError("%s:%d - was found in the database and most recent %s is %d" % (typename, version, typename, mostRecent.schemaVersion)) if mostRecent.schemaVersion == version: useMostRecent = True if useMostRecent: T = mostRecent else: T = self.getOldVersionOf(typename, version) x = T.existingInStore(self, storeID, attrs) if moreRecentAvailable and (not useMostRecent) and autoUpgrade: # upgradeVersion will do caching as necessary, we don't have to # cache here. (It must, so that app code can safely call # upgradeVersion and get a consistent object out of it.) x = self.transact(self._upgradeManager.upgradeItem, x) elif not x.__legacy__: # We loaded the most recent version of an object self.objectCache.cache(storeID, x) return x if default is _noItem: raise KeyError(storeID) return default def querySchemaSQL(self, sql, args=()): sql = sql.replace("*DATABASE*", self.databaseName) return self.querySQL(sql, args) def querySQL(self, sql, args=()): """For use with SELECT (or SELECT-like PRAGMA) statements. """ if self.debug: result = timeinto(self.queryTimes, self._queryandfetch, sql, args) else: result = self._queryandfetch(sql, args) return result def _queryandfetch(self, sql, args): if self.debug: print '**', sql, '--', ', '.join(map(str, args)) self.cursor.execute(sql, args) before = time.time() result = list(self.cursor) after = time.time() if after - before > 2.0: log.msg('Extremely long list(cursor): %s' % (after - before,)) log.msg(sql) # import traceback; traceback.print_stack() if self.debug: print ' lastrow:', self.cursor.lastRowID() print ' result:', result return result def createSQL(self, sql, args=()): """ For use with auto-committing statements such as CREATE TABLE or CREATE INDEX. """ before = time.time() self._execSQL(sql, args) after = time.time() if after - before > 2.0: log.msg('Extremely long CREATE: %s' % (after - before,)) log.msg(sql) # import traceback; traceback.print_stack() def _execSQL(self, sql, args): if self.debug: rows = timeinto(self.execTimes, self._queryandfetch, sql, args) else: rows = self._queryandfetch(sql, args) assert not rows return sql def executeSchemaSQL(self, sql, args=()): sql = sql.replace("*DATABASE*", self.databaseName) return self.executeSQL(sql, args) def executeSQL(self, sql, args=()): """ For use with UPDATE or INSERT statements. """ sql = self._execSQL(sql, args) result = self.cursor.lastRowID() if self.executedThisTransaction is not None: self.executedThisTransaction.append((result, sql, args)) return result # This isn't actually useful any more. It turns out that the pysqlite # documentation is confusingly worded; it's perfectly possible to create tables # within transactions, but PySQLite's automatic transaction management (which # we turn off) breaks that. However, a function very much like it will be # useful for doing nested transactions without support from the database # itself, so I'm keeping it here commented out as an example. 
# def _reexecute(self): # assert self.executedThisTransaction is not None # self._begin() # for resultLastTime, sql, args in self.executedThisTransaction: # self._execSQL(sql, args) # resultThisTime = self.cursor.lastRowID() # if resultLastTime != resultThisTime: # raise errors.TableCreationConcurrencyError( # "Expected to get %s as a result " # "of %r:%r, got %s" % ( # resultLastTime, # sql, args, # resultThisTime)) def timeinto(l, f, *a, **k): then = time.time() try: return f(*a, **k) finally: now = time.time() elapsed = now - then l.append(elapsed) queryTimes = [] execTimes = [] Axiom-0.6.0/axiom/substore.py0000644000175000017500000000716410717122321016047 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_substore -*- from zope.interface import implements from twisted.application import service from axiom.iaxiom import IPowerupIndirector from axiom.store import Store from axiom.item import Item from axiom.attributes import path, inmemory, reference from axiom.upgrade import registerUpgrader class SubStore(Item): schemaVersion = 1 typeName = 'substore' storepath = path() substore = inmemory() implements(IPowerupIndirector) def createNew(cls, store, pathSegments): """ Create a new SubStore, allocating a new file space for it. """ if store.dbdir is None: self = cls(store=store, storepath=None) else: storepath = store.newDirectory(*pathSegments) self = cls(store=store, storepath=storepath) self.open() self.close() return self createNew = classmethod(createNew) def close(self): self.substore.close() del self.substore._openSubStore del self.substore def open(self, debug=False): if hasattr(self, 'substore'): return self.substore else: s = self.substore = self.createStore(debug) s._openSubStore = self # don't fall out of cache as long as the # store is alive! return s def createStore(self, debug): """ Create the actual Store this Substore represents. """ if self.storepath is None: self.store._memorySubstores.append(self) # don't fall out of cache if self.store.filesdir is None: filesdir = None else: filesdir = (self.store.filesdir.child("_substore_files") .child(str(self.storeID)) .path) return Store(parent=self.store, filesdir=filesdir, idInParent=self.storeID, debug=debug) else: return Store(self.storepath.path, parent=self.store, idInParent=self.storeID, debug=debug) def __conform__(self, interface): """ I adapt my store object to whatever interface I am adapted to. This allows for avatar adaptation in L{axiom.userbase} to work properly without having to know explicitly that all 'avatars' objects are SubStore instances, since it is valid to have non-SubStore avatars, which are simply adaptable to the cred interfaces they represent. """ ifa = interface(self.open(debug=self.store.debug), None) return ifa def indirect(self, interface): """ Like __conform__, I adapt my store to whatever interface I am asked to produce a powerup for. This allows for app stores to be installed as powerups for their site stores directly, rather than having an additional item type for each interface that we might wish to adapt to. """ return interface(self) class SubStoreStartupService(Item, service.Service): """ This class no longer exists. It is here simply to trigger an upgrade which deletes it. Ignore it, please. 
""" installedOn = reference() parent = inmemory() running = inmemory() name = inmemory() schemaVersion = 2 def eliminateSubStoreStartupService(subservice): subservice.deleteFromStore() return None registerUpgrader(eliminateSubStoreStartupService, SubStoreStartupService.typeName, 1, 2) Axiom-0.6.0/axiom/tags.py0000644000175000017500000000723610437615621015150 0ustar exarkunexarkun from epsilon.extime import Time from axiom.item import Item from axiom.attributes import text, reference, integer, AND, timestamp class Tag(Item): typeName = 'tag' schemaVersion = 1 name = text(doc=""" The short string which is being applied as a tag to an Item. """) created = timestamp(doc=""" When this tag was applied to the Item to which it applies. """) object = reference(doc=""" The Item to which this tag applies. """) catalog = reference(doc=""" The L{Catalog} item in which this tag was created. """) tagger = reference(doc=""" An optional reference to the Item which is responsible for this tag's existence. """) class _TagName(Item): """ Helper class to make Catalog.tagNames very fast. One of these is created for each distinct tag name that is created. _TagName Items are never deleted from the database. """ typeName = 'tagname' name = text(doc=""" The short string which uniquely represents this tag. """, indexed=True) catalog = reference(doc=""" The L{Catalog} item in which this tag exists. """) class Catalog(Item): typeName = 'tag_catalog' schemaVersion = 2 tagCount = integer(default=0) def tag(self, obj, tagName, tagger=None): """ """ # check to see if that tag exists. Put the object attribute first, # since each object should only have a handful of tags and the object # reference is indexed. As long as this is the case, it doesn't matter # whether the name or catalog attributes are indexed because selecting # from a small set of results is fast even without an index. if self.store.findFirst(Tag, AND(Tag.object == obj, Tag.name == tagName, Tag.catalog == self)): return # if the tag doesn't exist, maybe we need to create a new tagname object self.store.findOrCreate(_TagName, name=tagName, catalog=self) # Increment only if we are creating a new tag self.tagCount += 1 Tag(store=self.store, object=obj, name=tagName, catalog=self, created=Time(), tagger=tagger) def tagNames(self): """ Return an iterator of unicode strings - the unique tag names which have been applied objects in this catalog. """ return self.store.query(_TagName, _TagName.catalog == self).getColumn("name") def tagsOf(self, obj): """ Return an iterator of unicode strings - the tag names which apply to the given object. """ return self.store.query( Tag, AND(Tag.catalog == self, Tag.object == obj)).getColumn("name") def objectsIn(self, tagName): return self.store.query( Tag, AND(Tag.catalog == self, Tag.name == tagName)).getColumn("object") def upgradeCatalog1to2(oldCatalog): """ Create _TagName instances which version 2 of Catalog automatically creates for use in determining the tagNames result, but which version 1 of Catalog did not create. 
""" newCatalog = oldCatalog.upgradeVersion('tag_catalog', 1, 2, tagCount=oldCatalog.tagCount) tags = newCatalog.store.query(Tag, Tag.catalog == newCatalog) tagNames = tags.getColumn("name").distinct() for t in tagNames: _TagName(store=newCatalog.store, catalog=newCatalog, name=t) return newCatalog from axiom.upgrade import registerUpgrader registerUpgrader(upgradeCatalog1to2, 'tag_catalog', 1, 2) Axiom-0.6.0/axiom/upgrade.py0000644000175000017500000002113711127421367015634 0ustar exarkunexarkun# -*- test-case-name: axiom.test.test_upgrading -*- """ Axiom Item/schema upgrade support. """ from twisted.python.failure import Failure from twisted.python.log import msg from twisted.python.reflect import qual from axiom.errors import NoUpgradePathAvailable, UpgraderRecursion from axiom.errors import ItemUpgradeError from axiom.item import _legacyTypes, _typeNameToMostRecentClass _upgradeRegistry = {} class _StoreUpgrade(object): """ Manage Item upgrades and upgrade batching for a store. @type _currentlyUpgrading: C{dict} @ivar _currentlyUpgrading: A map of storeIDs to Items currently in the middle of an upgrader. Used to make sure that the same item isn't upgraded reentrantly. @type _oldTypesRemaining: C{list} @ivar _oldTypesRemaining: All the old types which have not been fully upgraded in this database. """ def __init__(self, store): self.store = store self._currentlyUpgrading = {} self._oldTypesRemaining = [] def upgradesPending(self): return bool(self._oldTypesRemaining) upgradesPending = property( upgradesPending, doc=""" Flag indicating whether there any types that still need to be upgraded or not. """) def checkUpgradePaths(self): """ Check that all of the accumulated old Item types have a way to get from their current version to the latest version. @raise axiom.errors.NoUpgradePathAvailable: for any, and all, Items that do not have a valid upgrade path """ cantUpgradeErrors = [] for oldVersion in self._oldTypesRemaining: # We have to be able to get from oldVersion.schemaVersion to # the most recent type. currentType = _typeNameToMostRecentClass.get( oldVersion.typeName, None) if currentType is None: # There isn't a current version of this type; it's entirely # legacy, will be upgraded by deleting and replacing with # something else. continue typeInQuestion = oldVersion.typeName upgver = oldVersion.schemaVersion while upgver < currentType.schemaVersion: # Do we have enough of the schema present to upgrade? if ((typeInQuestion, upgver) not in _upgradeRegistry): cantUpgradeErrors.append( "No upgrader present for %s (%s) from %d to %d" % ( typeInQuestion, qual(currentType), upgver, upgver + 1)) # Is there a type available for each upgrader version? if upgver+1 != currentType.schemaVersion: if (typeInQuestion, upgver+1) not in _legacyTypes: cantUpgradeErrors.append( "Type schema required for upgrade missing:" " %s version %d" % ( typeInQuestion, upgver+1)) upgver += 1 if cantUpgradeErrors: raise NoUpgradePathAvailable('\n '.join(cantUpgradeErrors)) def queueTypeUpgrade(self, oldtype): """ Queue a type upgrade for C{oldtype}. """ if oldtype not in self._oldTypesRemaining: self._oldTypesRemaining.append(oldtype) def upgradeItem(self, thisItem): """ Upgrade a legacy item. @raise axiom.errors.UpgraderRecursion: If the given item is already in the process of being upgraded. 
""" sid = thisItem.storeID if sid in self._currentlyUpgrading: raise UpgraderRecursion() self._currentlyUpgrading[sid] = thisItem try: return upgradeAllTheWay(thisItem) finally: self._currentlyUpgrading.pop(sid) def upgradeEverything(self): """ Upgrade every item in the store, one at a time. @raise axiom.errors.ItemUpgradeError: if an item upgrade failed @return: A generator that yields for each item upgrade. """ return self.upgradeBatch(1) def upgradeBatch(self, n): """ Upgrade the entire store in batches, yielding after each batch. @param n: Number of upgrades to perform per transaction @type n: C{int} @raise axiom.errors.ItemUpgradeError: if an item upgrade failed @return: A generator that yields after each batch upgrade. This needs to be consumed for upgrading to actually take place. """ store = self.store def _doBatch(itemType): upgradedAnything = False for theItem in store.query(itemType, limit=n): upgradedAnything = True try: self.upgradeItem(theItem) except: f = Failure() raise ItemUpgradeError( f, theItem.storeID, itemType, _typeNameToMostRecentClass[itemType.typeName]) return upgradedAnything if self.upgradesPending: didAny = False while self._oldTypesRemaining: t0 = self._oldTypesRemaining[0] upgradedAnything = store.transact(_doBatch, t0) if not upgradedAnything: self._oldTypesRemaining.pop(0) if didAny: msg("%s finished upgrading %s" % (store.dbdir.path, qual(t0))) continue elif not didAny: didAny = True msg("%s beginning upgrade..." % (store.dbdir.path,)) yield None if didAny: msg("%s completely upgraded." % (store.dbdir.path,)) def registerUpgrader(upgrader, typeName, oldVersion, newVersion): """ Register a callable which can perform a schema upgrade between two particular versions. @param upgrader: A one-argument callable which will upgrade an object. It is invoked with an instance of the old version of the object. @param typeName: The database typename for which this is an upgrader. @param oldVersion: The version from which this will upgrade. @param newVersion: The version to which this will upgrade. This must be exactly one greater than C{oldVersion}. """ # assert (typeName, oldVersion, newVersion) not in _upgradeRegistry, "duplicate upgrader" # ^ this makes the tests blow up so it's just disabled for now; perhaps we # should have a specific test mode # assert newVersion - oldVersion == 1, "read the doc string" assert isinstance(typeName, str), "read the doc string" _upgradeRegistry[typeName, oldVersion] = upgrader def registerAttributeCopyingUpgrader(itemType, fromVersion, toVersion, postCopy=None): """ Register an upgrader for C{itemType}, from C{fromVersion} to C{toVersion}, which will copy all attributes from the legacy item to the new item. If postCopy is provided, it will be called with the new item after upgrading. @param itemType: L{axiom.item.Item} subclass @param postCopy: a callable of one argument @return: None """ def upgrader(old): newitem = old.upgradeVersion(itemType.typeName, fromVersion, toVersion, **dict((str(name), getattr(old, name)) for (name, _) in old.getSchema())) if postCopy is not None: postCopy(newitem) return newitem registerUpgrader(upgrader, itemType.typeName, fromVersion, toVersion) def registerDeletionUpgrader(itemType, fromVersion, toVersion): """ Register an upgrader for C{itemType}, from C{fromVersion} to C{toVersion}, which will delete the item from the database. @param itemType: L{axiom.item.Item} subclass @return: None """ # XXX This should actually do something more special so that a new table is # not created and such. 
def upgrader(old): old.deleteFromStore() return None registerUpgrader(upgrader, itemType.typeName, fromVersion, toVersion) def upgradeAllTheWay(o): assert o.__legacy__ while True: try: upgrader = _upgradeRegistry[o.typeName, o.schemaVersion] except KeyError: break else: o = upgrader(o) if o is None: # Object was explicitly destroyed during upgrading. break return o __all__ = [ 'registerUpgrader', 'registerAttributeCopyingUpgrader', 'registerDeletionUpgrader'] Axiom-0.6.0/axiom/userbase.py0000644000175000017500000005205511224737657016025 0ustar exarkunexarkun# Copyright 2008 Divmod, Inc. See LICENSE file for details. # -*- test-case-name: axiom.test.test_userbase -*- """ The L{axiom.userbase} module implements various interfaces from L{twisted.cred} to allow an Axiom database to serve as an integration point for Twisted services that do authentication. While not strictly required, one part of this implementation is the idiom that Axiom (by default) partitions its user database into a separate data-store for each user. This has several advantages: - Each user's account can be quickly and independently added to or removed from the system; inactive accounts can be quickly moved to archival storage. - User accounts may be migrated between servers relatively easily. - Database queries that deal with a single user's data are completely partitioned; even naive and inefficient queries can still be run quickly as long as users do not individually have a lot of data in a particular table. For truly multi-user applications, this partitioning is incomplete without an abstract facility for exchanging data between different users of the same application. This module does not implement such a facility, as it is left to higher-level mechanisms such as Mantissa's messaging system in L{xmantissa.messaging}. However, this module works standalone as well; just be aware that a user's database contains only their own data. """ import warnings from zope.interface import implements, Interface from twisted.cred.portal import IRealm from twisted.cred.credentials import IUsernamePassword, IUsernameHashedPassword from twisted.cred.checkers import ICredentialsChecker, ANONYMOUS from twisted.python import log from axiom.store import Store from axiom.item import Item from axiom.substore import SubStore from axiom.attributes import text, integer, reference, boolean, AND, OR from axiom.errors import ( BadCredentials, NoSuchUser, DuplicateUser, MissingDomainPart) from axiom.scheduler import IScheduler from axiom import upgrade, iaxiom ANY_PROTOCOL = u'*' def dflip(x): warnings.warn("Don't use dflip no more", stacklevel=2) return x class AllNamesConflict(Exception): """ When inserting a SubStore into a site store, no names were found which were not already associated with an account. This prevents the SubStore from being inserted at all. No files are moved and the site database is not modified. """ class DatabaseDirectoryConflict(Exception): """ When inserting a SubStore into a site store, the selected ultimate location for the SubStore's Axiom database directory already existed. This prevents the SubStore from being inserted at all. No files are moved and the site database is not modified. """ class IPreauthCredentials(Interface): """ Deprecated. Don't use this. If you wrote a checker which can check this interface, make it check one of the interfaces L{Preauthenticated} implements, instead. """ class Preauthenticated(object): """ A credentials object of multiple types which has already been authenticated somehow.
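For example (purely illustrative), code which has already verified a user by some out-of-band mechanism can use Preauthenticated(u'alice@example.com') as the credentials object for a cred login, and the password check will automatically succeed.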
class LoginMethod(Item):
    typeName = 'login_method'
    schemaVersion = 2

    localpart = text(doc="""
    A local-part of my user's identifier.
    """, indexed=True, allowNone=False)

    domain = text(doc="""
    The domain part of my user's identifier. [XXX See TODO below]
    May be None (generally for "system" users).
    """, indexed=True)

    internal = boolean(doc="""
    Flag indicating whether this is a method maintained by this server, or if
    it represents an external contact mechanism (such as a third-party email
    provider)
    """, allowNone=False)

    protocol = text(indexed=True, allowNone=False)

    account = reference(doc="""
    A reference to the LoginAccount for which this is a login method.
    """, allowNone=False)

    verified = boolean(indexed=True, allowNone=False)


def upgradeLoginMethod1To2(old):
    return old.upgradeVersion(
        'login_method', 1, 2,
        localpart=old.localpart,
        domain=old.domain,
        internal=old.internal,
        protocol=old.protocol,
        account=old.account,
        verified=old.verified)

upgrade.registerUpgrader(upgradeLoginMethod1To2, 'login_method', 1, 2)


class LoginAccount(Item):
    """
    I am an entry in a LoginBase.

    @ivar avatars: An Item which is adaptable to various cred client
        interfaces.  Plural because it represents a collection of potentially
        disparate implementors, such as an IResource for web access and an
        IContact for SIP access.

    @ivar disabled: This account has been disabled.  It is still
        database-resident but the user should not be allowed to log in.
    """
    typeName = 'login'
    schemaVersion = 2

    password = text()
    avatars = reference()       # reference to a thing which can be adapted
                                # to implementations for application-level
                                # protocols.  In general this is a reference
                                # to a SubStore because this is optimized for
                                # applications where per-user data is a
                                # substantial portion of the cost.
    disabled = integer()

    def __conform__(self, interface):
        """
        For convenience, forward adaptation to my 'avatars' attribute.
        """
        ifa = interface(self.avatars, None)
        return ifa

    def migrateDown(self):
        """
        Assuming that self.avatars is a SubStore which should contain *only*
        the LoginAccount for the user I represent, remove all LoginAccounts
        and LoginMethods from that store and copy all methods from the site
        store down into it.
        """
        ss = self.avatars.open()
        def _():
            oldAccounts = ss.query(LoginAccount)
            oldMethods = ss.query(LoginMethod)
            for x in list(oldAccounts) + list(oldMethods):
                x.deleteFromStore()
            self.cloneInto(ss, ss)
            IScheduler(ss).migrateDown()
        ss.transact(_)

    def migrateUp(self):
        """
        Copy this LoginAccount and all associated LoginMethods from my store
        (which is assumed to be a SubStore, most likely a user store) into
        the site store which contains it.
        """
        siteStore = self.store.parent
        def _():
            # No convenience method for the following because needing to do
            # it is *rare*.  It *should* be ugly; 99% of the time if you need
            # to do this you're making a mistake. -glyph
            siteStoreSubRef = siteStore.getItemByID(self.store.idInParent)
            self.cloneInto(siteStore, siteStoreSubRef)
            IScheduler(self.store).migrateUp()
        siteStore.transact(_)

    def cloneInto(self, newStore, avatars):
        """
        Create a copy of this LoginAccount and all associated LoginMethods in
        a different Store.  Return the copied LoginAccount.
        """
        la = LoginAccount(store=newStore,
                          password=self.password,
                          avatars=avatars,
                          disabled=self.disabled)
        for siteMethod in self.store.query(LoginMethod,
                                           LoginMethod.account == self):
            LoginMethod(store=newStore,
                        localpart=siteMethod.localpart,
                        domain=siteMethod.domain,
                        internal=siteMethod.internal,
                        protocol=siteMethod.protocol,
                        verified=siteMethod.verified,
                        account=la)
        return la

    def deleteLoginMethods(self):
        self.store.query(LoginMethod,
                         LoginMethod.account == self).deleteFromStore()

    def addLoginMethod(self, localpart, domain, protocol=ANY_PROTOCOL,
                       verified=False, internal=False):
        """
        Add a login method to this account, propagating up or down as
        necessary to site store or user store to maintain consistency.
        """
        # Out takes you west or something
        if self.store.parent is None:
            # West takes you in
            otherStore = self.avatars.open()
            peer = otherStore.findUnique(LoginAccount)
        else:
            # In takes you east
            otherStore = self.store.parent
            subStoreItem = self.store.parent.getItemByID(
                self.store.idInParent)
            peer = otherStore.findUnique(
                LoginAccount, LoginAccount.avatars == subStoreItem)

        # Up and down take you home
        for store, account in [(otherStore, peer), (self.store, self)]:
            store.findOrCreate(LoginMethod,
                               account=account,
                               localpart=localpart,
                               domain=domain,
                               protocol=protocol,
                               verified=verified,
                               internal=internal)
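# Illustrative sketch (not part of the original file): attaching a second,
# server-internal email address to an existing account.  ``account`` is
# assumed to be a LoginAccount living in either the site store or a user
# store; addLoginMethod mirrors the method into the peer store either way.

def _exampleAddAlias(account):
    account.addLoginMethod(localpart=u'alice.alias',
                           domain=u'example.com',
                           protocol=u'email',
                           verified=True,
                           internal=True)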
""" la = LoginAccount(store=newStore, password=self.password, avatars=avatars, disabled=self.disabled) for siteMethod in self.store.query(LoginMethod, LoginMethod.account == self): LoginMethod(store=newStore, localpart=siteMethod.localpart, domain=siteMethod.domain, internal=siteMethod.internal, protocol=siteMethod.protocol, verified=siteMethod.verified, account=la) return la def deleteLoginMethods(self): self.store.query(LoginMethod, LoginMethod.account == self).deleteFromStore() def addLoginMethod(self, localpart, domain, protocol=ANY_PROTOCOL, verified=False, internal=False): """ Add a login method to this account, propogating up or down as necessary to site store or user store to maintain consistency. """ # Out takes you west or something if self.store.parent is None: # West takes you in otherStore = self.avatars.open() peer = otherStore.findUnique(LoginAccount) else: # In takes you east otherStore = self.store.parent subStoreItem = self.store.parent.getItemByID(self.store.idInParent) peer = otherStore.findUnique(LoginAccount, LoginAccount.avatars == subStoreItem) # Up and down take you home for store, account in [(otherStore, peer), (self.store, self)]: store.findOrCreate(LoginMethod, account=account, localpart=localpart, domain=domain, protocol=protocol, verified=verified, internal=internal) def insertUserStore(siteStore, userStorePath): """ Move the SubStore at the indicated location into the given site store's directory and then hook it up to the site store's authentication database. @type siteStore: C{Store} @type userStorePath: C{FilePath} """ # The following may, but does not need to be in a transaction, because it # is merely an attempt to guess a reasonable filesystem name to use for # this avatar. The user store being operated on is expected to be used # exclusively by this process. ls = siteStore.findUnique(LoginSystem) unattachedSubStore = Store(userStorePath) for lm in unattachedSubStore.query(LoginMethod, LoginMethod.account == unattachedSubStore.findUnique(LoginAccount), sort=LoginMethod.internal.descending): if ls.accountByAddress(lm.localpart, lm.domain) is None: localpart, domain = lm.localpart, lm.domain break else: raise AllNamesConflict() unattachedSubStore.close() insertLocation = siteStore.newFilePath('account', domain, localpart + '.axiom') insertParentLoc = insertLocation.parent() if not insertParentLoc.exists(): insertParentLoc.makedirs() if insertLocation.exists(): raise DatabaseDirectoryConflict() userStorePath.moveTo(insertLocation) ss = SubStore(store=siteStore, storepath=insertLocation) attachedStore = ss.open() # migrateUp() manages its own transactions because it interacts with two # different stores. attachedStore.findUnique(LoginAccount).migrateUp() def extractUserStore(userAccount, extractionDestination, legacySiteAuthoritative=True): """ Move the SubStore for the given user account out of the given site store completely. Place the user store's database directory into the given destination directory. @type userAccount: C{LoginAccount} @type extractionDestination: C{FilePath} @type legacySiteAuthoritative: C{bool} @param legacySiteAuthoritative: before moving the user store, clear its authentication information, copy that which is associated with it in the site store rather than trusting its own. Currently this flag is necessary (and defaults to true) because things like the ClickChronicle password-changer gizmo still operate on the site store. 
""" if legacySiteAuthoritative: # migrateDown() manages its own transactions, since it is copying items # between two different stores. userAccount.migrateDown() av = userAccount.avatars av.open().close() def _(): # We're separately deleting several Items from the site store, then # we're moving some files. If we cannot move the files, we don't want # to delete the items. # There is one unaccounted failure mode here: if the destination of the # move is on a different mount point, the moveTo operation will fall # back to a non-atomic copy; if all of the copying succeeds, but then # part of the deletion of the source files fails, we will be left # without a complete store in this site store's files directory, but # the account Items will remain. This will cause odd errors on login # and at other unpredictable times. The database is only one file, so # we will either remove it all or none of it. Resolving this requires # manual intervention currently: delete the substore's database # directory and the account items (LoginAccount and LoginMethods) # manually. # However, this failure is extremely unlikely, as it would almost # certainly indicate a misconfiguration of the permissions on the site # store's files area. As described above, a failure of the call to # os.rename(), if the platform's rename is atomic (which it generally # is assumed to be) will not move any files and will cause a revert of # the transaction which would have deleted the accompanying items. av.deleteFromStore() userAccount.deleteLoginMethods() userAccount.deleteFromStore() av.storepath.moveTo(extractionDestination) userAccount.store.transact(_) def upgradeLoginAccount1To2(oldAccount): password = oldAccount.password if password is not None: try: password = password.decode('ascii') except UnicodeDecodeError: password = None newAccount = oldAccount.upgradeVersion( 'login', 1, 2, password=password, avatars=oldAccount.avatars, disabled=oldAccount.disabled) def make(s, acc): LoginMethod( store=s, localpart=oldAccount.username, domain=oldAccount.domain, internal=False, protocol=u'email', account=acc, verified=True) make(newAccount.store, newAccount) ss = newAccount.avatars.open() # create account in substore to represent the user's own record of their # password; moves with them during migrations, etc. subacc = LoginAccount(store=ss, password=newAccount.password, avatars=ss, disabled=newAccount.disabled) make(ss, subacc) from axiom import upgrade upgrade.registerUpgrader(upgradeLoginAccount1To2, 'login', 1, 2) class SubStoreLoginMixin: def makeAvatars(self, domain, username): return SubStore.createNew(self.store, ('account', domain, username + '.axiom')) class LoginBase: """ I am a database powerup which provides an interface to a collection of username/password pairs mapped to user application objects. """ implements(IRealm, ICredentialsChecker) credentialInterfaces = (IUsernamePassword, IUsernameHashedPassword) powerupInterfaces = (IRealm, ICredentialsChecker) def accountByAddress(self, username, domain): """ @type username: C{unicode} without NUL @type domain: C{unicode} without NUL """ for account in self.store.query(LoginAccount, AND(LoginMethod.domain == domain, LoginMethod.localpart == username, LoginAccount.disabled == 0, LoginMethod.account == LoginAccount.storeID)): return account def addAccount(self, username, domain, password, avatars=None, protocol=u'email', disabled=0, internal=False, verified=True): """ Create a user account, add it to this LoginBase, and return it. 
class LoginSystem(Item, LoginBase, SubStoreLoginMixin):
    schemaVersion = 1
    typeName = 'login_system'

    loginCount = integer(default=0)
    failedLogins = integer(default=0)


def getLoginMethods(store, protocol=None):
    """
    Retrieve L{LoginMethod} items from store C{store}, optionally
    constraining them by protocol
    """
    if protocol is not None:
        comp = OR(LoginMethod.protocol == u'*',
                  LoginMethod.protocol == protocol)
    else:
        comp = None
    return store.query(LoginMethod, comp)


def getAccountNames(store, protocol=None):
    """
    Retrieve account name information about the given database.

    @param store: An Axiom Store representing a user account.  It must have
        been opened through the store which contains its account information.

    @return: A generator of two-tuples of (username, domain) which refer to
        the given store.
    """
    return ((meth.localpart, meth.domain)
            for meth in getLoginMethods(store, protocol))
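# Illustrative sketch (not part of the original file): listing the addresses
# that refer to an opened user store.

def _examplePrintAccountNames(userStore):
    # Yields (localpart, domain) pairs, e.g. (u'alice', u'example.com').
    for localpart, domain in getAccountNames(userStore, protocol=u'email'):
        print '%s@%s' % (localpart, domain)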
def getDomainNames(store):
    """
    Retrieve a list of all local domain names represented in the given store.
    """
    domains = set()
    domains.update(store.query(
        LoginMethod,
        AND(LoginMethod.internal == True,
            LoginMethod.domain != None)).getColumn("domain").distinct())
    return sorted(domains)
Axiom-0.6.0/bin/0000755000175000017500000000000011304543322013253 5ustar exarkunexarkunAxiom-0.6.0/bin/axiomatic0000755000175000017500000000010710272262634015164 0ustar exarkunexarkun#!/usr/bin/python
from axiom.scripts import axiomatic
axiomatic.main()
Axiom-0.6.0/twisted/0000755000175000017500000000000011304543322014166 5ustar exarkunexarkunAxiom-0.6.0/twisted/plugins/0000755000175000017500000000000011304543322015647 5ustar exarkunexarkunAxiom-0.6.0/twisted/plugins/axiom_plugins.py0000644000175000017500000000313211117560627021107 0ustar exarkunexarkun# Copyright 2008 Divmod, Inc.  See LICENSE file for details
"""
Axiom plugins for Twisted.
"""

from zope.interface import classProvides

from twisted.plugin import IPlugin, getPlugins
from twisted.python.usage import Options
from twisted.application.service import IServiceMaker, IService, Service


class _CheckSystemVersion(Service):
    """
    A service which, when started, updates the stored version information in
    a store.

    @ivar store: The L{Store} in which to update version information.
    """
    def __init__(self, store):
        self.store = store

    def startService(self):
        from axiom.listversions import checkSystemVersion
        checkSystemVersion(self.store)


class AxiomaticStart(object):
    """
    L{IServiceMaker} plugin which gets an L{IService} from an Axiom store.
    """
    classProvides(IPlugin, IServiceMaker)

    tapname = "axiomatic-start"
    description = "Run an Axiom database (use 'axiomatic start' instead)"

    class options(Options):
        optParameters = [
            ('dbdir', 'd', None, 'Path containing Axiom database to start')]
        optFlags = [('debug', 'b', 'Enable Axiom-level debug logging')]

    def makeService(cls, options):
        """
        Create an L{IService} for the database specified by the given
        configuration.
        """
        from axiom.store import Store
        store = Store(options['dbdir'], debug=options['debug'])
        service = IService(store)
        _CheckSystemVersion(store).setServiceParent(service)
        return service
    makeService = classmethod(makeService)


__all__ = ['AxiomaticStart']
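# Illustrative sketch (not part of the original file): AxiomaticStart is
# discovered the same way as any other twistd service maker, so an
# invocation like "twistd axiomatic-start --dbdir <path>" reaches
# makeService above via the plugin system.

def _exampleFindServiceMaker():
    from twisted.plugin import getPlugins
    from twisted.application.service import IServiceMaker
    for plugin in getPlugins(IServiceMaker):
        if plugin.tapname == 'axiomatic-start':
            return plugin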
Axiom-0.6.0/DEPS.txt0000644000175000017500000000026610444003421013776 0ustar exarkunexarkunPython 2.4
SQLite 3.2.1
Twisted 2.4.0
PySQLite 2.0
NPTL 2.3.5 or later (LinuxThreads not supported: see )
Epsilon 0.5.0
Axiom-0.6.0/LICENSE0000644000175000017500000000203610346653527013527 0ustar exarkunexarkunCopyright (c) 2005 Divmod Inc.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
Axiom-0.6.0/NAME.txt0000644000175000017500000000124110304200243013761 0ustar exarkunexarkun
See: http://mathworld.wolfram.com/Axiom.html

An axiom is a statement taken as true without proof or supporting arguments.

Divmod Axiom is so named because it is a database, and a database is where
you put assertions about the world.  In particular a database is where you
put values which you do not wish to re-calculate; the data that your
computation is based upon.

In this way axiom items are similar to axioms, since (for example) euclidean
geometry can be derived from the set of axioms known as "euclid's
postulates", but those axioms need to be stored independently; they cannot
be derived from anything.

Plus it has an X in it, which sounds neat.
Axiom-0.6.0/README.txt0000644000175000017500000000162210444003421014177 0ustar exarkunexarkun
Divmod Axiom
============

Divmod Axiom is an object database, or alternatively, an object-relational
mapper, implemented on top of Python.

Note: Axiom currently supports only SQLite and does NOT have any features
for dealing with concurrency.  We do plan to add some later, and perhaps
also support other databases in the future.

Its primary goal is to provide an object-oriented layer with what we
consider to be the key aspects of OO, i.e. polymorphism and message
dispatch, without hindering the power of an RDBMS.

Axiom is a live database, not only an SQL generation tool: it includes an
implementation of a scheduler service, external file references, automatic
upgraders, robust failure handling, and Twisted integration.

Axiom is tightly integrated with Twisted, and can store, start, and stop
Twisted services directly from the database using the included 'axiomatic'
command-line tool.
Axiom-0.6.0/setup.py0000644000175000017500000000122511304541016014213 0ustar exarkunexarkunfrom epsilon.setuphelper import autosetup

import axiom

distobj = autosetup(
    name="Axiom",
    version=axiom.version.short(),
    maintainer="Divmod, Inc.",
    maintainer_email="support@divmod.org",
    url="http://divmod.org/trac/wiki/DivmodAxiom",
    license="MIT",
    platforms=["any"],
    description="An in-process object-relational database",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Framework :: Twisted",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
        "Topic :: Database"],

    scripts=['bin/axiomatic'])
Axiom-0.6.0/PKG-INFO0000644000175000017500000000077511304543322013611 0ustar exarkunexarkunMetadata-Version: 1.0
Name: Axiom
Version: 0.6.0
Summary: An in-process object-relational database
Home-page: http://divmod.org/trac/wiki/DivmodAxiom
Author: Divmod, Inc.
Author-email: support@divmod.org
License: MIT
Description: UNKNOWN
Platform: any
Classifier: Development Status :: 5 - Production/Stable
Classifier: Framework :: Twisted
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python
Classifier: Topic :: Database