postgresfixture-0.4.2/0000755000175000017500000000000013544636563016513 5ustar cjwatsoncjwatson00000000000000postgresfixture-0.4.2/PKG-INFO0000644000175000017500000000427013544636563017613 0ustar cjwatsoncjwatson00000000000000Metadata-Version: 2.1 Name: postgresfixture Version: 0.4.2 Summary: A fixture for creating PostgreSQL clusters and databases, and tearing them down again, intended for use during development and testing. Home-page: UNKNOWN License: UNKNOWN Description: .. -*- mode: rst -*- *************** postgresfixture *************** A Python fixture for creating PostgreSQL clusters and databases, and tearing them down again, intended for use during development and testing. For more information see the `Launchpad project page`_. .. _Launchpad project page: https://launchpad.net/postgresfixture Getting started =============== Use like any other fixture:: from contextlib import closing from postgresfixture import ClusterFixture def test_something(self): cluster = self.useFixture(ClusterFixture("db")) cluster.createdb("example") with closing(cluster.connect("example")) as conn: ... cluster.dropbdb("example") # Optional. This will create a new cluster, create a database called "example", then tear it all down at the end; nothing will remain on disk. If you want the cluster and its databases to remain on disk, pass ``preserve=True`` to the ``ClusterFixture`` constructor. From the command line ===================== Once this package is installed, you'll have a ``postgresfixture`` script. Alternatively you can use ``python -m postgresfixture`` to achieve the same thing. Use ``--help`` to discover the options available to you. Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: Intended Audience :: System Administrators Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 3 Classifier: Topic :: Software Development :: Libraries Description-Content-Type: text/x-rst postgresfixture-0.4.2/requirements.txt0000644000175000017500000000011513476464204021767 0ustar cjwatsoncjwatson00000000000000fixtures >= 0.3.8 psycopg2 >= 2.4.4 testtools >= 0.9.14 testscenarios >= 0.4 postgresfixture-0.4.2/setup.py0000755000175000017500000000312213544636555020227 0ustar cjwatsoncjwatson00000000000000#!/usr/bin/env python # Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Distutils installer for postgresfixture.""" from __future__ import ( absolute_import, print_function, ) __metaclass__ = type import codecs from setuptools import ( find_packages, setup, ) with codecs.open("requirements.txt", "rb", encoding="utf-8") as fd: requirements = [line.strip() for line in fd] with open("README.txt") as readme: long_description = readme.read() setup( name='postgresfixture', version="0.4.2", classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', 'Topic :: Software Development :: Libraries', ], packages=find_packages(), install_requires=requirements, tests_require=("testtools >= 0.9.14",), test_suite="postgresfixture.tests", include_package_data=True, zip_safe=False, description=( "A fixture for creating PostgreSQL clusters and databases, and " "tearing them down again, intended for use during development " "and testing."), long_description=long_description, long_description_content_type="text/x-rst", entry_points={ "console_scripts": [ "postgresfixture = postgresfixture.main:main", ], }, ) postgresfixture-0.4.2/MANIFEST.in0000644000175000017500000000003113476464204020236 0ustar cjwatsoncjwatson00000000000000include requirements.txt postgresfixture-0.4.2/postgresfixture/0000755000175000017500000000000013544636563021770 5ustar cjwatsoncjwatson00000000000000postgresfixture-0.4.2/postgresfixture/main.py0000644000175000017500000001452213476464204023265 0ustar cjwatsoncjwatson00000000000000# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Manage a PostgreSQL cluster.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) __metaclass__ = type __all__ = [ "main", ] import argparse from os import ( environ, fdopen, ) import pipes import signal from subprocess import CalledProcessError import sys from time import sleep from postgresfixture.cluster import ( PG_VERSION_MAX, PG_VERSIONS, ) from postgresfixture.clusterfixture import ClusterFixture try: from itertools import imap except ImportError: imap = map # Python 3. def setup(): # Ensure stdout and stderr are line-bufferred. sys.stdout = fdopen(sys.stdout.fileno(), "w", 1) sys.stderr = fdopen(sys.stderr.fileno(), "w", 1) # Run the SIGINT handler on SIGTERM; `svc -d` sends SIGTERM. signal.signal(signal.SIGTERM, signal.default_int_handler) def repr_pid(pid): try: pid = int(pid) except ValueError: return pipes.quote(pid) else: try: with open("/proc/%d/cmdline" % pid, "rb") as fd: cmdline = fd.read().rstrip(b"\0").split(b"\0") except IOError: return "%d (*unknown*)" % pid else: cmdline = (arg.decode("ascii", "replace") for arg in cmdline) return "%d (%s)" % (pid, " ".join(imap(pipes.quote, cmdline))) def locked_by_description(lock): pids = sorted(lock.locked_by) return "locked by:\n* %s" % ( "\n* ".join(imap(repr_pid, pids))) def error(*args, **kwargs): kwargs.setdefault("file", sys.stderr) return print(*args, **kwargs) def action_destroy(cluster, arguments): """Destroy the cluster.""" action_stop(cluster, arguments) cluster.destroy() if cluster.exists: if cluster.shares.locked: message = "%s: cluster is %s" % ( cluster.datadir, locked_by_description(cluster.shares)) else: message = "%s: cluster could not be removed." % cluster.datadir error(message) raise SystemExit(2) def action_run(cluster, arguments): """Create and run the cluster.""" database_name = arguments.dbname command = arguments.command with cluster: if database_name is not None: cluster.createdb(database_name) if command is None or len(command) == 0: while cluster.running: sleep(5.0) else: cluster.execute(*command) def action_shell(cluster, arguments): """Spawn a `psql` shell for a database in the cluster.""" database_name = arguments.dbname with cluster: cluster.createdb(database_name) cluster.shell(database_name) def action_status(cluster, arguments): """Display a message about the state of the cluster. The return code is also set: 0 indicates that the cluster is running; 1 indicates that it exists, but is not running; 2 indicates that it does not exist. """ if cluster.exists: if cluster.running: print("%s: running" % cluster.datadir) raise SystemExit(0) else: print("%s: not running" % cluster.datadir) raise SystemExit(1) else: print("%s: not created" % cluster.datadir) raise SystemExit(2) def action_stop(cluster, arguments): """Stop the cluster.""" cluster.stop() if cluster.running: if cluster.shares.locked: message = "%s: cluster is %s" % ( cluster.datadir, locked_by_description(cluster.shares)) else: message = "%s: cluster is still running." % cluster.datadir error(message) raise SystemExit(2) argument_parser = argparse.ArgumentParser(description=__doc__) argument_parser.add_argument( "-D", "--datadir", dest="datadir", action="store", metavar="PGDATA", default="db", help=( "the directory in which to place, or find, the cluster " "(default: %(default)s)")) argument_parser.add_argument( "--preserve", dest="preserve", action="store_true", default=False, help=( "preserve the cluster and its databases when exiting, " "even if it was necessary to create and start it " "(default: %(default)s)")) argument_parser.add_argument( "--version", dest="version", choices=PG_VERSIONS, default=PG_VERSION_MAX, help=( "The version of PostgreSQL to use (default: %(default)s)")) argument_subparsers = argument_parser.add_subparsers( title="actions") def add_action(name, handler, *args, **kwargs): """Configure a subparser for the given name and function.""" parser = argument_subparsers.add_parser( name, *args, help=handler.__doc__, **kwargs) parser.set_defaults(handler=handler) return parser def get_action(name): """Retrieve the named subparser.""" return argument_subparsers.choices[name] # Register actions. add_action("destroy", action_destroy) add_action("run", action_run) add_action("shell", action_shell) add_action("status", action_status) add_action("stop", action_stop) # Customise argument lists for individual actions. get_action("run").add_argument( "-d", "--dbname", dest="dbname", action="store", metavar="PGDATABASE", default=environ.get("PGDATABASE", None), help=( "if specified, the database to create. The default is taken from " "the PGDATABASE environment variable (current default: " "%(default)s).")) get_action("run").add_argument( "command", nargs="*", default=None, help=( "the command to execute (default: %(default)s)")) get_action("shell").add_argument( "-d", "--dbname", dest="dbname", action="store", metavar="PGDATABASE", default=environ.get("PGDATABASE", "data"), help=( "the database to create and connect to. The default is taken from " "the PGDATABASE environment variable, otherwise 'data' (current " "default: %(default)s).")) def main(args=None): args = argument_parser.parse_args(args) try: setup() cluster = ClusterFixture( datadir=args.datadir, preserve=args.preserve, version=args.version) args.handler(cluster, args) except CalledProcessError as error: raise SystemExit(error.returncode) except KeyboardInterrupt: pass postgresfixture-0.4.2/postgresfixture/clusterfixture.py0000644000175000017500000000754313476464204025436 0ustar cjwatsoncjwatson00000000000000# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Manage a PostgreSQL cluster.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) __metaclass__ = type __all__ = [ "ClusterFixture", ] from errno import ( EEXIST, ENOENT, ENOTEMPTY, ) from os import ( getpid, listdir, makedirs, path, rmdir, unlink, ) from fixtures import Fixture from postgresfixture.cluster import ( Cluster, PG_VERSION_MAX, ) class ProcessSemaphore: """A sort-of-semaphore where it is considered locked if a directory cannot be removed. The locks are taken out one per-process, so this is a way of keeping a reference to a shared resource between processes. """ def __init__(self, lockdir): super(ProcessSemaphore, self).__init__() self.lockdir = lockdir self.lockfile = path.join( self.lockdir, "%d" % getpid()) def acquire(self): try: makedirs(self.lockdir) except OSError as error: if error.errno != EEXIST: raise open(self.lockfile, "w").close() def release(self): try: unlink(self.lockfile) except OSError as error: if error.errno != ENOENT: raise @property def locked(self): try: rmdir(self.lockdir) except OSError as error: if error.errno == ENOTEMPTY: return True elif error.errno == ENOENT: return False else: raise else: return False @property def locked_by(self): try: return [ int(name) if name.isdigit() else name for name in listdir(self.lockdir) ] except OSError as error: if error.errno == ENOENT: return [] else: raise class ClusterFixture(Cluster, Fixture): """A fixture for a `Cluster`.""" def __init__(self, datadir, preserve=False, version=PG_VERSION_MAX): """ @param preserve: Leave the cluster and its databases behind, even if this fixture creates them. """ super(ClusterFixture, self).__init__(datadir, version=version) self.preserve = preserve self.shares = ProcessSemaphore( path.join(self.datadir, "shares")) def setUp(self): super(ClusterFixture, self).setUp() # Only destroy the cluster if we create it... if not self.exists: # ... unless we've been asked to preserve it. if not self.preserve: self.addCleanup(self.destroy) self.create() self.addCleanup(self.stop) self.start() self.addCleanup(self.shares.release) self.shares.acquire() def createdb(self, name): """Create the named database if it does not exist already. Arranges to drop the named database during clean-up, unless `preserve` has been specified. """ if name not in self.databases: super(ClusterFixture, self).createdb(name) if not self.preserve: self.addCleanup(self.dropdb, name) def dropdb(self, name): """Drop the named database if it exists.""" if name in self.databases: super(ClusterFixture, self).dropdb(name) def stop(self): """Stop the cluster, but only if there are no other users.""" if not self.shares.locked: super(ClusterFixture, self).stop() def destroy(self): """Destroy the cluster, but only if there are no other users.""" if not self.shares.locked: super(ClusterFixture, self).destroy() postgresfixture-0.4.2/postgresfixture/tests/0000755000175000017500000000000013544636563023132 5ustar cjwatsoncjwatson00000000000000postgresfixture-0.4.2/postgresfixture/tests/test_main.py0000644000175000017500000001717613476464204025476 0ustar cjwatsoncjwatson00000000000000# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `postgresfixture.main`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) __metaclass__ = type __all__ = [] from base64 import b16encode from io import StringIO from os import urandom import sys from fixtures import EnvironmentVariableFixture from postgresfixture import main from postgresfixture.cluster import ( PG_VERSION_MAX, PG_VERSIONS, ) from postgresfixture.clusterfixture import ClusterFixture from postgresfixture.testing import TestCase from testtools.matchers import StartsWith class TestActions(TestCase): class Finished(Exception): """A marker exception used for breaking out.""" def get_random_database_name(self): return "db%s" % b16encode(urandom(8)).lower().decode("ascii") def parse_args(self, *args): try: return main.argument_parser.parse_args(args) except SystemExit as error: self.fail("parse_args%r failed with %r" % (args, error)) def test_run(self): cluster = ClusterFixture(self.make_dir()) self.addCleanup(cluster.stop) database_name = self.get_random_database_name() # Instead of sleeping, check the cluster is running, then break out. def sleep_patch(time): self.assertTrue(cluster.running) self.assertIn(database_name, cluster.databases) raise self.Finished self.patch(main, "sleep", sleep_patch) self.assertRaises( self.Finished, main.action_run, cluster, self.parse_args("run", "--dbname", database_name)) def test_run_without_database(self): # A database is not created if it's not specified in the PGDATABASE # environment variable. cluster = ClusterFixture(self.make_dir()) self.addCleanup(cluster.stop) # Erase the PGDATABASE environment variable, if it's set. self.useFixture( EnvironmentVariableFixture("PGDATABASE", None)) # Instead of sleeping, check the cluster is running, then break out. def sleep_patch(time): self.assertTrue(cluster.running) self.assertEqual( {"template0", "template1", "postgres"}, cluster.databases) raise self.Finished self.patch(main, "sleep", sleep_patch) self.assertRaises( self.Finished, main.action_run, cluster, self.parse_args("run")) def test_shell(self): cluster = ClusterFixture(self.make_dir()) self.addCleanup(cluster.stop) database_name = self.get_random_database_name() def shell_patch(database): self.assertEqual(database_name, database) raise self.Finished self.patch(cluster, "shell", shell_patch) self.assertRaises( self.Finished, main.action_shell, cluster, self.parse_args("shell", "--dbname", database_name)) def test_status_running(self): cluster = ClusterFixture(self.make_dir()) self.addCleanup(cluster.stop) cluster.start() self.patch(sys, "stdout", StringIO()) code = self.assertRaises( SystemExit, main.action_status, cluster, self.parse_args("status")).code self.assertEqual(0, code) self.assertEqual( "%s: running\n" % cluster.datadir, sys.stdout.getvalue()) def test_status_not_running(self): cluster = ClusterFixture(self.make_dir()) cluster.create() self.patch(sys, "stdout", StringIO()) code = self.assertRaises( SystemExit, main.action_status, cluster, self.parse_args("status")).code self.assertEqual(1, code) self.assertEqual( "%s: not running\n" % cluster.datadir, sys.stdout.getvalue()) def test_status_not_created(self): cluster = ClusterFixture(self.make_dir()) self.patch(sys, "stdout", StringIO()) code = self.assertRaises( SystemExit, main.action_status, cluster, self.parse_args("status")).code self.assertEqual(2, code) self.assertEqual( "%s: not created\n" % cluster.datadir, sys.stdout.getvalue()) def test_stop(self): cluster = ClusterFixture(self.make_dir()) self.addCleanup(cluster.stop) cluster.start() main.action_stop(cluster, self.parse_args("stop")) self.assertFalse(cluster.running) self.assertTrue(cluster.exists) def test_stop_when_share_locked(self): cluster = ClusterFixture(self.make_dir()) self.addCleanup(cluster.stop) cluster.start() self.addCleanup(cluster.shares.release) cluster.shares.acquire() self.patch(sys, "stderr", StringIO()) error = self.assertRaises( SystemExit, main.action_stop, cluster, self.parse_args("stop")) self.assertEqual(2, error.code) self.assertThat( sys.stderr.getvalue(), StartsWith( "%s: cluster is locked by:" % cluster.datadir)) self.assertTrue(cluster.running) def test_destroy(self): cluster = ClusterFixture(self.make_dir()) self.addCleanup(cluster.stop) cluster.start() main.action_destroy(cluster, self.parse_args("destroy")) self.assertFalse(cluster.running) self.assertFalse(cluster.exists) def test_destroy_when_share_locked(self): cluster = ClusterFixture(self.make_dir()) cluster.create() cluster.shares.acquire() self.patch(sys, "stderr", StringIO()) error = self.assertRaises( SystemExit, main.action_destroy, cluster, self.parse_args("destroy")) self.assertEqual(2, error.code) self.assertThat( sys.stderr.getvalue(), StartsWith( "%s: cluster is locked by:" % cluster.datadir)) self.assertTrue(cluster.exists) class TestVersion(TestCase): def patch_pg_versions(self, versions): PG_VERSIONS[:] = versions def test_uses_supplied_version(self): # Reset PG_VERSIONS after the test has run. self.addCleanup(self.patch_pg_versions, list(PG_VERSIONS)) self.patch_pg_versions(["1.1", "2.2", "3.3"]) # Record calls to our patched handler. handler_calls = [] def handler(cluster, args): handler_calls.append((cluster, args)) self.patch( main.get_action("status"), "_defaults", {"handler": handler}) # Prevent main() from altering terminal settings. self.patch(main, "setup", lambda: None) # The version chosen is picked up by the argument parser and # passed into the Cluster constructor. main.main(["--version", "2.2", "status"]) self.assertEqual( [("2.2", "2.2")], [(cluster.version, args.version) for (cluster, args) in handler_calls]) def test_uses_default_version(self): # Record calls to our patched handler. handler_calls = [] def handler(cluster, args): handler_calls.append((cluster, args)) self.patch( main.get_action("status"), "_defaults", {"handler": handler}) # The argument parser has the default version and passes it into # the Cluster constructor. main.main(["status"]) self.assertEqual( [(PG_VERSION_MAX, PG_VERSION_MAX)], [(cluster.version, args.version) for (cluster, args) in handler_calls]) postgresfixture-0.4.2/postgresfixture/tests/test_cluster.py0000644000175000017500000002273113476464204026224 0ustar cjwatsoncjwatson00000000000000# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `postgresfixture.cluster`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) __metaclass__ = type __all__ = [] from contextlib import closing import io from os import ( getpid, path, ) import subprocess from subprocess import ( CalledProcessError, PIPE, Popen, ) import sys from textwrap import dedent import postgresfixture.cluster from postgresfixture.cluster import ( Cluster, get_pg_bin, path_with_pg_bin, PG_VERSIONS, ) from postgresfixture.main import repr_pid from postgresfixture.testing import TestCase from postgresfixture.utils import LockAlreadyTaken from testscenarios import WithScenarios from testtools import ExpectedException from testtools.content import text_content from testtools.matchers import ( DirExists, FileExists, Not, StartsWith, ) class TestFunctions(WithScenarios, TestCase): scenarios = sorted( (version, {"version": version}) for version in PG_VERSIONS ) def test_path_with_pg_bin(self): pg_bin = get_pg_bin(self.version) self.assertEqual(pg_bin, path_with_pg_bin("", self.version)) self.assertEqual( pg_bin + path.pathsep + "/bin:/usr/bin", path_with_pg_bin("/bin:/usr/bin", self.version)) def test_repr_pid_not_a_number(self): self.assertEqual("alice", repr_pid("alice")) self.assertEqual("'alice and bob'", repr_pid("alice and bob")) def test_repr_pid_not_a_process(self): self.assertEqual("0 (*unknown*)", repr_pid(0)) def test_repr_pid_this_process(self): pid = getpid() self.assertThat(repr_pid(pid), StartsWith("%d (" % pid)) class TestCluster(WithScenarios, TestCase): scenarios = sorted( (version, {"version": version}) for version in PG_VERSIONS ) def make(self, *args, **kwargs): kwargs.setdefault("version", self.version) return Cluster(*args, **kwargs) def test_init(self): # The datadir passed into the Cluster constructor is resolved to an # absolute path. tmpdir = self.make_dir() datadir = path.join(tmpdir, "somewhere") cluster = self.make(path.relpath(datadir)) self.assertEqual(datadir, cluster.datadir) # The lock file is in the parent directory of the data directory. self.assertEqual( path.join(tmpdir, ".somewhere.lock"), cluster.lock.path) def test_lock(self): # To test the lock - based on lockf - we take the lock locally then # check if it appears locked from a separate process. cluster = self.make(self.make_dir()) script = dedent("""\ from errno import EAGAIN from fcntl import LOCK_EX, LOCK_NB, lockf with open(%r, "ab") as fd: try: lockf(fd, LOCK_EX | LOCK_NB) except IOError as error: if error.errno != EAGAIN: raise else: raise AssertionError("Not locked") """) % cluster.lock.path with cluster.lock.exclusive: process = Popen( sys.executable, stdin=PIPE, stdout=PIPE, stderr=PIPE) stdout, stderr = process.communicate(script.encode("ascii")) self.addDetail("stdout", text_content(stdout.decode("ascii"))) self.addDetail("stderr", text_content(stderr.decode("ascii"))) self.assertEqual(0, process.returncode) def test_exclusive_lock_is_not_reentrant(self): # The lock cannot be acquired more than once. cluster = self.make(self.make_dir()) with cluster.lock.exclusive: with ExpectedException(LockAlreadyTaken): with cluster.lock.exclusive: pass # We won't get here. def test_shared_lock_is_not_reentrant(self): # The lock cannot be acquired more than once. cluster = self.make(self.make_dir()) with cluster.lock.shared: with ExpectedException(LockAlreadyTaken): with cluster.lock.shared: pass # We won't get here. def patch_check_call(self, returncode=0): calls = [] def check_call(command, **options): calls.append((command, options)) if returncode != 0: raise CalledProcessError(returncode, command) self.patch(postgresfixture.cluster, "check_call", check_call) return calls def test_execute(self): calls = self.patch_check_call() cluster = self.make(self.make_dir()) cluster.execute("true") [(command, options)] = calls self.assertEqual(("true",), command) self.assertIn("env", options) env = options["env"] self.assertEqual(cluster.datadir, env.get("PGDATA")) self.assertEqual(cluster.datadir, env.get("PGHOST")) self.assertThat( env.get("PATH", ""), StartsWith(get_pg_bin(self.version) + path.pathsep)) def test_exists(self): cluster = self.make(self.make_dir()) # The PG_VERSION file is used as a marker of existence. version_file = path.join(cluster.datadir, "PG_VERSION") self.assertThat(version_file, Not(FileExists())) self.assertFalse(cluster.exists) open(version_file, "wb").close() self.assertTrue(cluster.exists) def test_pidfile(self): self.assertEqual( "/some/where/postmaster.pid", self.make("/some/where").pidfile) def test_logfile(self): self.assertEqual( "/some/where/backend.log", self.make("/some/where").logfile) def test_running_calls_pg_ctl(self): calls = self.patch_check_call(returncode=0) cluster = self.make(self.make_dir()) self.assertTrue(cluster.running) [(command, options)] = calls self.assertEqual(("pg_ctl", "status"), command) def test_running(self): cluster = self.make(self.make_dir()) cluster.start() self.assertTrue(cluster.running) def test_running_not(self): cluster = self.make(self.make_dir()) self.assertFalse(cluster.running) def test_running_error(self): self.patch_check_call(returncode=2) # Unrecognised code. cluster = self.make(self.make_dir()) self.assertRaises( CalledProcessError, getattr, cluster, "running") def test_running_captures_stderr(self): # stderr is captured when running pg_ctl and not normally printed. def check_call(command, **options): return subprocess.check_call( ('/bin/sh', '-c', 'echo foobar >&2 && exit 0'), **options) self.patch(postgresfixture.cluster, "check_call", check_call) self.patch(sys, "stderr", io.BytesIO()) cluster = self.make(self.make_dir()) self.assertTrue(cluster.running) self.assertEqual(b"", sys.stderr.getvalue()) def test_running_captures_and_replays_stderr_on_error(self): # stderr is captured when running pg_ctl and replayed on error. def check_call(command, **options): return subprocess.check_call( ('/bin/sh', '-c', 'echo foobar >&2 && exit 2'), **options) self.patch(postgresfixture.cluster, "check_call", check_call) self.patch(sys, "stderr", io.BytesIO()) cluster = self.make(self.make_dir()) self.assertRaises(CalledProcessError, getattr, cluster, "running") self.assertEqual(b"foobar\n", sys.stderr.getvalue()) def test_create(self): cluster = self.make(self.make_dir()) cluster.create() self.assertTrue(cluster.exists) self.assertFalse(cluster.running) def test_start_and_stop(self): cluster = self.make(self.make_dir()) cluster.create() try: cluster.start() self.assertTrue(cluster.running) finally: cluster.stop() self.assertFalse(cluster.running) def test_connect(self): cluster = self.make(self.make_dir()) cluster.create() self.addCleanup(cluster.stop) cluster.start() with closing(cluster.connect()) as conn: with closing(conn.cursor()) as cur: cur.execute("SELECT 1") self.assertEqual([(1,)], cur.fetchall()) def test_databases(self): cluster = self.make(self.make_dir()) cluster.create() self.addCleanup(cluster.stop) cluster.start() self.assertEqual( {"postgres", "template0", "template1"}, cluster.databases) def test_createdb_and_dropdb(self): cluster = self.make(self.make_dir()) cluster.create() self.addCleanup(cluster.stop) cluster.start() cluster.createdb("setherial") self.assertEqual( {"postgres", "template0", "template1", "setherial"}, cluster.databases) cluster.dropdb("setherial") self.assertEqual( {"postgres", "template0", "template1"}, cluster.databases) def test_destroy(self): cluster = self.make(self.make_dir()) cluster.create() cluster.destroy() self.assertFalse(cluster.exists) self.assertFalse(cluster.running) self.assertThat(cluster.datadir, Not(DirExists())) postgresfixture-0.4.2/postgresfixture/tests/test_clusterfixture.py0000644000175000017500000001324513476464204027633 0ustar cjwatsoncjwatson00000000000000# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `postgresfixture.clusterfixture`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) __metaclass__ = type __all__ = [] from os import ( getpid, path, ) from postgresfixture.clusterfixture import ( ClusterFixture, ProcessSemaphore, ) from postgresfixture.testing import TestCase from postgresfixture.tests import test_cluster from testtools.matchers import ( FileExists, Not, ) class TestProcessSemaphore(TestCase): def test_init(self): lockdir = self.make_dir() psem = ProcessSemaphore(lockdir) self.assertEqual(lockdir, psem.lockdir) self.assertEqual( path.join(lockdir, "%s" % getpid()), psem.lockfile) def test_acquire(self): psem = ProcessSemaphore( path.join(self.make_dir(), "locks")) psem.acquire() self.assertThat(psem.lockfile, FileExists()) self.assertTrue(psem.locked) self.assertEqual([getpid()], psem.locked_by) def test_release(self): psem = ProcessSemaphore( path.join(self.make_dir(), "locks")) psem.acquire() psem.release() self.assertThat(psem.lockfile, Not(FileExists())) self.assertFalse(psem.locked) self.assertEqual([], psem.locked_by) class TestClusterFixture(test_cluster.TestCluster): def make(self, *args, **kwargs): kwargs.setdefault("version", self.version) fixture = ClusterFixture(*args, **kwargs) # Run the basic fixture set-up so that clean-ups can be added. super(ClusterFixture, fixture).setUp() return fixture def test_init_fixture(self): fixture = self.make("/some/where") self.assertEqual(False, fixture.preserve) self.assertIsInstance(fixture.shares, ProcessSemaphore) self.assertEqual( path.join(fixture.datadir, "shares"), fixture.shares.lockdir) def test_createdb_no_preserve(self): fixture = self.make(self.make_dir(), preserve=False) self.addCleanup(fixture.stop) fixture.start() fixture.createdb("danzig") self.assertIn("danzig", fixture.databases) # The database is only created if it does not already exist. fixture.createdb("danzig") # Creating a database arranges for it to be dropped when stopping the # fixture. fixture.cleanUp() self.assertNotIn("danzig", fixture.databases) def test_createdb_preserve(self): fixture = self.make(self.make_dir(), preserve=True) self.addCleanup(fixture.stop) fixture.start() fixture.createdb("emperor") self.assertIn("emperor", fixture.databases) # The database is only created if it does not already exist. fixture.createdb("emperor") # Creating a database arranges for it to be dropped when stopping the # fixture. fixture.cleanUp() self.assertIn("emperor", fixture.databases) def test_dropdb(self): fixture = self.make(self.make_dir()) self.addCleanup(fixture.stop) fixture.start() # The database is only dropped if it exists. fixture.dropdb("diekrupps") fixture.dropdb("diekrupps") # The test is that we arrive here without error. def test_stop_share_locked(self): # The cluster is not stopped if a shared lock is held. fixture = self.make(self.make_dir()) self.addCleanup(fixture.stop) fixture.start() fixture.shares.acquire() fixture.stop() self.assertTrue(fixture.running) fixture.shares.release() fixture.stop() self.assertFalse(fixture.running) def test_destroy_share_locked(self): # The cluster is not destroyed if a shared lock is held. fixture = self.make(self.make_dir()) fixture.create() fixture.shares.acquire() fixture.destroy() self.assertTrue(fixture.exists) fixture.shares.release() fixture.destroy() self.assertFalse(fixture.exists) def test_use_no_preserve(self): # The cluster is stopped and destroyed when preserve=False. with self.make(self.make_dir(), preserve=False) as fixture: self.assertTrue(fixture.exists) self.assertTrue(fixture.running) self.assertFalse(fixture.exists) self.assertFalse(fixture.running) def test_use_no_preserve_cluster_already_exists(self): # The cluster is stopped but *not* destroyed when preserve=False if it # existed before the fixture was put into use. fixture = self.make(self.make_dir(), preserve=False) fixture.create() with fixture: self.assertTrue(fixture.exists) self.assertTrue(fixture.running) self.assertTrue(fixture.exists) self.assertFalse(fixture.running) def test_use_preserve(self): # The cluster is not stopped and destroyed when preserve=True. with self.make(self.make_dir(), preserve=True) as fixture: self.assertTrue(fixture.exists) self.assertTrue(fixture.running) fixture.createdb("gallhammer") self.assertTrue(fixture.exists) self.assertFalse(fixture.running) self.addCleanup(fixture.stop) fixture.start() self.assertIn("gallhammer", fixture.databases) def test_namespace(self): # ClusterFixture is in the postgresfixture namespace. import postgresfixture self.assertIs(postgresfixture.ClusterFixture, ClusterFixture) postgresfixture-0.4.2/postgresfixture/tests/__init__.py0000644000175000017500000000000013476464204025224 0ustar cjwatsoncjwatson00000000000000postgresfixture-0.4.2/postgresfixture/cluster.py0000644000175000017500000002001613476464204024015 0ustar cjwatsoncjwatson00000000000000# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Manage a PostgreSQL cluster.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) __metaclass__ = type __all__ = [ "Cluster", "PG_VERSION_MAX", "PG_VERSIONS", ] from contextlib import closing from distutils.version import LooseVersion from glob import iglob from os import ( devnull, environ, makedirs, path, ) import pipes from shutil import ( copyfileobj, rmtree, ) from subprocess import ( CalledProcessError, check_call, ) import sys from tempfile import TemporaryFile from postgresfixture.utils import LockFile import psycopg2 PG_BASE = "/usr/lib/postgresql" PG_VERSION_BINS = { path.basename(pgdir): path.join(pgdir, "bin") for pgdir in iglob(path.join(PG_BASE, "*")) if path.exists(path.join(pgdir, "bin", "pg_ctl")) } PG_VERSION_MAX = max(PG_VERSION_BINS, key=LooseVersion) PG_VERSIONS = sorted(PG_VERSION_BINS, key=LooseVersion) def get_pg_bin(version): """Return the PostgreSQL ``bin`` directory for the given `version`.""" return PG_VERSION_BINS[version] def path_with_pg_bin(exe_path, version): """Return `exe_path` with the PostgreSQL ``bin`` directory added.""" exe_path = [ elem for elem in exe_path.split(path.pathsep) if len(elem) != 0 and not elem.isspace() ] pg_bin = get_pg_bin(version) if pg_bin not in exe_path: exe_path.insert(0, pg_bin) return path.pathsep.join(exe_path) class Cluster: """Represents a PostgreSQL cluster, running or not.""" def __init__(self, datadir, version=PG_VERSION_MAX): self.datadir = path.abspath(datadir) self.version = version self.lock = LockFile(path.join( path.dirname(self.datadir), ".%s.lock" % path.basename(self.datadir))) def execute(self, *command, **options): """Execute a command with an environment suitable for this cluster.""" env = options.pop("env", environ).copy() env["PATH"] = path_with_pg_bin(env.get("PATH", ""), self.version) env["PGDATA"] = env["PGHOST"] = self.datadir check_call(command, env=env, **options) @property def exists(self): """Whether or not this cluster exists on disk.""" version_file = path.join(self.datadir, "PG_VERSION") return path.exists(version_file) @property def pidfile(self): """The (expected) pidfile for a running cluster. Does *not* guarantee that the pidfile exists. """ return path.join(self.datadir, "postmaster.pid") @property def logfile(self): """The log file used (or will be used) by this cluster.""" return path.join(self.datadir, "backend.log") @property def running(self): """Whether this cluster is running or not.""" with open(devnull, "wb") as stdout, TemporaryFile() as stderr: try: self.execute("pg_ctl", "status", stdout=stdout, stderr=stderr) except CalledProcessError as error: # PostgreSQL has evolved to return different error codes in # later versions, so here we check for specific codes to avoid # masking errors from insufficient permissions or missing # executables, for example. version = LooseVersion(self.version) if version >= LooseVersion("9.4"): if error.returncode == 3: # 3 means that the data directory is present and # accessible but that the server is not running. return False elif error.returncode == 4: # 4 means that the data directory is not present or is # not accessible. If it's missing, then the server is # not running. If it is present but not accessible # then crash because we can't know if the server is # running or not. if not self.exists: return False elif version >= LooseVersion("9.2"): if error.returncode == 3: # 3 means that the data directory is present and # accessible but that the server is not running OR # that the data directory is not present. return False else: if error.returncode == 1: # 1 means that the server is not running OR the data # directory is not present OR that the data directory # is not accessible. return False # This is not a recognised error. First print out the cached # stderr then re-raise the CalledProcessError. try: stderr.seek(0) # Rewind. copyfileobj(stderr, sys.stderr) finally: raise else: return True def create(self): """Create this cluster, if it does not exist.""" with self.lock.exclusive: self._create() def _create(self): if not self.exists: if not path.isdir(self.datadir): makedirs(self.datadir) self.execute("pg_ctl", "init", "-s", "-o", "-E utf8 -A trust") def start(self): """Start this cluster, if it's not already started.""" with self.lock.exclusive: self._start() def _start(self): if not self.running: self._create() # pg_ctl options: # -l -- log file. # -s -- no informational messages. # -w -- wait until startup is complete. # postgres options: # -h -- host name; empty arg means Unix socket only. # -F -- don't bother fsync'ing. # -k -- socket directory. self.execute( "pg_ctl", "start", "-l", self.logfile, "-s", "-w", "-o", "-h '' -F -k %s" % pipes.quote(self.datadir)) def connect(self, database="template1", autocommit=True): """Connect to this cluster.""" connection = psycopg2.connect( database=database, host=self.datadir) connection.autocommit = autocommit return connection def shell(self, database="template1"): self.execute("psql", "--quiet", "--", database) @property def databases(self): """The names of databases in this cluster.""" with closing(self.connect("postgres")) as conn: with closing(conn.cursor()) as cur: cur.execute("SELECT datname FROM pg_catalog.pg_database") return {name for (name,) in cur.fetchall()} def createdb(self, name): """Create the named database.""" with closing(self.connect()) as conn: with closing(conn.cursor()) as cur: cur.execute("CREATE DATABASE %s" % name) def dropdb(self, name): """Drop the named database.""" with closing(self.connect()) as conn: with closing(conn.cursor()) as cur: cur.execute("DROP DATABASE %s" % name) def stop(self): """Stop this cluster, if started.""" with self.lock.exclusive: self._stop() def _stop(self): if self.running: # pg_ctl options: # -w -- wait for shutdown to complete. # -m -- shutdown mode. self.execute("pg_ctl", "stop", "-s", "-w", "-m", "fast") def destroy(self): """Destroy this cluster, if it exists. The cluster will be stopped if it's started. """ with self.lock.exclusive: self._destroy() def _destroy(self): if self.exists: self._stop() rmtree(self.datadir) postgresfixture-0.4.2/postgresfixture/testing/0000755000175000017500000000000013544636563023445 5ustar cjwatsoncjwatson00000000000000postgresfixture-0.4.2/postgresfixture/testing/__init__.py0000644000175000017500000000102413476464204025546 0ustar cjwatsoncjwatson00000000000000# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Testing resources for `postgresfixture`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) __metaclass__ = type __all__ = [ "TestCase", ] from fixtures import TempDir import testtools class TestCase(testtools.TestCase): """Convenience subclass.""" def make_dir(self): return self.useFixture(TempDir()).path postgresfixture-0.4.2/postgresfixture/__init__.py0000644000175000017500000000061313476464204024074 0ustar cjwatsoncjwatson00000000000000# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """PostgreSQL cluster fixture.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) __metaclass__ = type __all__ = [ "ClusterFixture", ] from postgresfixture.clusterfixture import ClusterFixture postgresfixture-0.4.2/postgresfixture/__main__.py0000644000175000017500000000057713476464204024066 0ustar cjwatsoncjwatson00000000000000# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Entry point for `postgresfixture` on the command-line.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) __metaclass__ = type __all__ = [] from postgresfixture.main import main main() postgresfixture-0.4.2/postgresfixture/utils.py0000644000175000017500000000410713476464204023477 0ustar cjwatsoncjwatson00000000000000# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Manage a PostgreSQL cluster.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) __metaclass__ = type __all__ = [ "LockFile", "LockAlreadyTaken", ] from contextlib import contextmanager from fcntl import ( LOCK_EX, LOCK_SH, LOCK_UN, lockf, ) import io import os import threading class LockFile: """A non-reentrant lock based on `lockf`.""" def __init__(self, path): super(LockFile, self).__init__() self._lock = threading.Lock() self._path = path @property def exclusive(self): """A context manager for an exclusive lock.""" return self._take(LOCK_EX) @property def shared(self): """A context manager for a shared lock.""" return self._take(LOCK_SH) @property def path(self): """The filesystem path to the lock file.""" return self._path @contextmanager def _take(self, mode): if self._lock.acquire(False): try: # Open with os.open() so that we create the file even when # opening read-only, using O_CREAT. Then we have to do a # little dance with io.open for lockf's benefit. oflag = os.O_RDWR if mode == LOCK_EX else os.O_RDONLY fileno = os.open(self._path, os.O_CREAT | oflag, 0o600) fmode = "ab" if mode == LOCK_EX else "rb" with io.open(fileno, fmode) as handle: lockf(handle, mode) try: yield finally: lockf(handle, LOCK_UN) finally: self._lock.release() else: raise LockAlreadyTaken(self) class LockAlreadyTaken(Exception): """A lock has already been taken.""" def __init__(self, lock): super(LockAlreadyTaken, self).__init__(lock.path) self.lock = lock postgresfixture-0.4.2/setup.cfg0000644000175000017500000000004613544636563020334 0ustar cjwatsoncjwatson00000000000000[egg_info] tag_build = tag_date = 0 postgresfixture-0.4.2/README.txt0000644000175000017500000000231613476464204020206 0ustar cjwatsoncjwatson00000000000000.. -*- mode: rst -*- *************** postgresfixture *************** A Python fixture for creating PostgreSQL clusters and databases, and tearing them down again, intended for use during development and testing. For more information see the `Launchpad project page`_. .. _Launchpad project page: https://launchpad.net/postgresfixture Getting started =============== Use like any other fixture:: from contextlib import closing from postgresfixture import ClusterFixture def test_something(self): cluster = self.useFixture(ClusterFixture("db")) cluster.createdb("example") with closing(cluster.connect("example")) as conn: ... cluster.dropbdb("example") # Optional. This will create a new cluster, create a database called "example", then tear it all down at the end; nothing will remain on disk. If you want the cluster and its databases to remain on disk, pass ``preserve=True`` to the ``ClusterFixture`` constructor. From the command line ===================== Once this package is installed, you'll have a ``postgresfixture`` script. Alternatively you can use ``python -m postgresfixture`` to achieve the same thing. Use ``--help`` to discover the options available to you. postgresfixture-0.4.2/postgresfixture.egg-info/0000755000175000017500000000000013544636563023462 5ustar cjwatsoncjwatson00000000000000postgresfixture-0.4.2/postgresfixture.egg-info/PKG-INFO0000644000175000017500000000427013544636563024562 0ustar cjwatsoncjwatson00000000000000Metadata-Version: 2.1 Name: postgresfixture Version: 0.4.2 Summary: A fixture for creating PostgreSQL clusters and databases, and tearing them down again, intended for use during development and testing. Home-page: UNKNOWN License: UNKNOWN Description: .. -*- mode: rst -*- *************** postgresfixture *************** A Python fixture for creating PostgreSQL clusters and databases, and tearing them down again, intended for use during development and testing. For more information see the `Launchpad project page`_. .. _Launchpad project page: https://launchpad.net/postgresfixture Getting started =============== Use like any other fixture:: from contextlib import closing from postgresfixture import ClusterFixture def test_something(self): cluster = self.useFixture(ClusterFixture("db")) cluster.createdb("example") with closing(cluster.connect("example")) as conn: ... cluster.dropbdb("example") # Optional. This will create a new cluster, create a database called "example", then tear it all down at the end; nothing will remain on disk. If you want the cluster and its databases to remain on disk, pass ``preserve=True`` to the ``ClusterFixture`` constructor. From the command line ===================== Once this package is installed, you'll have a ``postgresfixture`` script. Alternatively you can use ``python -m postgresfixture`` to achieve the same thing. Use ``--help`` to discover the options available to you. Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: Intended Audience :: System Administrators Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 3 Classifier: Topic :: Software Development :: Libraries Description-Content-Type: text/x-rst postgresfixture-0.4.2/postgresfixture.egg-info/entry_points.txt0000644000175000017500000000007713544636563026764 0ustar cjwatsoncjwatson00000000000000[console_scripts] postgresfixture = postgresfixture.main:main postgresfixture-0.4.2/postgresfixture.egg-info/dependency_links.txt0000644000175000017500000000000113544636563027530 0ustar cjwatsoncjwatson00000000000000 postgresfixture-0.4.2/postgresfixture.egg-info/requires.txt0000644000175000017500000000010513544636563026056 0ustar cjwatsoncjwatson00000000000000fixtures>=0.3.8 psycopg2>=2.4.4 testscenarios>=0.4 testtools>=0.9.14 postgresfixture-0.4.2/postgresfixture.egg-info/not-zip-safe0000644000175000017500000000000113524110537025672 0ustar cjwatsoncjwatson00000000000000 postgresfixture-0.4.2/postgresfixture.egg-info/top_level.txt0000644000175000017500000000002013544636563026204 0ustar cjwatsoncjwatson00000000000000postgresfixture postgresfixture-0.4.2/postgresfixture.egg-info/SOURCES.txt0000644000175000017500000000124413544636563025347 0ustar cjwatsoncjwatson00000000000000MANIFEST.in README.txt requirements.txt setup.py postgresfixture/__init__.py postgresfixture/__main__.py postgresfixture/cluster.py postgresfixture/clusterfixture.py postgresfixture/main.py postgresfixture/utils.py postgresfixture.egg-info/PKG-INFO postgresfixture.egg-info/SOURCES.txt postgresfixture.egg-info/dependency_links.txt postgresfixture.egg-info/entry_points.txt postgresfixture.egg-info/not-zip-safe postgresfixture.egg-info/requires.txt postgresfixture.egg-info/top_level.txt postgresfixture/testing/__init__.py postgresfixture/tests/__init__.py postgresfixture/tests/test_cluster.py postgresfixture/tests/test_clusterfixture.py postgresfixture/tests/test_main.py