ceph-deploy-1.4.0/0000755000076500000240000000000012312561302014457 5ustar alfredostaff00000000000000ceph-deploy-1.4.0/ceph_deploy/0000755000076500000240000000000012312561302016752 5ustar alfredostaff00000000000000ceph-deploy-1.4.0/ceph_deploy/__init__.py0000644000076500000240000000003012312111700021045 0ustar alfredostaff00000000000000 __version__ = '1.4.0' ceph-deploy-1.4.0/ceph_deploy/admin.py0000644000076500000240000000316712306157710020432 0ustar alfredostaff00000000000000import logging from cStringIO import StringIO from ceph_deploy import exc from ceph_deploy import conf from ceph_deploy.cliutil import priority from ceph_deploy import hosts LOG = logging.getLogger(__name__) def admin(args): cfg = conf.ceph.load(args) conf_data = StringIO() cfg.write(conf_data) try: with file('%s.client.admin.keyring' % args.cluster, 'rb') as f: keyring = f.read() except: raise RuntimeError('%s.client.admin.keyring not found' % args.cluster) errors = 0 for hostname in args.client: LOG.debug('Pushing admin keys and conf to %s', hostname) try: distro = hosts.get(hostname, username=args.username) hostname = distro.conn.remote_module.shortname() distro.conn.remote_module.write_conf( args.cluster, conf_data.getvalue(), args.overwrite_conf, ) distro.conn.remote_module.write_file( '/etc/ceph/%s.client.admin.keyring' % args.cluster, keyring ) distro.conn.exit() except RuntimeError as e: LOG.error(e) errors += 1 if errors: raise exc.GenericError('Failed to configure %d admin hosts' % errors) @priority(70) def make(parser): """ Push configuration and client.admin key to a remote host. """ parser.add_argument( 'client', metavar='HOST', nargs='*', help='host to configure for ceph administration', ) parser.set_defaults( func=admin, ) ceph-deploy-1.4.0/ceph_deploy/cli.py0000644000076500000240000000741112312111556020100 0ustar alfredostaff00000000000000import pkg_resources import argparse import logging import textwrap import os import sys from string import join import ceph_deploy from ceph_deploy import exc, validate from ceph_deploy.util import log from ceph_deploy.util.decorators import catches LOG = logging.getLogger(__name__) __header__ = textwrap.dedent(""" -^- / \\ |O o| ceph-deploy v%s ).-.( '/|||\` | '|` | '|` """ % ceph_deploy.__version__) def get_parser(): parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description='Easy Ceph deployment\n\n%s' % __header__, ) verbosity = parser.add_mutually_exclusive_group(required=False) verbosity.add_argument( '-v', '--verbose', action='store_true', dest='verbose', default=False, help='be more verbose', ) verbosity.add_argument( '-q', '--quiet', action='store_true', dest='quiet', help='be less verbose', ) parser.add_argument( '--version', action='version', version='%s' % ceph_deploy.__version__, help='the current installed version of ceph-deploy', ) parser.add_argument( '--username', help='the username to connect to the remote host', ) parser.add_argument( '--overwrite-conf', action='store_true', help='overwrite an existing conf file on remote host (if present)', ) parser.add_argument( '--cluster', metavar='NAME', help='name of the cluster', type=validate.alphanumeric, ) sub = parser.add_subparsers( title='commands', metavar='COMMAND', help='description', ) entry_points = [ (ep.name, ep.load()) for ep in pkg_resources.iter_entry_points('ceph_deploy.cli') ] entry_points.sort( key=lambda (name, fn): getattr(fn, 'priority', 100), ) for (name, fn) in entry_points: p = sub.add_parser( name, description=fn.__doc__, help=fn.__doc__, ) 
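        # each plugin's make() callable, loaded from the
        # 'ceph_deploy.cli' entry point group above, receives this
        # sub-parser via fn(p) below and attaches its own arguments
        # plus the ``func`` default that main() finally dispatches to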
# ugly kludge but i really want to have a nice way to access # the program name, with subcommand, later p.set_defaults(prog=p.prog) if not os.environ.get('CEPH_DEPLOY_TEST'): p.set_defaults(cd_conf = ceph_deploy.conf.cephdeploy.load()) fn(p) parser.set_defaults( # we want to hold on to this, for later prog=parser.prog, cluster='ceph', ) return parser @catches((KeyboardInterrupt, RuntimeError, exc.DeployError,)) def main(args=None, namespace=None): parser = get_parser() if len(sys.argv) < 2: parser.print_help() sys.exit() else: args = parser.parse_args(args=args, namespace=namespace) console_loglevel = logging.DEBUG # start at DEBUG for now if args.quiet: console_loglevel = logging.WARNING if args.verbose: console_loglevel = logging.DEBUG # Console Logger sh = logging.StreamHandler() sh.setFormatter(log.color_format()) sh.setLevel(console_loglevel) # File Logger fh = logging.FileHandler('{cluster}.log'.format(cluster=args.cluster)) fh.setLevel(logging.DEBUG) fh.setFormatter(logging.Formatter(log.BASE_FORMAT)) # because we're in a module already, __name__ is not the ancestor of # the rest of the package; use the root as the logger for everyone root_logger = logging.getLogger() # allow all levels at root_logger, handlers control individual levels root_logger.setLevel(logging.DEBUG) root_logger.addHandler(sh) root_logger.addHandler(fh) LOG.info("Invoked (%s): %s" %(ceph_deploy.__version__, join(sys.argv, " "))) return args.func(args) ceph-deploy-1.4.0/ceph_deploy/cliutil.py0000644000076500000240000000027612236715242021007 0ustar alfredostaff00000000000000def priority(num): """ Decorator to add a `priority` attribute to the function. """ def add_priority(fn): fn.priority = num return fn return add_priority ceph-deploy-1.4.0/ceph_deploy/conf/0000755000076500000240000000000012312561302017677 5ustar alfredostaff00000000000000ceph-deploy-1.4.0/ceph_deploy/conf/__init__.py0000644000076500000240000000003612306157710022016 0ustar alfredostaff00000000000000import ceph import cephdeploy ceph-deploy-1.4.0/ceph_deploy/conf/ceph.py0000644000076500000240000000442112306157710021200 0ustar alfredostaff00000000000000import ConfigParser import contextlib from ceph_deploy import exc class _TrimIndentFile(object): def __init__(self, fp): self.fp = fp def readline(self): line = self.fp.readline() return line.lstrip(' \t') class CephConf(ConfigParser.RawConfigParser): def optionxform(self, s): s = s.replace('_', ' ') s = '_'.join(s.split()) return s def safe_get(self, section, key): """ Attempt to get a configuration value from a certain section in a ``cfg`` object but returning None if not found. Avoids the need to be doing try/except {ConfigParser Exceptions} every time. """ try: #Use full parent function so we can replace it in the class # if desired return ConfigParser.RawConfigParser.get(self, section, key) except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): return None def parse(fp): cfg = CephConf() ifp = _TrimIndentFile(fp) cfg.readfp(ifp) return cfg def load(args): path = '{cluster}.conf'.format(cluster=args.cluster) try: f = file(path) except IOError as e: raise exc.ConfigError(e) else: with contextlib.closing(f): return parse(f) def load_raw(args): """ Read the actual file *as is* without parsing/modifiying it so that it can be written maintaining its same properties. 
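    A sketch of the intended contrast with ``load`` above, assuming an
    ``args`` object whose ``cluster`` attribute is ``'ceph'``::

        raw = load_raw(args)   # exact bytes of ./ceph.conf, comments intact
        cfg = load(args)       # parsed (and normalized) CephConf object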
""" path = '{cluster}.conf'.format(cluster=args.cluster) try: with open(path) as ceph_conf: return ceph_conf.read() except (IOError, OSError) as e: raise exc.ConfigError(e) def write_conf(cluster, conf, overwrite): """ write cluster configuration to /etc/ceph/{cluster}.conf """ import os path = '/etc/ceph/{cluster}.conf'.format(cluster=cluster) tmp = '{path}.{pid}.tmp'.format(path=path, pid=os.getpid()) if os.path.exists(path): with file(path, 'rb') as f: old = f.read() if old != conf and not overwrite: raise RuntimeError('config file %s exists with different content; use --overwrite-conf to overwrite' % path) with file(tmp, 'w') as f: f.write(conf) f.flush() os.fsync(f) os.rename(tmp, path) ceph-deploy-1.4.0/ceph_deploy/conf/cephdeploy.py0000644000076500000240000001034512312111556022412 0ustar alfredostaff00000000000000from ConfigParser import SafeConfigParser, NoSectionError, NoOptionError import os from os import path import re cd_conf_template = """ # # ceph-deploy configuration file # [ceph-deploy-global] # Overrides for some of ceph-deploy's global flags, like verbosity or cluster # name [ceph-deploy-install] # Overrides for some of ceph-deploy's install flags, like version of ceph to # install # # Repositories section # # yum repos: # [myrepo] # baseurl = https://user:pass@example.org/rhel6 # gpgurl = https://example.org/keys/release.asc # default = True # extra-repos = cephrepo # will install the cephrepo file too # # [cephrepo] # name=ceph repo noarch packages # baseurl=http://ceph.com/rpm-emperor/el6/noarch # enabled=1 # gpgcheck=1 # type=rpm-md # gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc # apt repos: # [myrepo] # baseurl = https://user:pass@example.org/ # gpgurl = https://example.org/keys/release.asc # default = True # extra-repos = cephrepo # will install the cephrepo file too # # [cephrepo] # baseurl=http://ceph.com/rpm-emperor/el6/noarch # gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc """ def location(): """ Find and return the location of the ceph-deploy configuration file. If this file does not exist, create one in a default location. """ return _locate_or_create() def load(): parser = Conf() parser.read(location()) return parser def _locate_or_create(): home_config = path.expanduser('~/.cephdeploy.conf') # With order of importance locations = [ path.join(path.dirname(os.getcwd()), 'cephdeploy.conf'), home_config, ] for location in locations: if path.exists(location): return location create_stub(home_config) return home_config def create_stub(_path=None): _path = _path or path.expanduser('~/.cephdeploy.conf') with open(_path, 'w') as cd_conf: cd_conf.write(cd_conf_template) class Conf(SafeConfigParser): """ Subclasses from SafeConfigParser to give a few helpers for the ceph-deploy configuration. Specifically, it addresses the need to work with custom sections that signal the usage of custom repositories. """ reserved_sections = ['ceph-deploy-global', 'ceph-deploy-install'] def get_safe(self, section, key, default=None): """ Attempt to get a configuration value from a certain section in a ``cfg`` object but returning None if not found. Avoids the need to be doing try/except {ConfigParser Exceptions} every time. """ try: return self.get(section, key) except (NoSectionError, NoOptionError): return default def get_repos(self): """ Return all the repo sections from the config, excluding the ceph-deploy reserved sections. 
""" return [ section for section in self.sections() if section not in self.reserved_sections ] @property def has_repos(self): """ boolean to reflect having (or not) any repository sections """ for section in self.sections(): if section not in self.reserved_sections: return True return False def get_list(self, section, key): """ Assumes that the value for a given key is going to be a list separated by commas. It gets rid of trailing comments. If just one item is present it returns a list with a single item, if no key is found an empty list is returned. """ value = self.get_safe(section, key, []) if value == []: return value # strip comments value = re.split(r'\s+#', value)[0] # split on commas value = value.split(',') # strip spaces return [x.strip() for x in value] def get_default_repo(self): """ Go through all the repositories defined in the config file and search for a truthy value for the ``default`` key. If there isn't any return None. """ for repo in self.get_repos(): if self.get_safe(repo, 'default') and self.getboolean(repo, 'default'): return repo return False ceph-deploy-1.4.0/ceph_deploy/config.py0000644000076500000240000000546012306157710020605 0ustar alfredostaff00000000000000import logging import os.path from ceph_deploy import exc from ceph_deploy import conf from ceph_deploy.cliutil import priority from ceph_deploy import hosts LOG = logging.getLogger(__name__) def config_push(args): conf_data = conf.ceph.load_raw(args) errors = 0 for hostname in args.client: LOG.debug('Pushing config to %s', hostname) try: distro = hosts.get(hostname, username=args.username) distro.conn.remote_module.write_conf( args.cluster, conf_data, args.overwrite_conf, ) distro.conn.exit() except RuntimeError as e: LOG.error(e) errors += 1 if errors: raise exc.GenericError('Failed to config %d hosts' % errors) def config_pull(args): topath = '{cluster}.conf'.format(cluster=args.cluster) frompath = '/etc/ceph/{cluster}.conf'.format(cluster=args.cluster) errors = 0 for hostname in args.client: try: LOG.debug('Checking %s for %s', hostname, frompath) distro = hosts.get(hostname, username=args.username) conf_file_contents = distro.conn.remote_module.get_file(frompath) if conf_file_contents is not None: LOG.debug('Got %s from %s', frompath, hostname) if os.path.exists(topath): with file(topath, 'rb') as f: existing = f.read() if existing != conf_file_contents and not args.overwrite_conf: LOG.error('local config file %s exists with different content; use --overwrite-conf to overwrite' % topath) raise with file(topath, 'w') as f: f.write(conf_file_contents) return distro.conn.exit() LOG.debug('Empty or missing %s on %s', frompath, hostname) except: LOG.error('Unable to pull %s from %s', frompath, hostname) finally: errors += 1 raise exc.GenericError('Failed to fetch config from %d hosts' % errors) def config(args): if args.subcommand == 'push': config_push(args) elif args.subcommand == 'pull': config_pull(args) else: LOG.error('subcommand %s not implemented', args.subcommand) @priority(70) def make(parser): """ Push configuration file to a remote host. 
""" parser.add_argument( 'subcommand', metavar='SUBCOMMAND', choices=[ 'push', 'pull', ], help='push or pull', ) parser.add_argument( 'client', metavar='HOST', nargs='*', help='host to push/pull the config to/from', ) parser.set_defaults( func=config, ) ceph-deploy-1.4.0/ceph_deploy/connection.py0000644000076500000240000000256212245141667021505 0ustar alfredostaff00000000000000import getpass import socket from ceph_deploy.lib.remoto import Connection def get_connection(hostname, username, logger, threads=5, use_sudo=None): """ A very simple helper, meant to return a connection that will know about the need to use sudo. """ if use_sudo is None: use_sudo = needs_sudo() if username: hostname = "%s@%s" % (username, hostname) try: conn = Connection( hostname, logger=logger, sudo=use_sudo, threads=threads, ) # Set a timeout value in seconds to disconnect and move on # if no data is sent back. conn.global_timeout = 300 logger.debug("connected to host: %s " % hostname) return conn except Exception as error: msg = "connecting to host: %s " % hostname errors = "resulted in errors: %s %s" % (error.__class__.__name__, error) raise RuntimeError(msg + errors) def get_local_connection(logger, use_sudo=False): """ Helper for local connections that are sometimes needed to operate on local hosts """ return get_connection( socket.gethostname(), # cannot rely on 'localhost' here None, logger=logger, threads=1, use_sudo=use_sudo ) def needs_sudo(): if getpass.getuser() == 'root': return False return True ceph-deploy-1.4.0/ceph_deploy/exc.py0000644000076500000240000000300512300743144020104 0ustar alfredostaff00000000000000class DeployError(Exception): """ Unknown deploy error """ def __str__(self): doc = self.__doc__.strip() return ': '.join([doc] + [str(a) for a in self.args]) class UnableToResolveError(DeployError): """ Unable to resolve host """ class ClusterExistsError(DeployError): """ Cluster config exists already """ class ConfigError(DeployError): """ Cannot load config """ class NeedHostError(DeployError): """ No hosts specified to deploy to. """ class NeedMonError(DeployError): """ Cannot find nodes with ceph-mon. """ class NeedDiskError(DeployError): """ Must supply disk/path argument """ class UnsupportedPlatform(DeployError): """ Platform is not supported """ def __init__(self, distro, codename, release): self.distro = distro self.codename = codename self.release = release def __str__(self): return '{doc}: {distro} {codename} {release}'.format( doc=self.__doc__.strip(), distro=self.distro, codename=self.codename, release=self.release, ) class MissingPackageError(DeployError): """ A required package or command is missing """ def __init__(self, message): self.message = message def __str__(self): return self.message class GenericError(DeployError): def __init__(self, message): self.message = message def __str__(self): return self.message ceph-deploy-1.4.0/ceph_deploy/forgetkeys.py0000644000076500000240000000130212245141667021517 0ustar alfredostaff00000000000000import logging import errno from .cliutil import priority LOG = logging.getLogger(__name__) def forgetkeys(args): import os for f in [ 'mon', 'client.admin', 'bootstrap-osd', 'bootstrap-mds', ]: try: os.unlink('{cluster}.{what}.keyring'.format( cluster=args.cluster, what=f, )) except OSError, e: if e.errno == errno.ENOENT: pass else: raise @priority(100) def make(parser): """ Remove authentication keys from the local directory. 
""" parser.set_defaults( func=forgetkeys, ) ceph-deploy-1.4.0/ceph_deploy/gatherkeys.py0000644000076500000240000000432012245141667021506 0ustar alfredostaff00000000000000import os.path import logging from .cliutil import priority from . import hosts LOG = logging.getLogger(__name__) def fetch_file(args, frompath, topath, _hosts): if os.path.exists(topath): LOG.debug('Have %s', topath) return True else: for hostname in _hosts: LOG.debug('Checking %s for %s', hostname, frompath) distro = hosts.get(hostname, username=args.username) key = distro.conn.remote_module.get_file( frompath.format(hostname=hostname) ) if key is not None: LOG.debug('Got %s key from %s.', topath, hostname) with file(topath, 'w') as f: f.write(key) return True distro.conn.exit() LOG.warning('Unable to find %s on %s', frompath, _hosts) return False def gatherkeys(args): ret = 0 # client.admin r = fetch_file( args=args, frompath='/etc/ceph/{cluster}.client.admin.keyring'.format( cluster=args.cluster), topath='{cluster}.client.admin.keyring'.format( cluster=args.cluster), _hosts=args.mon, ) if not r: ret = 1 # mon. r = fetch_file( args=args, frompath='/var/lib/ceph/mon/%s-{hostname}/keyring' % args.cluster, topath='{cluster}.mon.keyring'.format(cluster=args.cluster), _hosts=args.mon, ) if not r: ret = 1 # bootstrap for what in ['osd', 'mds']: r = fetch_file( args=args, frompath='/var/lib/ceph/bootstrap-{what}/{cluster}.keyring'.format( cluster=args.cluster, what=what), topath='{cluster}.bootstrap-{what}.keyring'.format( cluster=args.cluster, what=what), _hosts=args.mon, ) if not r: ret = 1 return ret @priority(40) def make(parser): """ Gather authentication keys for provisioning new nodes. """ parser.add_argument( 'mon', metavar='HOST', nargs='+', help='monitor host to pull keys from', ) parser.set_defaults( func=gatherkeys, ) ceph-deploy-1.4.0/ceph_deploy/hosts/0000755000076500000240000000000012312561302020112 5ustar alfredostaff00000000000000ceph-deploy-1.4.0/ceph_deploy/hosts/__init__.py0000644000076500000240000000561212300743144022232 0ustar alfredostaff00000000000000""" We deal (mostly) with remote hosts. To avoid special casing each different commands (e.g. using `yum` as opposed to `apt`) we can make a one time call to that remote host and set all the special cases for running commands depending on the type of distribution/version we are dealing with. """ import logging from ceph_deploy import exc from ceph_deploy.hosts import debian, centos, fedora, suse, remotes from ceph_deploy.connection import get_connection logger = logging.getLogger() def get(hostname, username=None, fallback=None): """ Retrieve the module that matches the distribution of a ``hostname``. This function will connect to that host and retrieve the distribution informaiton, then return the appropriate module and slap a few attributes to that module defining the information it found from the hostname. 
For example, if host ``node1.example.com`` is an Ubuntu server, the ``debian`` module would be returned and the following would be set:: module.name = 'ubuntu' module.release = '12.04' module.codename = 'precise' :param hostname: A hostname that is reachable/resolvable over the network :param fallback: Optional fallback to use if no supported distro is found """ conn = get_connection( hostname, username=username, logger=logging.getLogger(hostname) ) conn.import_module(remotes) distro_name, release, codename = conn.remote_module.platform_information() if not codename or not _get_distro(distro_name): raise exc.UnsupportedPlatform( distro=distro_name, codename=codename, release=release) machine_type = conn.remote_module.machine_type() module = _get_distro(distro_name) module.name = distro_name module.release = release module.codename = codename module.conn = conn module.machine_type = machine_type module.init = _choose_init(distro_name, codename) return module def _get_distro(distro, fallback=None): if not distro: return distro = _normalized_distro_name(distro) distributions = { 'debian': debian, 'ubuntu': debian, 'centos': centos, 'scientific': centos, 'redhat': centos, 'fedora': fedora, 'suse': suse, } return distributions.get(distro) or _get_distro(fallback) def _normalized_distro_name(distro): distro = distro.lower() if distro.startswith(('redhat', 'red hat')): return 'redhat' elif distro.startswith(('scientific', 'scientific linux')): return 'scientific' elif distro.startswith(('suse', 'opensuse')): return 'suse' return distro def _choose_init(distro, codename): """ Select a init system for a given distribution. Returns the name of a init system (upstart, sysvinit ...). """ if distro == 'Ubuntu': return 'upstart' return 'sysvinit' ceph-deploy-1.4.0/ceph_deploy/hosts/centos/0000755000076500000240000000000012312561302021405 5ustar alfredostaff00000000000000ceph-deploy-1.4.0/ceph_deploy/hosts/centos/__init__.py0000644000076500000240000000031512312111556023517 0ustar alfredostaff00000000000000import mon, pkg from install import install, mirror_install, repo_install from uninstall import uninstall # Allow to set some information about this distro # distro = None release = None codename = None ceph-deploy-1.4.0/ceph_deploy/hosts/centos/install.py0000644000076500000240000001044412312111556023432 0ustar alfredostaff00000000000000from ceph_deploy.util import pkg_managers, templates from ceph_deploy.lib.remoto import process def install(distro, version_kind, version, adjust_repos): release = distro.release machine = distro.machine_type # Even before EPEL, make sure we have `wget` pkg_managers.yum(distro.conn, 'wget') # Get EPEL installed before we continue: if adjust_repos: install_epel(distro) if version_kind in ['stable', 'testing']: key = 'release' else: key = 'autobuild' if adjust_repos: process.run( distro.conn, [ 'rpm', '--import', "https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/{key}.asc".format(key=key) ] ) if version_kind == 'stable': url = 'http://ceph.com/rpm-{version}/el6/'.format( version=version, ) elif version_kind == 'testing': url = 'http://ceph.com/rpm-testing/el6/' elif version_kind == 'dev': url = 'http://gitbuilder.ceph.com/ceph-rpm-centos{release}-{machine}-basic/ref/{version}/'.format( release=release.split(".",1)[0], machine=machine, version=version, ) process.run( distro.conn, [ 'rpm', '-Uvh', '--replacepkgs', '{url}noarch/ceph-release-1-0.el6.noarch.rpm'.format(url=url), ], ) process.run( distro.conn, [ 'yum', '-y', '-q', 'install', 'ceph', ], ) def 
install_epel(distro): """ CentOS and Scientific need the EPEL repo, otherwise Ceph cannot be installed. """ if distro.name.lower() in ['centos', 'scientific']: distro.conn.logger.info('adding EPEL repository') if float(distro.release) >= 6: process.run( distro.conn, ['wget', 'http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm'], ) pkg_managers.rpm( distro.conn, [ '--replacepkgs', 'epel-release-6*.rpm', ], ) else: process.run( distro.conn, ['wget', 'http://dl.fedoraproject.org/pub/epel/5/x86_64/epel-release-5-4.noarch.rpm'], ) pkg_managers.rpm( distro.conn, [ '--replacepkgs', 'epel-release-5*.rpm' ], ) def mirror_install(distro, repo_url, gpg_url, adjust_repos): repo_url = repo_url.strip('/') # Remove trailing slashes if adjust_repos: process.run( distro.conn, [ 'rpm', '--import', gpg_url, ] ) ceph_repo_content = templates.ceph_repo.format( repo_url=repo_url, gpg_url=gpg_url ) distro.conn.remote_module.write_yum_repo(ceph_repo_content) # Before any install, make sure we have `wget` pkg_managers.yum(distro.conn, 'wget') pkg_managers.yum(distro.conn, 'ceph') def repo_install(distro, repo_name, baseurl, gpgkey, **kw): # Get some defaults name = kw.get('name', '%s repo' % repo_name) enabled = kw.get('enabled', 1) gpgcheck = kw.get('gpgcheck', 1) install_ceph = kw.pop('install_ceph', False) _type = 'repo-md' baseurl = baseurl.strip('/') # Remove trailing slashes process.run( distro.conn, [ 'rpm', '--import', gpgkey, ] ) repo_content = templates.custom_repo.format( repo_name=repo_name, name = name, baseurl = baseurl, enabled = enabled, gpgcheck = gpgcheck, _type = _type, gpgkey = gpgkey, ) distro.conn.remote_module.write_yum_repo( repo_content, "%s.repo" % repo_name ) # Some custom repos do not need to install ceph if install_ceph: # Before any install, make sure we have `wget` pkg_managers.yum(distro.conn, 'wget') pkg_managers.yum(distro.conn, 'ceph') ceph-deploy-1.4.0/ceph_deploy/hosts/centos/mon/0000755000076500000240000000000012312561302022176 5ustar alfredostaff00000000000000ceph-deploy-1.4.0/ceph_deploy/hosts/centos/mon/__init__.py0000644000076500000240000000011612303130077024306 0ustar alfredostaff00000000000000from ceph_deploy.hosts.common import mon_add as add from create import create ceph-deploy-1.4.0/ceph_deploy/hosts/centos/mon/create.py0000644000076500000240000000111212245141667024023 0ustar alfredostaff00000000000000from ceph_deploy.hosts import common from ceph_deploy.lib.remoto import process def create(distro, args, monitor_keyring): hostname = distro.conn.remote_module.shortname() common.mon_create(distro, args, monitor_keyring, hostname) service = distro.conn.remote_module.which_service() process.run( distro.conn, [ service, 'ceph', '-c', '/etc/ceph/{cluster}.conf'.format(cluster=args.cluster), 'start', 'mon.{hostname}'.format(hostname=hostname) ], timeout=7, ) ceph-deploy-1.4.0/ceph_deploy/hosts/centos/pkg.py0000644000076500000240000000040412300743144022541 0ustar alfredostaff00000000000000from ceph_deploy.util import pkg_managers def install(distro, packages): return pkg_managers.yum( distro.conn, packages ) def remove(distro, packages): return pkg_managers.yum_remove( distro.conn, packages ) ceph-deploy-1.4.0/ceph_deploy/hosts/centos/uninstall.py0000644000076500000240000000036712300743144024001 0ustar alfredostaff00000000000000from ceph_deploy.util import pkg_managers def uninstall(conn, purge=False): packages = [ 'ceph', 'ceph-release', ] pkg_managers.yum_remove( conn, packages, ) pkg_managers.yum_clean(conn) 
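# Editor's sketch (not shipped code) of how the per-distro modules in this
# package are consumed.  ``hosts.get`` returns the matching module (debian,
# centos, fedora or suse) with ``conn``, ``init`` and the install/uninstall
# and pkg helpers attached, so callers stay distribution-agnostic.  The
# hostname below is hypothetical:
#
#   from ceph_deploy import hosts
#
#   distro = hosts.get('node1.example.com', username='ceph')
#   distro.pkg.install(distro, ['wget'])   # dispatches to yum/apt/zypper
#   distro.uninstall(distro.conn)
#   distro.conn.exit()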
ceph-deploy-1.4.0/ceph_deploy/hosts/common.py0000644000076500000240000001144012306157710021763 0ustar alfredostaff00000000000000from ceph_deploy.util import paths from ceph_deploy import conf from ceph_deploy.lib.remoto import process from StringIO import StringIO def ceph_version(conn): """ Log the remote ceph-version by calling `ceph --version` """ return process.run(conn, ['ceph', '--version']) def mon_create(distro, args, monitor_keyring, hostname): logger = distro.conn.logger logger.debug('remote hostname: %s' % hostname) path = paths.mon.path(args.cluster, hostname) done_path = paths.mon.done(args.cluster, hostname) init_path = paths.mon.init(args.cluster, hostname, distro.init) configuration = conf.ceph.load(args) conf_data = StringIO() configuration.write(conf_data) # write the configuration file distro.conn.remote_module.write_conf( args.cluster, conf_data.getvalue(), args.overwrite_conf, ) # if the mon path does not exist, create it distro.conn.remote_module.create_mon_path(path) logger.debug('checking for done path: %s' % done_path) if not distro.conn.remote_module.path_exists(done_path): logger.debug('done path does not exist: %s' % done_path) if not distro.conn.remote_module.path_exists(paths.mon.constants.tmp_path): logger.info('creating tmp path: %s' % paths.mon.constants.tmp_path) distro.conn.remote_module.makedir(paths.mon.constants.tmp_path) keyring = paths.mon.keyring(args.cluster, hostname) logger.info('creating keyring file: %s' % keyring) distro.conn.remote_module.write_monitor_keyring( keyring, monitor_keyring, ) process.run( distro.conn, [ 'ceph-mon', '--cluster', args.cluster, '--mkfs', '-i', hostname, '--keyring', keyring, ], ) logger.info('unlinking keyring file %s' % keyring) distro.conn.remote_module.unlink(keyring) # create the done file distro.conn.remote_module.create_done_path(done_path) # create init path distro.conn.remote_module.create_init_path(init_path) def mon_add(distro, args, monitor_keyring): hostname = distro.conn.remote_module.shortname() logger = distro.conn.logger path = paths.mon.path(args.cluster, hostname) monmap_path = paths.mon.monmap(args.cluster, hostname) done_path = paths.mon.done(args.cluster, hostname) init_path = paths.mon.init(args.cluster, hostname, distro.init) configuration = conf.ceph.load(args) conf_data = StringIO() configuration.write(conf_data) # write the configuration file distro.conn.remote_module.write_conf( args.cluster, conf_data.getvalue(), args.overwrite_conf, ) # if the mon path does not exist, create it distro.conn.remote_module.create_mon_path(path) logger.debug('checking for done path: %s' % done_path) if not distro.conn.remote_module.path_exists(done_path): logger.debug('done path does not exist: %s' % done_path) if not distro.conn.remote_module.path_exists(paths.mon.constants.tmp_path): logger.info('creating tmp path: %s' % paths.mon.constants.tmp_path) distro.conn.remote_module.makedir(paths.mon.constants.tmp_path) keyring = paths.mon.keyring(args.cluster, hostname) logger.info('creating keyring file: %s' % keyring) distro.conn.remote_module.write_monitor_keyring( keyring, monitor_keyring, ) # get the monmap process.run( distro.conn, [ 'ceph', 'mon', 'getmap', '-o', monmap_path, ], ) # now use it to prepare the monitor's data dir process.run( distro.conn, [ 'ceph-mon', '--cluster', args.cluster, '--mkfs', '-i', hostname, '--monmap', monmap_path, '--keyring', keyring, ], ) # add it process.run( distro.conn, [ 'ceph', 'mon', 'add', hostname, args.address, ], ) logger.info('unlinking keyring file %s' % keyring) 
distro.conn.remote_module.unlink(keyring) # create the done file distro.conn.remote_module.create_done_path(done_path) # create init path distro.conn.remote_module.create_init_path(init_path) # start the mon using the address process.run( distro.conn, [ 'ceph-mon', '-i', hostname, '--public-addr', args.address, ], ) ceph-deploy-1.4.0/ceph_deploy/hosts/debian/0000755000076500000240000000000012312561302021334 5ustar alfredostaff00000000000000ceph-deploy-1.4.0/ceph_deploy/hosts/debian/__init__.py0000644000076500000240000000032312312111556023445 0ustar alfredostaff00000000000000import mon import pkg from install import install, mirror_install, repo_install from uninstall import uninstall # Allow to set some information about this distro # distro = None release = None codename = None ceph-deploy-1.4.0/ceph_deploy/hosts/debian/install.py0000644000076500000240000001131512312111556023357 0ustar alfredostaff00000000000000from ceph_deploy.lib.remoto import process from ceph_deploy.util import pkg_managers def install(distro, version_kind, version, adjust_repos): codename = distro.codename machine = distro.machine_type if version_kind in ['stable', 'testing']: key = 'release' else: key = 'autobuild' # Make sure ca-certificates is installed process.run( distro.conn, [ 'env', 'DEBIAN_FRONTEND=noninteractive', 'apt-get', '-q', 'install', '--assume-yes', 'ca-certificates', ] ) if adjust_repos: process.run( distro.conn, [ 'wget', '-O', '{key}.asc'.format(key=key), 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/{key}.asc'.format(key=key), ], stop_on_nonzero=False, ) process.run( distro.conn, [ 'apt-key', 'add', '{key}.asc'.format(key=key) ] ) if version_kind == 'stable': url = 'http://ceph.com/debian-{version}/'.format( version=version, ) elif version_kind == 'testing': url = 'http://ceph.com/debian-testing/' elif version_kind == 'dev': url = 'http://gitbuilder.ceph.com/ceph-deb-{codename}-{machine}-basic/ref/{version}'.format( codename=codename, machine=machine, version=version, ) else: raise RuntimeError('Unknown version kind: %r' % version_kind) distro.conn.remote_module.write_sources_list(url, codename) process.run( distro.conn, ['apt-get', '-q', 'update'], ) # TODO this does not downgrade -- should it? 
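    # DEBIAN_FRONTEND=noninteractive and DEBIAN_PRIORITY=critical keep
    # debconf from prompting, and Dpkg::Options::=--force-confnew makes
    # dpkg take the packaged conffile instead of stopping to ask about
    # locally modified ones, so the run below stays fully unattended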
process.run( distro.conn, [ 'env', 'DEBIAN_FRONTEND=noninteractive', 'DEBIAN_PRIORITY=critical', 'apt-get', '-q', '-o', 'Dpkg::Options::=--force-confnew', '--no-install-recommends', '--assume-yes', 'install', '--', 'ceph', 'ceph-mds', 'ceph-common', 'ceph-fs-common', # ceph only recommends gdisk, make sure we actually have # it; only really needed for osds, but minimal collateral 'gdisk', ], ) def mirror_install(distro, repo_url, gpg_url, adjust_repos): repo_url = repo_url.strip('/') # Remove trailing slashes if adjust_repos: process.run( distro.conn, [ 'wget', '-O', 'release.asc', gpg_url, ], stop_on_nonzero=False, ) process.run( distro.conn, [ 'apt-key', 'add', 'release.asc' ] ) distro.conn.remote_module.write_sources_list(repo_url, distro.codename) # Before any install, make sure we have `wget` pkg_managers.apt_update(distro.conn) packages = ( 'ceph', 'ceph-mds', 'ceph-common', 'ceph-fs-common', # ceph only recommends gdisk, make sure we actually have # it; only really needed for osds, but minimal collateral 'gdisk', ) pkg_managers.apt(distro.conn, packages) pkg_managers.apt(distro.conn, 'ceph') def repo_install(distro, repo_name, baseurl, gpgkey, **kw): # Get some defaults safe_filename = '%s.list' % repo_name.replace(' ', '-') install_ceph = kw.pop('install_ceph', False) baseurl = baseurl.strip('/') # Remove trailing slashes process.run( distro.conn, [ 'wget', '-O', 'release.asc', gpgkey, ], stop_on_nonzero=False, ) process.run( distro.conn, [ 'apt-key', 'add', 'release.asc' ] ) distro.conn.remote_module.write_sources_list( baseurl, distro.codename, safe_filename ) if install_ceph: # Before any install, make sure we have `wget` pkg_managers.apt_update(distro.conn) packages = ( 'ceph', 'ceph-mds', 'ceph-common', 'ceph-fs-common', # ceph only recommends gdisk, make sure we actually have # it; only really needed for osds, but minimal collateral 'gdisk', ) pkg_managers.apt(distro.conn, packages) pkg_managers.apt(distro.conn, 'ceph') ceph-deploy-1.4.0/ceph_deploy/hosts/debian/mon/0000755000076500000240000000000012312561302022125 5ustar alfredostaff00000000000000ceph-deploy-1.4.0/ceph_deploy/hosts/debian/mon/__init__.py0000644000076500000240000000011612303130077024235 0ustar alfredostaff00000000000000from ceph_deploy.hosts.common import mon_add as add from create import create ceph-deploy-1.4.0/ceph_deploy/hosts/debian/mon/create.py0000644000076500000240000000235312245141667023762 0ustar alfredostaff00000000000000from ceph_deploy.hosts import common from ceph_deploy.lib.remoto import process def create(distro, args, monitor_keyring): logger = distro.conn.logger hostname = distro.conn.remote_module.shortname() common.mon_create(distro, args, monitor_keyring, hostname) service = distro.conn.remote_module.which_service() if not service: logger.warning('could not find `service` executable') if distro.init == 'upstart': # Ubuntu uses upstart process.run( distro.conn, [ 'initctl', 'emit', 'ceph-mon', 'cluster={cluster}'.format(cluster=args.cluster), 'id={hostname}'.format(hostname=hostname), ], timeout=7, ) elif distro.init == 'sysvinit': # Debian uses sysvinit process.run( distro.conn, [ service, 'ceph', '-c', '/etc/ceph/{cluster}.conf'.format(cluster=args.cluster), 'start', 'mon.{hostname}'.format(hostname=hostname) ], timeout=7, ) else: raise RuntimeError('create cannot use init %s' % distro.init) ceph-deploy-1.4.0/ceph_deploy/hosts/debian/pkg.py0000644000076500000240000000040412300743144022470 0ustar alfredostaff00000000000000from ceph_deploy.util import pkg_managers def install(distro, 
packages): return pkg_managers.apt( distro.conn, packages ) def remove(distro, packages): return pkg_managers.apt_remove( distro.conn, packages ) ceph-deploy-1.4.0/ceph_deploy/hosts/debian/uninstall.py0000644000076500000240000000050112245141667023730 0ustar alfredostaff00000000000000from ceph_deploy.util import pkg_managers from ceph_deploy.lib.remoto import process def uninstall(conn, purge=False): packages = [ 'ceph', 'ceph-mds', 'ceph-common', 'ceph-fs-common', ] pkg_managers.apt_remove( conn, packages, purge=purge, ) ceph-deploy-1.4.0/ceph_deploy/hosts/fedora/0000755000076500000240000000000012312561302021352 5ustar alfredostaff00000000000000ceph-deploy-1.4.0/ceph_deploy/hosts/fedora/__init__.py0000644000076500000240000000043512312111556023467 0ustar alfredostaff00000000000000import mon from ceph_deploy.hosts.centos import pkg from ceph_deploy.hosts.centos.install import repo_install from install import install, mirror_install from uninstall import uninstall # Allow to set some information about this distro # distro = None release = None codename = None ceph-deploy-1.4.0/ceph_deploy/hosts/fedora/install.py0000644000076500000240000000440012245141667023405 0ustar alfredostaff00000000000000from ceph_deploy.util import pkg_managers, templates from ceph_deploy.lib.remoto import process def install(distro, version_kind, version, adjust_repos): release = distro.release machine = distro.machine_type if version_kind in ['stable', 'testing']: key = 'release' else: key = 'autobuild' if adjust_repos: process.run( distro.conn, [ 'rpm', '--import', "https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/{key}.asc".format(key=key) ] ) if version_kind == 'stable': url = 'http://ceph.com/rpm-{version}/fc{release}/'.format( version=version, release=release, ) elif version_kind == 'testing': url = 'http://ceph.com/rpm-testing/fc{release}'.format( release=release, ) elif version_kind == 'dev': url = 'http://gitbuilder.ceph.com/ceph-rpm-fc{release}-{machine}-basic/ref/{version}/'.format( release=release.split(".", 1)[0], machine=machine, version=version, ) process.run( distro.conn, [ 'rpm', '-Uvh', '--replacepkgs', '--force', '--quiet', '{url}noarch/ceph-release-1-0.fc{release}.noarch.rpm'.format( url=url, release=release, ), ] ) process.run( distro.conn, [ 'yum', '-y', '-q', 'install', 'ceph', ], ) def mirror_install(distro, repo_url, gpg_url, adjust_repos): repo_url = repo_url.strip('/') # Remove trailing slashes if adjust_repos: process.run( distro.conn, [ 'rpm', '--import', gpg_url, ] ) ceph_repo_content = templates.ceph_repo.format( repo_url=repo_url, gpg_url=gpg_url ) distro.conn.remote_module.write_yum_repo(ceph_repo_content) pkg_managers.yum(distro.conn, 'ceph') ceph-deploy-1.4.0/ceph_deploy/hosts/fedora/mon/0000755000076500000240000000000012312561302022143 5ustar alfredostaff00000000000000ceph-deploy-1.4.0/ceph_deploy/hosts/fedora/mon/__init__.py0000644000076500000240000000011612303130077024253 0ustar alfredostaff00000000000000from ceph_deploy.hosts.common import mon_add as add from create import create ceph-deploy-1.4.0/ceph_deploy/hosts/fedora/mon/create.py0000644000076500000240000000111212245141667023770 0ustar alfredostaff00000000000000from ceph_deploy.hosts import common from ceph_deploy.lib.remoto import process def create(distro, args, monitor_keyring): hostname = distro.conn.remote_module.shortname() common.mon_create(distro, args, monitor_keyring, hostname) service = distro.conn.remote_module.which_service() process.run( distro.conn, [ service, 'ceph', '-c', 
'/etc/ceph/{cluster}.conf'.format(cluster=args.cluster), 'start', 'mon.{hostname}'.format(hostname=hostname) ], timeout=7, ) ceph-deploy-1.4.0/ceph_deploy/hosts/fedora/uninstall.py0000644000076500000240000000027612245141667023757 0ustar alfredostaff00000000000000from ceph_deploy.util import pkg_managers def uninstall(conn, purge=False): packages = [ 'ceph', ] pkg_managers.yum_remove( conn, packages, ) ceph-deploy-1.4.0/ceph_deploy/hosts/remotes.py0000644000076500000240000001401112312111556022141 0ustar alfredostaff00000000000000import errno import socket import os import shutil import tempfile import platform def platform_information(_linux_distribution=None): """ detect platform information from remote host """ linux_distribution = _linux_distribution or platform.linux_distribution distro, release, codename = linux_distribution() if not codename and 'debian' in distro.lower(): # this could be an empty string in Debian debian_codenames = { '8': 'jessie', '7': 'wheezy', '6': 'squeeze', } major_version = release.split('.')[0] codename = debian_codenames.get(major_version, '') # In order to support newer jessie/sid or wheezy/sid strings we test this # if sid is buried in the minor, we should use sid anyway. if not codename and '/' in release: major, minor = release.split('/') if minor == 'sid': codename = minor else: codename = major return ( str(distro).rstrip(), str(release).rstrip(), str(codename).rstrip() ) def machine_type(): """ detect machine type """ return platform.machine() def write_sources_list(url, codename, filename='ceph.list'): """add deb repo to sources.list""" repo_path = os.path.join('/etc/apt/sources.list.d', filename) with file(repo_path, 'w') as f: f.write('deb {url} {codename} main\n'.format( url=url, codename=codename, )) def write_yum_repo(content, filename='ceph.repo'): """set the contents of repo file to /etc/yum.repos.d/""" repo_path = os.path.join('/etc/yum.repos.d', filename) write_file(repo_path, content) def write_conf(cluster, conf, overwrite): """ write cluster configuration to /etc/ceph/{cluster}.conf """ path = '/etc/ceph/{cluster}.conf'.format(cluster=cluster) tmp_file = tempfile.NamedTemporaryFile(delete=False) err_msg = 'config file %s exists with different content; use --overwrite-conf to overwrite' % path if os.path.exists(path): with file(path, 'rb') as f: old = f.read() if old != conf and not overwrite: raise RuntimeError(err_msg) tmp_file.write(conf) tmp_file.close() shutil.move(tmp_file.name, path) os.chmod(path, 0644) return if os.path.exists('/etc/ceph'): with open(path, 'w') as f: f.write(conf) os.chmod(path, 0644) else: err_msg = '/etc/ceph/ does not exist - could not write config' raise RuntimeError(err_msg) def write_keyring(path, key): """ create a keyring file """ # Note that we *require* to avoid deletion of the temp file # otherwise we risk not being able to copy the contents from # one file system to the other, hence the `delete=False` tmp_file = tempfile.NamedTemporaryFile(delete=False) tmp_file.write(key) tmp_file.close() shutil.move(tmp_file.name, path) def create_mon_path(path): """create the mon path if it does not exist""" if not os.path.exists(path): os.makedirs(path) def create_done_path(done_path): """create a done file to avoid re-doing the mon deployment""" with file(done_path, 'w'): pass def create_init_path(init_path): """create the init path if it does not exist""" if not os.path.exists(init_path): with file(init_path, 'w'): pass def append_to_file(file_path, contents): """append contents to file""" with open(file_path, 
'a') as f: f.write(contents) def path_exists(path): return os.path.exists(path) def makedir(path): os.makedirs(path) def unlink(_file): os.unlink(_file) def write_monitor_keyring(keyring, monitor_keyring): """create the monitor keyring file""" write_file(keyring, monitor_keyring) def write_file(path, content): with file(path, 'w') as f: f.write(content) def touch_file(path): with file(path, 'wb') as f: # noqa pass def get_file(path): """ fetch remote file """ try: with file(path, 'rb') as f: return f.read() except IOError: pass def shortname(): """get remote short hostname""" return socket.gethostname().split('.', 1)[0] def which_service(): """ locating the `service` executable... """ # XXX This should get deprecated at some point. For now # it just bypasses and uses the new helper. return which('service') def which(executable): """find the location of an executable""" locations = ( '/usr/local/bin', '/bin', '/usr/bin', '/usr/local/sbin', '/usr/sbin', '/sbin', ) for location in locations: executable_path = os.path.join(location, executable) if os.path.exists(executable_path): return executable_path def make_mon_removed_dir(path, file_name): """ move old monitor data """ try: os.makedirs('/var/lib/ceph/mon-removed') except OSError, e: if e.errno != errno.EEXIST: raise shutil.move(path, os.path.join('/var/lib/ceph/mon-removed/', file_name)) def safe_mkdir(path): """ create path if it doesn't exist """ try: os.mkdir(path) except OSError, e: if e.errno == errno.EEXIST: pass else: raise def zeroing(dev): """ zeroing last few blocks of device """ # this kills the crab # # sgdisk will wipe out the main copy of the GPT partition # table (sorry), but it doesn't remove the backup copies, and # subsequent commands will continue to complain and fail when # they see those. zeroing the last few blocks of the device # appears to do the trick. 
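    # the classic GPT backup copy (header plus partition-entry array)
    # occupies the last 33 LBAs of a disk; wiping the final 33 * 4096
    # bytes is therefore enough even for 4 KiB logical blocks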
    lba_size = 4096
    size = 33 * lba_size
    with file(dev, 'wb') as f:
        f.seek(-size, os.SEEK_END)
        f.write(size*'\0')


# remoto magic, needed to execute these functions remotely
if __name__ == '__channelexec__':
    for item in channel:  # noqa
        channel.send(eval(item))  # noqa
ceph-deploy-1.4.0/ceph_deploy/hosts/suse/0000755000076500000240000000000012312561302021071 5ustar alfredostaff00000000000000
ceph-deploy-1.4.0/ceph_deploy/hosts/suse/__init__.py0000644000076500000240000000031512312111556023203 0ustar alfredostaff00000000000000
import mon, pkg
from install import install, mirror_install, repo_install
from uninstall import uninstall

# Allow to set some information about this distro
#

distro = None
release = None
codename = None
ceph-deploy-1.4.0/ceph_deploy/hosts/suse/install.py0000644000076500000240000000677612312111556023127 0ustar alfredostaff00000000000000
from ceph_deploy.util import templates, pkg_managers
from ceph_deploy.lib.remoto import process


def install(distro, version_kind, version, adjust_repos):
    release = distro.release
    machine = distro.machine_type

    if version_kind in ['stable', 'testing']:
        key = 'release'
    else:
        key = 'autobuild'

    # use a separate name for the repo directory so the ``distro``
    # module object (still needed for ``distro.conn`` below) is not
    # shadowed by a plain string
    if distro.codename == 'Mantis':
        distro_name = 'opensuse12'
    else:
        distro_name = 'sles-11sp2'

    if adjust_repos:
        process.run(
            distro.conn,
            [
                'rpm',
                '--import',
                "https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/{key}.asc".format(key=key)
            ]
        )

        if version_kind == 'stable':
            url = 'http://ceph.com/rpm-{version}/{distro}/'.format(
                version=version,
                distro=distro_name,
            )
        elif version_kind == 'testing':
            url = 'http://ceph.com/rpm-testing/{distro}/'.format(distro=distro_name)
        elif version_kind == 'dev':
            url = 'http://gitbuilder.ceph.com/ceph-rpm-{distro}{release}-{machine}-basic/ref/{version}/'.format(
                distro=distro_name,
                release=release.split(".", 1)[0],
                machine=machine,
                version=version,
            )

        process.run(
            distro.conn,
            [
                'rpm',
                '-Uvh',
                '--replacepkgs',
                '--force',
                '--quiet',
                '{url}noarch/ceph-release-1-0.noarch.rpm'.format(
                    url=url,
                ),
            ]
        )

    process.run(
        distro.conn,
        [
            'zypper',
            '--non-interactive',
            '--quiet',
            'install',
            'ceph',
        ],
    )


def mirror_install(distro, repo_url, gpg_url, adjust_repos):
    repo_url = repo_url.strip('/')  # Remove trailing slashes

    if adjust_repos:
        process.run(
            distro.conn,
            [
                'rpm',
                '--import',
                gpg_url,
            ]
        )

        ceph_repo_content = templates.ceph_repo.format(
            repo_url=repo_url,
            gpg_url=gpg_url
        )
        distro.conn.remote_module.write_yum_repo(ceph_repo_content)

    process.run(
        distro.conn,
        [
            'zypper',
            '--non-interactive',
            '--quiet',
            'install',
            'ceph',
        ],
    )


def repo_install(distro, repo_name, baseurl, gpgkey, **kw):
    # Get some defaults
    name = kw.get('name', '%s repo' % repo_name)
    enabled = kw.get('enabled', 1)
    gpgcheck = kw.get('gpgcheck', 1)
    install_ceph = kw.pop('install_ceph', False)
    _type = 'repo-md'
    baseurl = baseurl.strip('/')  # Remove trailing slashes

    process.run(
        distro.conn,
        [
            'rpm',
            '--import',
            gpgkey,
        ]
    )

    repo_content = templates.custom_repo.format(
        repo_name=repo_name,
        name=name,
        baseurl=baseurl,
        enabled=enabled,
        gpgcheck=gpgcheck,
        _type=_type,
        gpgkey=gpgkey,
    )

    distro.conn.remote_module.write_yum_repo(
        repo_content,
        "%s.repo" % repo_name
    )

    # Some custom repos do not need to install ceph
    if install_ceph:
        # Before any install, make sure we have `wget`
        pkg_managers.zypper(distro.conn, 'wget')
        pkg_managers.zypper(distro.conn, 'ceph')
ceph-deploy-1.4.0/ceph_deploy/hosts/suse/mon/0000755000076500000240000000000012312561302021662 5ustar alfredostaff00000000000000
ceph-deploy-1.4.0/ceph_deploy/hosts/suse/mon/__init__.py0000644000076500000240000000011612303130077023772 0ustar 
alfredostaff00000000000000from ceph_deploy.hosts.common import mon_add as add from create import create ceph-deploy-1.4.0/ceph_deploy/hosts/suse/mon/create.py0000644000076500000240000000077712303130077023513 0ustar alfredostaff00000000000000from ceph_deploy.hosts import common from ceph_deploy.lib.remoto import process def create(distro, args, monitor_keyring): hostname = distro.conn.remote_module.shortname() common.mon_create(distro, args, monitor_keyring, hostname) process.run( distro.conn, [ 'rcceph', '-c', '/etc/ceph/{cluster}.conf'.format(cluster=args.cluster), 'start', 'mon.{hostname}'.format(hostname=hostname) ], timeout=7, ) ceph-deploy-1.4.0/ceph_deploy/hosts/suse/pkg.py0000644000076500000240000000041212300743144022224 0ustar alfredostaff00000000000000from ceph_deploy.util import pkg_managers def install(distro, packages): return pkg_managers.zypper( distro.conn, packages ) def remove(distro, packages): return pkg_managers.zypper_remove( distro.conn, packages ) ceph-deploy-1.4.0/ceph_deploy/hosts/suse/uninstall.py0000644000076500000240000000052712245141667023475 0ustar alfredostaff00000000000000from ceph_deploy.lib.remoto import process def uninstall(conn, purge=False): packages = [ 'ceph', 'libcephfs1', 'librados2', 'librbd1', ] cmd = [ 'zypper', '--non-interactive', '--quiet', 'remove', ] cmd.extend(packages) process.run(conn, cmd) ceph-deploy-1.4.0/ceph_deploy/install.py0000644000076500000240000003141212312111556020775 0ustar alfredostaff00000000000000import argparse import logging import os from ceph_deploy import hosts from ceph_deploy.cliutil import priority from ceph_deploy.lib.remoto import process LOG = logging.getLogger(__name__) def install(args): # XXX This whole dance is because --stable is getting deprecated if args.stable is not None: LOG.warning('the --stable flag is deprecated, use --release instead') args.release = args.stable if args.version_kind == 'stable': version = args.release else: version = getattr(args, args.version_kind) # XXX Tango ends here. 
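    # at this point version_kind is one of 'stable', 'testing' or 'dev'
    # (see StoreVersion below) and version holds the matching value,
    # e.g. the 'emperor' default for --release or a git branch for --dev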
version_str = args.version_kind if version: version_str += ' version {version}'.format(version=version) LOG.debug( 'Installing %s on cluster %s hosts %s', version_str, args.cluster, ' '.join(args.host), ) for hostname in args.host: LOG.debug('Detecting platform for host %s ...', hostname) distro = hosts.get(hostname, username=args.username) LOG.info( 'Distro info: %s %s %s', distro.name, distro.release, distro.codename ) rlogger = logging.getLogger(hostname) rlogger.info('installing ceph on %s' % hostname) cd_conf = getattr(args, 'cd_conf', None) # custom repo arguments repo_url = os.environ.get('CEPH_DEPLOY_REPO_URL') or args.repo_url gpg_url = os.environ.get('CEPH_DEPLOY_GPG_URL') or args.gpg_url gpg_fallback = 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc' if gpg_url is None and repo_url: LOG.warning('--gpg-url was not used, will fallback') LOG.warning('using GPG fallback: %s', gpg_fallback) gpg_url = gpg_fallback if repo_url: # triggers using a custom repository # the user used a custom repo url, this should override anything # we can detect from the configuration, so warn about it if cd_conf: if cd_conf.get_default_repo(): rlogger.warning('a default repo was found but it was \ overridden on the CLI') if args.release in cd_conf.get_repos(): rlogger.warning('a custom repo was found but it was \ overridden on the CLI') rlogger.info('using custom repository location: %s', repo_url) distro.mirror_install( distro, repo_url, gpg_url, args.adjust_repos ) # Detect and install custom repos here if needed elif should_use_custom_repo(args, cd_conf, repo_url): LOG.info('detected valid custom repositories from config file') custom_repo(distro, args, cd_conf, rlogger) else: # otherwise a normal installation distro.install( distro, args.version_kind, version, args.adjust_repos ) # Check the ceph version we just installed hosts.common.ceph_version(distro.conn) distro.conn.exit() def should_use_custom_repo(args, cd_conf, repo_url): """ A boolean to determine the logic needed to proceed with a custom repo installation instead of cramming everything nect to the logic operator. """ if repo_url: # repo_url signals a CLI override, return False immediately return False if cd_conf: if cd_conf.has_repos: has_valid_release = args.release in cd_conf.get_repos() has_default_repo = cd_conf.get_default_repo() if has_valid_release or has_default_repo: return True return False def custom_repo(distro, args, cd_conf, rlogger): """ A custom repo install helper that will go through config checks to retrieve repos (and any extra repos defined) and install those ``cd_conf`` is the object built from argparse that holds the flags and information needed to determine what metadata from the configuration to be used. 
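    A ceph-deploy config section that would drive this helper looks
    roughly like this (URLs are placeholders)::

        [myrepo]
        baseurl = https://example.org/rhel6
        gpgkey = https://example.org/keys/release.asc
        default = True
        extra-repos = cephrepo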
""" default_repo = cd_conf.get_default_repo() if args.release in cd_conf.get_repos(): LOG.info('will use repository from conf: %s' % args.release) default_repo = args.release elif default_repo: LOG.info('will use default repository: %s' % default_repo) # At this point we know there is a cd_conf and that it has custom # repos make sure we were able to detect and actual repo if not default_repo: LOG.warning('a ceph-deploy config was found with repos \ but could not default to one') else: options = dict(cd_conf.items(default_repo)) options['install_ceph'] = True extra_repos = cd_conf.get_list(default_repo, 'extra-repos') rlogger.info('adding custom repository file') try: distro.repo_install( distro, default_repo, options.pop('baseurl'), options.pop('gpgkey'), **options ) except KeyError as err: raise RuntimeError('missing required key: %s in config section: %s' % (err, default_repo)) for xrepo in extra_repos: rlogger.info('adding extra repo file: %s.repo' % xrepo) options = dict(cd_conf.items(xrepo)) try: distro.repo_install( distro, xrepo, options.pop('baseurl'), options.pop('gpgkey'), **options ) except KeyError as err: raise RuntimeError('missing required key: %s in config section: %s' % (err, xrepo)) def uninstall(args): LOG.debug( 'Uninstalling on cluster %s hosts %s', args.cluster, ' '.join(args.host), ) for hostname in args.host: LOG.debug('Detecting platform for host %s ...', hostname) distro = hosts.get(hostname, username=args.username) LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename) rlogger = logging.getLogger(hostname) rlogger.info('uninstalling ceph on %s' % hostname) distro.uninstall(distro.conn) distro.conn.exit() def purge(args): LOG.debug( 'Purging from cluster %s hosts %s', args.cluster, ' '.join(args.host), ) for hostname in args.host: LOG.debug('Detecting platform for host %s ...', hostname) distro = hosts.get(hostname, username=args.username) LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename) rlogger = logging.getLogger(hostname) rlogger.info('purging host ... 
%s' % hostname) distro.uninstall(distro.conn, purge=True) distro.conn.exit() def purge_data(args): LOG.debug( 'Purging data from cluster %s hosts %s', args.cluster, ' '.join(args.host), ) installed_hosts = [] for hostname in args.host: distro = hosts.get(hostname, username=args.username) ceph_is_installed = distro.conn.remote_module.which('ceph') if ceph_is_installed: installed_hosts.append(hostname) distro.conn.exit() if installed_hosts: LOG.error("ceph is still installed on: %s", installed_hosts) raise RuntimeError("refusing to purge data while ceph is still installed") for hostname in args.host: distro = hosts.get(hostname, username=args.username) LOG.info( 'Distro info: %s %s %s', distro.name, distro.release, distro.codename ) rlogger = logging.getLogger(hostname) rlogger.info('purging data on %s' % hostname) # Try to remove the contents of /var/lib/ceph first, don't worry # about errors here, we deal with them later on process.check( distro.conn, [ 'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph', ] ) # If we failed in the previous call, then we probably have OSDs # still mounted, so we unmount them here if distro.conn.remote_module.path_exists('/var/lib/ceph'): rlogger.warning( 'OSDs may still be mounted, trying to unmount them' ) process.run( distro.conn, [ 'find', '/var/lib/ceph', '-mindepth', '1', '-maxdepth', '2', '-type', 'd', '-exec', 'umount', '{}', ';', ] ) # And now we try again to remove the contents, since OSDs should be # unmounted, but this time we do check for errors process.run( distro.conn, [ 'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph', ] ) process.run( distro.conn, [ 'rm', '-rf', '--one-file-system', '--', '/etc/ceph/', ] ) distro.conn.exit() class StoreVersion(argparse.Action): """ Like ``"store"`` but also remember which one of the exclusive options was set. There are three kinds of versions: stable, testing and dev. This sets ``version_kind`` to be the right one of the above. This kludge essentially lets us differentiate explicitly set values from defaults. """ def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, values) if self.dest == 'release': self.dest = 'stable' namespace.version_kind = self.dest @priority(20) def make(parser): """ Install Ceph packages on remote hosts. 
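    Typical invocations look like the following (hostnames and the dev
    branch are illustrative)::

        ceph-deploy install node1 node2            # default stable release
        ceph-deploy install --release dumpling node1
        ceph-deploy install --dev wip-feature node1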
""" version = parser.add_mutually_exclusive_group() # XXX deprecated in favor of release version.add_argument( '--stable', nargs='?', action=StoreVersion, metavar='CODENAME', help='[DEPRECATED] install a release known as CODENAME\ (done by default) (default: %(default)s)', ) version.add_argument( '--release', nargs='?', action=StoreVersion, metavar='CODENAME', help='install a release known as CODENAME\ (done by default) (default: %(default)s)', ) version.add_argument( '--testing', nargs=0, action=StoreVersion, help='install the latest development release', ) version.add_argument( '--dev', nargs='?', action=StoreVersion, const='master', metavar='BRANCH_OR_TAG', help='install a bleeding edge build from Git branch\ or tag (default: %(default)s)', ) version.add_argument( '--adjust-repos', dest='adjust_repos', action='store_true', help='install packages modifying source repos', ) version.add_argument( '--no-adjust-repos', dest='adjust_repos', action='store_false', help='install packages without modifying source repos', ) version.set_defaults( func=install, stable=None, # XXX deprecated in favor of release release='emperor', dev='master', version_kind='stable', adjust_repos=True, ) parser.add_argument( 'host', metavar='HOST', nargs='+', help='hosts to install on', ) parser.add_argument( '--repo-url', nargs='?', dest='repo_url', help='specify a repo URL that mirrors/contains ceph packages', ) parser.add_argument( '--gpg-url', nargs='?', dest='gpg_url', help='specify a GPG key URL to be used with custom repos\ (defaults to ceph.com)' ) parser.set_defaults( func=install, ) @priority(80) def make_uninstall(parser): """ Remove Ceph packages from remote hosts. """ parser.add_argument( 'host', metavar='HOST', nargs='+', help='hosts to uninstall Ceph from', ) parser.set_defaults( func=uninstall, ) @priority(80) def make_purge(parser): """ Remove Ceph packages from remote hosts and purge all data. """ parser.add_argument( 'host', metavar='HOST', nargs='+', help='hosts to purge Ceph from', ) parser.set_defaults( func=purge, ) @priority(80) def make_purge_data(parser): """ Purge (delete, destroy, discard, shred) any Ceph data from /var/lib/ceph """ parser.add_argument( 'host', metavar='HOST', nargs='+', help='hosts to purge Ceph data from', ) parser.set_defaults( func=purge_data, ) ceph-deploy-1.4.0/ceph_deploy/lib/0000755000076500000240000000000012312561302017520 5ustar alfredostaff00000000000000ceph-deploy-1.4.0/ceph_deploy/lib/__init__.py0000644000076500000240000000042012245141667021642 0ustar alfredostaff00000000000000""" This module is meant for vendorizing Python libraries. Most libraries will need to have some ``sys.path`` alterations done unless they are doing relative imports. Do **not** add anything to this module that does not represent a vendorized library. 
""" import remoto ceph-deploy-1.4.0/ceph_deploy/lib/remoto/0000755000076500000240000000000012312561302021025 5ustar alfredostaff00000000000000ceph-deploy-1.4.0/ceph_deploy/lib/remoto/__init__.py0000644000076500000240000000013112303131044023125 0ustar alfredostaff00000000000000from .connection import Connection from .file_sync import rsync __version__ = '0.0.15' ceph-deploy-1.4.0/ceph_deploy/lib/remoto/connection.py0000644000076500000240000000710312303131044023533 0ustar alfredostaff00000000000000import socket from .lib import execnet # # Connection Object # class Connection(object): def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=True): self.hostname = hostname self.sudo = sudo self.logger = logger or FakeRemoteLogger() self.remote_module = None self.channel = None self.global_timeout = None # wait for ever if eager: self.gateway = self._make_gateway(hostname) def _make_gateway(self, hostname): return execnet.makegateway( self._make_connection_string(hostname) ) def _make_connection_string(self, hostname, _needs_ssh=None): _needs_ssh = _needs_ssh or needs_ssh interpreter = 'sudo python' if self.sudo else 'python' if _needs_ssh(hostname): return 'ssh=%s//python=%s' % (hostname, interpreter) return 'popen//python=%s' % interpreter def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.exit() return False def execute(self, function, **kw): return self.gateway.remote_exec(function, **kw) def exit(self): self.gateway.exit() def import_module(self, module): self.remote_module = ModuleExecute(self.gateway, module, self.logger) class ModuleExecute(object): def __init__(self, gateway, module, logger=None): self.channel = gateway.remote_exec(module) self.module = module self.logger = logger def __getattr__(self, name): if not hasattr(self.module, name): msg = "module %s does not have attribute %s" % (str(self.module), name) raise AttributeError(msg) docstring = self._get_func_doc(getattr(self.module, name)) def wrapper(*args): arguments = self._convert_args(args) if docstring: self.logger.debug(docstring) self.channel.send("%s(%s)" % (name, arguments)) try: return self.channel.receive() except Exception as error: # Error will come as a string of a traceback, remove everything # up to the actual exception since we do get garbage otherwise # that points to non-existent lines in the compiled code for tb_line in reversed(str(error).split('\n')): if tb_line: exc_line = tb_line break raise RuntimeError(exc_line) return wrapper def _get_func_doc(self, func): try: return getattr(func, 'func_doc').strip() except AttributeError: return '' def _convert_args(self, args): if args: if len(args) > 1: arguments = str(args).rstrip(')').lstrip('(') else: arguments = str(args).rstrip(',)').lstrip('(') else: arguments = '' return arguments # # FIXME this is getting ridiculous # class FakeRemoteLogger: def error(self, *a, **kw): pass def debug(self, *a, **kw): pass def info(self, *a, **kw): pass def warning(self, *a, **kw): pass def needs_ssh(hostname, _socket=None): """ Obtains remote hostname of the socket and cuts off the domain part of its FQDN. 
""" _socket = _socket or socket local_hostname = _socket.gethostname() local_short_hostname = local_hostname.split('.')[0] if local_hostname == hostname or local_short_hostname == hostname: return False return True ceph-deploy-1.4.0/ceph_deploy/lib/remoto/exc.py0000644000076500000240000000025612303131044022155 0ustar alfredostaff00000000000000from .lib import execnet HostNotFound = execnet.HostNotFound RemoteError = execnet.RemoteError TimeoutError = execnet.TimeoutError DataFormatError = execnet.DataFormatError ceph-deploy-1.4.0/ceph_deploy/lib/remoto/file_sync.py0000644000076500000240000000252512303131044023352 0ustar alfredostaff00000000000000from .lib import execnet from .connection import Connection, FakeRemoteLogger class _RSync(execnet.RSync): """ Inherits from ``execnet.RSync`` so that we can log nicely with the user logger instance (if any) back with the ``_report_send_file`` method """ def __init__(self, sourcedir, callback=None, verbose=True, logger=None): self.logger = logger super(_RSync, self).__init__(sourcedir, callback, verbose) def _report_send_file(self, gateway, modified_rel_path): if self._verbose: self.logger.info("syncing file: %s" % modified_rel_path) def rsync(hosts, source, destination, logger=None, sudo=False): """ Grabs the hosts (or single host), creates the connection object for each and set the rsync execnet engine to push the files. It assumes that all of the destinations for the different hosts is the same. This deviates from what execnet does because it has the flexibility to push to different locations. """ logger = logger or FakeRemoteLogger() sync = _RSync(source, logger=logger) # setup_targets if not isinstance(hosts, list): hosts = [hosts] for host in hosts: conn = Connection( host, logger, sudo, ) sync.add_target(conn.gateway, destination) return sync.send() ceph-deploy-1.4.0/ceph_deploy/lib/remoto/lib/0000755000076500000240000000000012312561302021573 5ustar alfredostaff00000000000000ceph-deploy-1.4.0/ceph_deploy/lib/remoto/lib/__init__.py0000644000076500000240000000023112303131044023674 0ustar alfredostaff00000000000000import sys import os this_dir = os.path.abspath(os.path.dirname(__file__)) if this_dir not in sys.path: sys.path.insert(0, this_dir) import execnet ceph-deploy-1.4.0/ceph_deploy/lib/remoto/lib/execnet/0000755000076500000240000000000012312561302023226 5ustar alfredostaff00000000000000ceph-deploy-1.4.0/ceph_deploy/lib/remoto/lib/execnet/__init__.py0000644000076500000240000000222112303131044025330 0ustar alfredostaff00000000000000""" execnet: pure python lib for connecting to local and remote Python Interpreters. (c) 2012, Holger Krekel and others """ __version__ = '1.2.0.dev2-ad1' from . 
import apipkg apipkg.initpkg(__name__, { 'PopenGateway': '.deprecated:PopenGateway', 'SocketGateway': '.deprecated:SocketGateway', 'SshGateway': '.deprecated:SshGateway', 'makegateway': '.multi:makegateway', 'set_execmodel': '.multi:set_execmodel', 'HostNotFound': '.gateway_bootstrap:HostNotFound', 'RemoteError': '.gateway_base:RemoteError', 'TimeoutError': '.gateway_base:TimeoutError', 'XSpec': '.xspec:XSpec', 'Group': '.multi:Group', 'MultiChannel': '.multi:MultiChannel', 'RSync': '.rsync:RSync', 'default_group': '.multi:default_group', 'dumps': '.gateway_base:dumps', 'loads': '.gateway_base:loads', 'load': '.gateway_base:load', 'dump': '.gateway_base:dump', 'DataFormatError': '.gateway_base:DataFormatError', }) # CHANGELOG # # 1.2.0-ad1: Patch `if case` in to_io method to prevent AttributeErrors ceph-deploy-1.4.0/ceph_deploy/lib/remoto/lib/execnet/apipkg.py0000644000076500000240000001264012303131044025052 0ustar alfredostaff00000000000000""" apipkg: control the exported namespace of a python package. see http://pypi.python.org/pypi/apipkg (c) holger krekel, 2009 - MIT license """ import os import sys from types import ModuleType __version__ = '1.2' def initpkg(pkgname, exportdefs, attr=dict()): """ initialize given package from the export definitions. """ oldmod = sys.modules.get(pkgname) d = {} f = getattr(oldmod, '__file__', None) if f: f = os.path.abspath(f) d['__file__'] = f if hasattr(oldmod, '__version__'): d['__version__'] = oldmod.__version__ if hasattr(oldmod, '__loader__'): d['__loader__'] = oldmod.__loader__ if hasattr(oldmod, '__path__'): d['__path__'] = [os.path.abspath(p) for p in oldmod.__path__] if '__doc__' not in exportdefs and getattr(oldmod, '__doc__', None): d['__doc__'] = oldmod.__doc__ d.update(attr) if hasattr(oldmod, "__dict__"): oldmod.__dict__.update(d) mod = ApiModule(pkgname, exportdefs, implprefix=pkgname, attr=d) sys.modules[pkgname] = mod def importobj(modpath, attrname): module = __import__(modpath, None, None, ['__doc__']) if not attrname: return module retval = module names = attrname.split(".") for x in names: retval = getattr(retval, x) return retval class ApiModule(ModuleType): def __docget(self): try: return self.__doc except AttributeError: if '__doc__' in self.__map__: return self.__makeattr('__doc__') def __docset(self, value): self.__doc = value __doc__ = property(__docget, __docset) def __init__(self, name, importspec, implprefix=None, attr=None): self.__name__ = name self.__all__ = [x for x in importspec if x != '__onfirstaccess__'] self.__map__ = {} self.__implprefix__ = implprefix or name if attr: for name, val in attr.items(): #print "setting", self.__name__, name, val setattr(self, name, val) for name, importspec in importspec.items(): if isinstance(importspec, dict): subname = '%s.%s'%(self.__name__, name) apimod = ApiModule(subname, importspec, implprefix) sys.modules[subname] = apimod setattr(self, name, apimod) else: parts = importspec.split(':') modpath = parts.pop(0) attrname = parts and parts[0] or "" if modpath[0] == '.': modpath = implprefix + modpath if not attrname: subname = '%s.%s'%(self.__name__, name) apimod = AliasModule(subname, modpath) sys.modules[subname] = apimod if '.' 
not in name: setattr(self, name, apimod) else: self.__map__[name] = (modpath, attrname) def __repr__(self): l = [] if hasattr(self, '__version__'): l.append("version=" + repr(self.__version__)) if hasattr(self, '__file__'): l.append('from ' + repr(self.__file__)) if l: return '' % (self.__name__, " ".join(l)) return '' % (self.__name__,) def __makeattr(self, name): """lazily compute value for name or raise AttributeError if unknown.""" #print "makeattr", self.__name__, name target = None if '__onfirstaccess__' in self.__map__: target = self.__map__.pop('__onfirstaccess__') importobj(*target)() try: modpath, attrname = self.__map__[name] except KeyError: if target is not None and name != '__onfirstaccess__': # retry, onfirstaccess might have set attrs return getattr(self, name) raise AttributeError(name) else: result = importobj(modpath, attrname) setattr(self, name, result) try: del self.__map__[name] except KeyError: pass # in a recursive-import situation a double-del can happen return result __getattr__ = __makeattr def __dict__(self): # force all the content of the module to be loaded when __dict__ is read dictdescr = ModuleType.__dict__['__dict__'] dict = dictdescr.__get__(self) if dict is not None: hasattr(self, 'some') for name in self.__all__: try: self.__makeattr(name) except AttributeError: pass return dict __dict__ = property(__dict__) def AliasModule(modname, modpath, attrname=None): mod = [] def getmod(): if not mod: x = importobj(modpath, None) if attrname is not None: x = getattr(x, attrname) mod.append(x) return mod[0] class AliasModule(ModuleType): def __repr__(self): x = modpath if attrname: x += "." + attrname return '' % (modname, x) def __getattribute__(self, name): return getattr(getmod(), name) def __setattr__(self, name, value): setattr(getmod(), name, value) def __delattr__(self, name): delattr(getmod(), name) return AliasModule(modname) ceph-deploy-1.4.0/ceph_deploy/lib/remoto/lib/execnet/deprecated.py0000644000076500000240000000302212303131044025671 0ustar alfredostaff00000000000000""" some deprecated calls (c) 2008-2009, Holger Krekel and others """ import execnet def PopenGateway(python=None): """ instantiate a gateway to a subprocess started with the given 'python' executable. """ APIWARN("1.0.0b4", "use makegateway('popen')") spec = execnet.XSpec("popen") spec.python = python return execnet.default_group.makegateway(spec) def SocketGateway(host, port): """ This Gateway provides interaction with a remote process by connecting to a specified socket. On the remote side you need to manually start a small script (py/execnet/script/socketserver.py) that accepts SocketGateway connections or use the experimental new_remote() method on existing gateways. """ APIWARN("1.0.0b4", "use makegateway('socket=host:port')") spec = execnet.XSpec("socket=%s:%s" %(host, port)) return execnet.default_group.makegateway(spec) def SshGateway(sshaddress, remotepython=None, ssh_config=None): """ instantiate a remote ssh process with the given 'sshaddress' and remotepython version. you may specify an ssh_config file. 
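Illustrative equivalence between the deprecated and the current API
(an editor's sketch; the host name is a placeholder)::

    gw = SshGateway('node1')               # deprecated spelling
    gw = execnet.makegateway('ssh=node1')  # preferred replacement
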
""" APIWARN("1.0.0b4", "use makegateway('ssh=host')") spec = execnet.XSpec("ssh=%s" % sshaddress) spec.python = remotepython spec.ssh_config = ssh_config return execnet.default_group.makegateway(spec) def APIWARN(version, msg, stacklevel=3): import warnings Warn = DeprecationWarning("(since version %s) %s" %(version, msg)) warnings.warn(Warn, stacklevel=stacklevel) ceph-deploy-1.4.0/ceph_deploy/lib/remoto/lib/execnet/gateway.py0000644000076500000240000001565412303131044025250 0ustar alfredostaff00000000000000""" gateway code for initiating popen, socket and ssh connections. (c) 2004-2013, Holger Krekel and others """ import sys, os, inspect, types, linecache import textwrap import execnet from execnet.gateway_base import Message from execnet import gateway_base importdir = os.path.dirname(os.path.dirname(execnet.__file__)) class Gateway(gateway_base.BaseGateway): """ Gateway to a local or remote Python Intepreter. """ def __init__(self, io, spec): super(Gateway, self).__init__(io=io, id=spec.id, _startcount=1) self.spec = spec self._initreceive() @property def remoteaddress(self): return self._io.remoteaddress def __repr__(self): """ return string representing gateway type and status. """ try: r = (self.hasreceiver() and 'receive-live' or 'not-receiving') i = len(self._channelfactory.channels()) except AttributeError: r = "uninitialized" i = "no" return "<%s id=%r %s, %s model, %s active channels>" %( self.__class__.__name__, self.id, r, self.execmodel.backend, i) def exit(self): """ trigger gateway exit. Defer waiting for finishing of receiver-thread and subprocess activity to when group.terminate() is called. """ self._trace("gateway.exit() called") if self not in self._group: self._trace("gateway already unregistered with group") return self._group._unregister(self) try: self._trace("--> sending GATEWAY_TERMINATE") self._send(Message.GATEWAY_TERMINATE) self._trace("--> io.close_write") self._io.close_write() except (ValueError, EOFError, IOError): v = sys.exc_info()[1] self._trace("io-error: could not send termination sequence") self._trace(" exception: %r" % v) def reconfigure(self, py2str_as_py3str=True, py3str_as_py2str=False): """ set the string coercion for this gateway the default is to try to convert py2 str as py3 str, but not to try and convert py3 str to py2 str """ self._strconfig = (py2str_as_py3str, py3str_as_py2str) data = gateway_base.dumps_internal(self._strconfig) self._send(Message.RECONFIGURE, data=data) def _rinfo(self, update=False): """ return some sys/env information from remote. """ if update or not hasattr(self, '_cache_rinfo'): ch = self.remote_exec(rinfo_source) self._cache_rinfo = RInfo(ch.receive()) return self._cache_rinfo def hasreceiver(self): """ return True if gateway is able to receive data. """ return self._receiverthread.running # approxmimation def remote_status(self): """ return information object about remote execution status. """ channel = self.newchannel() self._send(Message.STATUS, channel.id) statusdict = channel.receive() # the other side didn't actually instantiate a channel # so we just delete the internal id/channel mapping self._channelfactory._local_close(channel.id) return RemoteStatus(statusdict) def remote_exec(self, source, **kwargs): """ return channel object and connect it to a remote execution thread where the given ``source`` executes. * ``source`` is a string: execute source string remotely with a ``channel`` put into the global namespace. 
* ``source`` is a pure function: serialize source and call function with ``**kwargs``, adding a ``channel`` object to the keyword arguments. * ``source`` is a pure module: execute source of module with a ``channel`` in its global namespace In all cases the binding ``__name__='__channelexec__'`` will be available in the global namespace of the remotely executing code. """ call_name = None if isinstance(source, types.ModuleType): linecache.updatecache(inspect.getsourcefile(source)) source = inspect.getsource(source) elif isinstance(source, types.FunctionType): call_name = source.__name__ source = _source_of_function(source) else: source = textwrap.dedent(str(source)) if call_name is None and kwargs: raise TypeError("can't pass kwargs to non-function remote_exec") channel = self.newchannel() self._send(Message.CHANNEL_EXEC, channel.id, gateway_base.dumps_internal((source, call_name, kwargs))) return channel def remote_init_threads(self, num=None): """ DEPRECATED. Is currently a NO-OPERATION already.""" print ("WARNING: remote_init_threads() is a no-operation in execnet-1.2") class RInfo: def __init__(self, kwargs): self.__dict__.update(kwargs) def __repr__(self): info = ", ".join(["%s=%s" % item for item in self.__dict__.items()]) return "" % info RemoteStatus = RInfo def rinfo_source(channel): import sys, os channel.send(dict( executable = sys.executable, version_info = sys.version_info[:5], platform = sys.platform, cwd = os.getcwd(), pid = os.getpid(), )) def _find_non_builtin_globals(source, codeobj): try: import ast except ImportError: return None try: import __builtin__ except ImportError: import builtins as __builtin__ vars = dict.fromkeys(codeobj.co_varnames) all = [] for node in ast.walk(ast.parse(source)): if (isinstance(node, ast.Name) and node.id not in vars and node.id not in __builtin__.__dict__): all.append(node.id) return all def _source_of_function(function): if function.__name__ == '': raise ValueError("can't evaluate lambda functions'") #XXX: we dont check before remote instanciation # if arguments are used propperly args, varargs, keywords, defaults = inspect.getargspec(function) if args[0] != 'channel': raise ValueError('expected first function argument to be `channel`') if sys.version_info < (3,0): closure = function.func_closure codeobj = function.func_code else: closure = function.__closure__ codeobj = function.__code__ if closure is not None: raise ValueError("functions with closures can't be passed") try: source = inspect.getsource(function) except IOError: raise ValueError("can't find source file for %s" % function) source = textwrap.dedent(source) # just for inner functions used_globals = _find_non_builtin_globals(source, codeobj) if used_globals: raise ValueError( "the use of non-builtin globals isn't supported", used_globals, ) return source ceph-deploy-1.4.0/ceph_deploy/lib/remoto/lib/execnet/gateway_base.py0000644000076500000240000014402712303131044026237 0ustar alfredostaff00000000000000""" base execnet gateway code send to the other side for bootstrapping. 
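A condensed picture of what this module implements, driven through the
public execnet API (an editor's sketch, not upstream documentation)::

    gw = execnet.makegateway('popen')
    ch = gw.remote_exec('channel.send(channel.receive() + 1)')
    ch.send(41)
    assert ch.receive() == 42
    gw.exit()
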
NOTE: aims to be compatible to Python 2.5-3.X, Jython and IronPython (C) 2004-2013 Holger Krekel, Armin Rigo, Benjamin Peterson, Ronny Pfannschmidt and others """ from __future__ import with_statement import sys, os, weakref import traceback, struct # NOTE that we want to avoid try/except style importing # to avoid setting sys.exc_info() during import # ISPY3 = sys.version_info >= (3, 0) if ISPY3: from io import BytesIO exec("def do_exec(co, loc): exec(co, loc)\n" "def reraise(cls, val, tb): raise val\n") unicode = str _long_type = int from _thread import interrupt_main else: from StringIO import StringIO as BytesIO exec("def do_exec(co, loc): exec co in loc\n" "def reraise(cls, val, tb): raise cls, val, tb\n") bytes = str _long_type = long try: from thread import interrupt_main except ImportError: interrupt_main = None class EmptySemaphore: acquire = release = lambda self: None def get_execmodel(backend): if hasattr(backend, "backend"): return backend if backend == "thread": importdef = { 'get_ident': ['thread::get_ident', '_thread::get_ident'], '_start_new_thread': ['thread::start_new_thread', '_thread::start_new_thread'], 'threading': ["threading",], 'queue': ["queue", "Queue"], 'sleep': ['time::sleep'], 'subprocess': ['subprocess'], 'socket': ['socket'], '_fdopen': ['os::fdopen'], '_lock': ['threading'], '_event': ['threading'], } def exec_start(self, func, args=()): self._start_new_thread(func, args) elif backend == "eventlet": importdef = { 'get_ident': ['eventlet.green.thread::get_ident'], '_spawn_n': ['eventlet::spawn_n'], 'threading': ['eventlet.green.threading'], 'queue': ["eventlet.queue"], 'sleep': ['eventlet::sleep'], 'subprocess': ['eventlet.green.subprocess'], 'socket': ['eventlet.green.socket'], '_fdopen': ['eventlet.green.os::fdopen'], '_lock': ['eventlet.green.threading'], '_event': ['eventlet.green.threading'], } def exec_start(self, func, args=()): self._spawn_n(func, *args) elif backend == "gevent": importdef = { 'get_ident': ['gevent.thread::get_ident'], '_spawn_n': ['gevent::spawn'], 'threading': ['threading'], 'queue': ["gevent.queue"], 'sleep': ['gevent::sleep'], 'subprocess': ['gevent.subprocess'], 'socket': ['gevent.socket'], # XXX '_fdopen': ['gevent.fileobject::FileObjectThread'], '_lock': ['gevent.lock'], '_event': ['gevent.event'], } def exec_start(self, func, args=()): self._spawn_n(func, *args) else: raise ValueError("unknown execmodel %r" %(backend,)) class ExecModel: def __init__(self, name): self._importdef = importdef self.backend = name self._count = 0 def __repr__(self): return "" % self.backend def __getattr__(self, name): locs = self._importdef.get(name) if locs is None: raise AttributeError(name) for loc in locs: parts = loc.split("::") loc = parts.pop(0) try: mod = __import__(loc, None, None, "__doc__") except ImportError: pass else: if parts: mod = getattr(mod, parts[0]) setattr(self, name, mod) return mod raise AttributeError(name) start = exec_start def fdopen(self, fd, mode, bufsize=1): return self._fdopen(fd, mode, bufsize) def WorkerPool(self, size=None, hasprimary=False): return WorkerPool(self, size, hasprimary=hasprimary) def Semaphore(self, size=None): if size is None: return EmptySemaphore() return self._lock.Semaphore(size) def Lock(self): return self._lock.RLock() def RLock(self): return self._lock.RLock() def Event(self): event = self._event.Event() if sys.version_info < (2,7): # patch wait function to return event state instead of None real_wait = event.wait def wait(timeout=None): real_wait(timeout=timeout) return event.isSet() 
event.wait = wait return event def PopenPiped(self, args): PIPE = self.subprocess.PIPE return self.subprocess.Popen(args, stdout=PIPE, stdin=PIPE) return ExecModel(backend) class Reply(object): """ reply instances provide access to the result of a function execution that got dispatched through WorkerPool.spawn() """ def __init__(self, task, threadmodel): self.task = task self._result_ready = threadmodel.Event() self.running = True def get(self, timeout=None): """ get the result object from an asynchronous function execution. if the function execution raised an exception, then calling get() will reraise that exception including its traceback. """ self.waitfinish(timeout) try: return self._result except AttributeError: reraise(*(self._excinfo[:3])) # noqa def waitfinish(self, timeout=None): if not self._result_ready.wait(timeout): raise IOError("timeout waiting for %r" %(self.task, )) def run(self): func, args, kwargs = self.task try: try: self._result = func(*args, **kwargs) except: self._excinfo = sys.exc_info() finally: self._result_ready.set() self.running = False class WorkerPool(object): """ A WorkerPool allows to spawn function executions to threads, returning a reply object on which you can ask for the result (and get exceptions reraised) """ def __init__(self, execmodel, size=None, hasprimary=False): """ by default allow unlimited number of spawns. """ self.execmodel = execmodel self._size = size self._running_lock = self.execmodel.Lock() self._sem = self.execmodel.Semaphore(size) self._running = set() self._shutdown_event = self.execmodel.Event() if hasprimary: if self.execmodel.backend != "thread": raise ValueError("hasprimary=True requires thread model") self._primary_thread_event = self.execmodel.Event() def integrate_as_primary_thread(self): """ integrate the thread with which we are called as a primary thread to dispatch to when spawn is called. """ assert self.execmodel.backend == "thread", self.execmodel # XXX insert check if we really are in the main thread primary_thread_event = self._primary_thread_event # interacts with code at REF1 while not self._shutdown_event.isSet(): primary_thread_event.wait() func, args, kwargs = self._primary_thread_task if func is None: # waitall() woke us up to finish the loop break func(*args, **kwargs) primary_thread_event.clear() def shutdown(self): self._shutdown_event.set() def wait_for_shutdown(self, timeout=None): return self._shutdown_event.wait(timeout=timeout) def active_count(self): return len(self._running) def spawn(self, func, *args, **kwargs): """ return Reply object for the asynchronous dispatch of the given func(*args, **kwargs). """ reply = Reply((func, args, kwargs), self.execmodel) def run_and_release(): reply.run() try: with self._running_lock: self._running.remove(reply) self._sem.release() if not self._running: try: self._waitall_event.set() except AttributeError: pass except TypeError: pass self._sem.acquire() with self._running_lock: self._running.add(reply) # REF1 in 'thread' model we give priority to running in main thread primary_thread_event = getattr(self, "_primary_thread_event", None) if primary_thread_event is not None: if not primary_thread_event.isSet(): self._primary_thread_task = run_and_release, (), {} primary_thread_event.set() return reply self.execmodel.start(run_and_release, ()) return reply def waitall(self, timeout=None): """ wait until all previosuly spawns have terminated. 
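For example (an editor's sketch)::

    execmodel = get_execmodel('thread')
    pool = WorkerPool(execmodel)
    reply = pool.spawn(lambda x: x * 2, 21)
    assert reply.get() == 42   # get() reraises the function's exception, if any
    pool.waitall()
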
""" # if it exists signal primary_thread to finish its loop self._primary_thread_task = None, None, None try: self._primary_thread_event.set() except AttributeError: pass with self._running_lock: if not self._running: return True # if a Reply still runs, we let run_and_release # signal us -- note that we are still holding the # _running_lock to avoid race conditions self._waitall_event = self.execmodel.Event() return self._waitall_event.wait(timeout=timeout) sysex = (KeyboardInterrupt, SystemExit) DEBUG = os.environ.get('EXECNET_DEBUG') pid = os.getpid() if DEBUG == '2': def trace(*msg): try: line = " ".join(map(str, msg)) sys.stderr.write("[%s] %s\n" % (pid, line)) sys.stderr.flush() except Exception: pass # nothing we can do, likely interpreter-shutdown elif DEBUG: import tempfile, os.path fn = os.path.join(tempfile.gettempdir(), 'execnet-debug-%d' % pid) #sys.stderr.write("execnet-debug at %r" %(fn,)) debugfile = open(fn, 'w') def trace(*msg): try: line = " ".join(map(str, msg)) debugfile.write(line + "\n") debugfile.flush() except Exception: try: v = sys.exc_info()[1] sys.stderr.write( "[%s] exception during tracing: %r\n" % (pid, v)) except Exception: pass # nothing we can do, likely interpreter-shutdown else: notrace = trace = lambda *msg: None class Popen2IO: error = (IOError, OSError, EOFError) def __init__(self, outfile, infile, execmodel): # we need raw byte streams self.outfile, self.infile = outfile, infile if sys.platform == "win32": import msvcrt try: msvcrt.setmode(infile.fileno(), os.O_BINARY) msvcrt.setmode(outfile.fileno(), os.O_BINARY) except (AttributeError, IOError): pass self._read = getattr(infile, "buffer", infile).read self._write = getattr(outfile, "buffer", outfile).write self.execmodel = execmodel def read(self, numbytes): """Read exactly 'numbytes' bytes from the pipe. """ # a file in non-blocking mode may return less bytes, so we loop buf = bytes() while numbytes > len(buf): data = self._read(numbytes-len(buf)) if not data: raise EOFError("expected %d bytes, got %d" %(numbytes, len(buf))) buf += data return buf def write(self, data): """write out all data bytes. """ assert isinstance(data, bytes) self._write(data) self.outfile.flush() def close_read(self): self.infile.close() def close_write(self): self.outfile.close() class Message: """ encapsulates Messages and their wire protocol. 
""" _types = [] def __init__(self, msgcode, channelid=0, data=''): self.msgcode = msgcode self.channelid = channelid self.data = data @staticmethod def from_io(io): try: header = io.read(9) # type 1, channel 4, payload 4 if not header: raise EOFError("empty read") except EOFError: e = sys.exc_info()[1] raise EOFError('couldnt load message header, ' + e.args[0]) msgtype, channel, payload = struct.unpack('!bii', header) return Message(msgtype, channel, io.read(payload)) def to_io(self, io): if struct.pack is not None: header = struct.pack('!bii', self.msgcode, self.channelid, len(self.data)) io.write(header+self.data) def received(self, gateway): self._types[self.msgcode](self, gateway) def __repr__(self): class FakeChannel(object): _strconfig = False, False # never transform, never fail def __init__(self, id): self.id = id def __repr__(self): return '' % self.id FakeChannel.new = FakeChannel FakeChannel.gateway = FakeChannel name = self._types[self.msgcode].__name__.upper() try: data = loads_internal(self.data, FakeChannel) except LoadError: data = self.data r = repr(data) if len(r) > 90: return "" %(name, self.channelid, len(r)) else: return "" %(name, self.channelid, r) class GatewayReceivedTerminate(Exception): """ Receiverthread got termination message. """ def _setupmessages(): def status(message, gateway): # we use the channelid to send back information # but don't instantiate a channel object d = {'numchannels': len(gateway._channelfactory._channels), 'numexecuting': gateway._execpool.active_count(), 'execmodel': gateway.execmodel.backend, } gateway._send(Message.CHANNEL_DATA, message.channelid, dumps_internal(d)) gateway._send(Message.CHANNEL_CLOSE, message.channelid) def channel_exec(message, gateway): channel = gateway._channelfactory.new(message.channelid) gateway._local_schedulexec(channel=channel,sourcetask=message.data) def channel_data(message, gateway): gateway._channelfactory._local_receive(message.channelid, message.data) def channel_close(message, gateway): gateway._channelfactory._local_close(message.channelid) def channel_close_error(message, gateway): remote_error = RemoteError(loads_internal(message.data)) gateway._channelfactory._local_close(message.channelid, remote_error) def channel_last_message(message, gateway): gateway._channelfactory._local_close(message.channelid, sendonly=True) def gateway_terminate(message, gateway): raise GatewayReceivedTerminate(gateway) def reconfigure(message, gateway): if message.channelid == 0: target = gateway else: target = gateway._channelfactory.new(message.channelid) target._strconfig = loads_internal(message.data, gateway) types = [ status, reconfigure, gateway_terminate, channel_exec, channel_data, channel_close, channel_close_error, channel_last_message, ] for i, handler in enumerate(types): Message._types.append(handler) setattr(Message, handler.__name__.upper(), i) _setupmessages() def geterrortext(excinfo, format_exception=traceback.format_exception, sysex=sysex): try: l = format_exception(*excinfo) errortext = "".join(l) except sysex: raise except: errortext = '%s: %s' % (excinfo[0].__name__, excinfo[1]) return errortext class RemoteError(Exception): """ Exception containing a stringified error from the other side. 
""" def __init__(self, formatted): self.formatted = formatted Exception.__init__(self) def __str__(self): return self.formatted def __repr__(self): return "%s: %s" %(self.__class__.__name__, self.formatted) def warn(self): if self.formatted != INTERRUPT_TEXT: # XXX do this better sys.stderr.write("[%s] Warning: unhandled %r\n" % (os.getpid(), self,)) class TimeoutError(IOError): """ Exception indicating that a timeout was reached. """ NO_ENDMARKER_WANTED = object() class Channel(object): """Communication channel between two Python Interpreter execution points.""" RemoteError = RemoteError TimeoutError = TimeoutError _INTERNALWAKEUP = 1000 _executing = False def __init__(self, gateway, id): assert isinstance(id, int) self.gateway = gateway #XXX: defaults copied from Unserializer self._strconfig = getattr(gateway, '_strconfig', (True, False)) self.id = id self._items = self.gateway.execmodel.queue.Queue() self._closed = False self._receiveclosed = self.gateway.execmodel.Event() self._remoteerrors = [] def _trace(self, *msg): self.gateway._trace(self.id, *msg) def setcallback(self, callback, endmarker=NO_ENDMARKER_WANTED): """ set a callback function for receiving items. All already queued items will immediately trigger the callback. Afterwards the callback will execute in the receiver thread for each received data item and calls to ``receive()`` will raise an error. If an endmarker is specified the callback will eventually be called with the endmarker when the channel closes. """ _callbacks = self.gateway._channelfactory._callbacks with self.gateway._receivelock: if self._items is None: raise IOError("%r has callback already registered" %(self,)) items = self._items self._items = None while 1: try: olditem = items.get(block=False) except self.gateway.execmodel.queue.Empty: if not (self._closed or self._receiveclosed.isSet()): _callbacks[self.id] = ( callback, endmarker, self._strconfig, ) break else: if olditem is ENDMARKER: items.put(olditem) # for other receivers if endmarker is not NO_ENDMARKER_WANTED: callback(endmarker) break else: callback(olditem) def __repr__(self): flag = self.isclosed() and "closed" or "open" return "" % (self.id, flag) def __del__(self): if self.gateway is None: # can be None in tests return self._trace("channel.__del__") # no multithreading issues here, because we have the last ref to 'self' if self._closed: # state transition "closed" --> "deleted" for error in self._remoteerrors: error.warn() elif self._receiveclosed.isSet(): # state transition "sendonly" --> "deleted" # the remote channel is already in "deleted" state, nothing to do pass else: # state transition "opened" --> "deleted" # check if we are in the middle of interpreter shutdown # in which case the process will go away and we probably # don't need to try to send a closing or last message # (and often it won't work anymore to send things out) if Message is not None: if self._items is None: # has_callback msgcode = Message.CHANNEL_LAST_MESSAGE else: msgcode = Message.CHANNEL_CLOSE try: self.gateway._send(msgcode, self.id) except (IOError, ValueError): # ignore problems with sending pass def _getremoteerror(self): try: return self._remoteerrors.pop(0) except IndexError: try: return self.gateway._error except AttributeError: pass return None # # public API for channel objects # def isclosed(self): """ return True if the channel is closed. A closed channel may still hold items. """ return self._closed def makefile(self, mode='w', proxyclose=False): """ return a file-like object. 
mode can be 'w' or 'r' for writeable/readable files. if proxyclose is true file.close() will also close the channel. """ if mode == "w": return ChannelFileWrite(channel=self, proxyclose=proxyclose) elif mode == "r": return ChannelFileRead(channel=self, proxyclose=proxyclose) raise ValueError("mode %r not availabe" %(mode,)) def close(self, error=None): """ close down this channel with an optional error message. Note that closing of a channel tied to remote_exec happens automatically at the end of execution and cannot be done explicitely. """ if self._executing: raise IOError("cannot explicitly close channel within remote_exec") if self._closed: self.gateway._trace(self, "ignoring redundant call to close()") if not self._closed: # state transition "opened/sendonly" --> "closed" # threads warning: the channel might be closed under our feet, # but it's never damaging to send too many CHANNEL_CLOSE messages # however, if the other side triggered a close already, we # do not send back a closed message. if not self._receiveclosed.isSet(): put = self.gateway._send if error is not None: put(Message.CHANNEL_CLOSE_ERROR, self.id, dumps_internal(error)) else: put(Message.CHANNEL_CLOSE, self.id) self._trace("sent channel close message") if isinstance(error, RemoteError): self._remoteerrors.append(error) self._closed = True # --> "closed" self._receiveclosed.set() queue = self._items if queue is not None: queue.put(ENDMARKER) self.gateway._channelfactory._no_longer_opened(self.id) def waitclose(self, timeout=None): """ wait until this channel is closed (or the remote side otherwise signalled that no more data was being sent). The channel may still hold receiveable items, but not receive any more after waitclose() has returned. Exceptions from executing code on the other side are reraised as local channel.RemoteErrors. EOFError is raised if the reading-connection was prematurely closed, which often indicates a dying process. self.TimeoutError is raised after the specified number of seconds (default is None, i.e. wait indefinitely). """ self._receiveclosed.wait(timeout=timeout) # wait for non-"opened" state if not self._receiveclosed.isSet(): raise self.TimeoutError("Timeout after %r seconds" % timeout) error = self._getremoteerror() if error: raise error def send(self, item): """sends the given item to the other side of the channel, possibly blocking if the sender queue is full. The item must be a simple python type and will be copied to the other side by value. IOError is raised if the write pipe was prematurely closed. """ if self.isclosed(): raise IOError("cannot send to %r" %(self,)) self.gateway._send(Message.CHANNEL_DATA, self.id, dumps_internal(item)) def receive(self, timeout=None): """receive a data item that was sent from the other side. timeout: None [default] blocked waiting. A positive number indicates the number of seconds after which a channel.TimeoutError exception will be raised if no item was received. Note that exceptions from the remotely executing code will be reraised as channel.RemoteError exceptions containing a textual representation of the remote traceback. 
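Illustrative round trip (an editor's sketch)::

    ch = gw.remote_exec('channel.send("pong")')
    ch.receive(timeout=10)   # -> 'pong', or TimeoutError after 10 seconds
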
""" itemqueue = self._items if itemqueue is None: raise IOError("cannot receive(), channel has receiver callback") try: x = itemqueue.get(timeout=timeout) except self.gateway.execmodel.queue.Empty: raise self.TimeoutError("no item after %r seconds" %(timeout)) if x is ENDMARKER: itemqueue.put(x) # for other receivers raise self._getremoteerror() or EOFError() else: return x def __iter__(self): return self def next(self): try: return self.receive() except EOFError: raise StopIteration __next__ = next def reconfigure(self, py2str_as_py3str=True, py3str_as_py2str=False): """ set the string coercion for this channel the default is to try to convert py2 str as py3 str, but not to try and convert py3 str to py2 str """ self._strconfig = (py2str_as_py3str, py3str_as_py2str) data = dumps_internal(self._strconfig) self.gateway._send(Message.RECONFIGURE, self.id, data=data) ENDMARKER = object() INTERRUPT_TEXT = "keyboard-interrupted" class ChannelFactory(object): def __init__(self, gateway, startcount=1): self._channels = weakref.WeakValueDictionary() self._callbacks = {} self._writelock = gateway.execmodel.Lock() self.gateway = gateway self.count = startcount self.finished = False self._list = list # needed during interp-shutdown def new(self, id=None): """ create a new Channel with 'id' (or create new id if None). """ with self._writelock: if self.finished: raise IOError("connexion already closed: %s" % (self.gateway,)) if id is None: id = self.count self.count += 2 try: channel = self._channels[id] except KeyError: channel = self._channels[id] = Channel(self.gateway, id) return channel def channels(self): return self._list(self._channels.values()) # # internal methods, called from the receiver thread # def _no_longer_opened(self, id): try: del self._channels[id] except KeyError: pass try: callback, endmarker, strconfig = self._callbacks.pop(id) except KeyError: pass else: if endmarker is not NO_ENDMARKER_WANTED: callback(endmarker) def _local_close(self, id, remoteerror=None, sendonly=False): channel = self._channels.get(id) if channel is None: # channel already in "deleted" state if remoteerror: remoteerror.warn() self._no_longer_opened(id) else: # state transition to "closed" state if remoteerror: channel._remoteerrors.append(remoteerror) queue = channel._items if queue is not None: queue.put(ENDMARKER) self._no_longer_opened(id) if not sendonly: # otherwise #--> "sendonly" channel._closed = True # --> "closed" channel._receiveclosed.set() def _local_receive(self, id, data): # executes in receiver thread channel = self._channels.get(id) try: callback, endmarker, strconfig = self._callbacks[id] except KeyError: queue = channel and channel._items if queue is None: pass # drop data else: item = loads_internal(data, channel) queue.put(item) else: try: data = loads_internal(data, channel, strconfig) callback(data) # even if channel may be already closed except Exception: excinfo = sys.exc_info() self.gateway._trace("exception during callback: %s" % excinfo[1]) errortext = self.gateway._geterrortext(excinfo) self.gateway._send(Message.CHANNEL_CLOSE_ERROR, id, dumps_internal(errortext)) self._local_close(id, errortext) def _finished_receiving(self): self.gateway._trace("finished receiving") with self._writelock: self.finished = True for id in self._list(self._channels): self._local_close(id, sendonly=True) for id in self._list(self._callbacks): self._no_longer_opened(id) class ChannelFile(object): def __init__(self, channel, proxyclose=True): self.channel = channel self._proxyclose = proxyclose def 
isatty(self): return False def close(self): if self._proxyclose: self.channel.close() def __repr__(self): state = self.channel.isclosed() and 'closed' or 'open' return '' %(self.channel.id, state) class ChannelFileWrite(ChannelFile): def write(self, out): self.channel.send(out) def flush(self): pass class ChannelFileRead(ChannelFile): def __init__(self, channel, proxyclose=True): super(ChannelFileRead, self).__init__(channel, proxyclose) self._buffer = None def read(self, n): try: if self._buffer is None: self._buffer = self.channel.receive() while len(self._buffer) < n: self._buffer += self.channel.receive() except EOFError: self.close() if self._buffer is None: ret = "" else: ret = self._buffer[:n] self._buffer = self._buffer[n:] return ret def readline(self): if self._buffer is not None: i = self._buffer.find("\n") if i != -1: return self.read(i+1) line = self.read(len(self._buffer)+1) else: line = self.read(1) while line and line[-1] != "\n": c = self.read(1) if not c: break line += c return line class BaseGateway(object): exc_info = sys.exc_info _sysex = sysex id = "" def __init__(self, io, id, _startcount=2): self.execmodel = io.execmodel self._io = io self.id = id self._strconfig = (Unserializer.py2str_as_py3str, Unserializer.py3str_as_py2str) self._channelfactory = ChannelFactory(self, _startcount) self._receivelock = self.execmodel.RLock() # globals may be NONE at process-termination self.__trace = trace self._geterrortext = geterrortext self._receivepool = self.execmodel.WorkerPool(1) def _trace(self, *msg): self.__trace(self.id, *msg) def _initreceive(self): self._receiverthread = self._receivepool.spawn(self._thread_receiver) def _thread_receiver(self): def log(*msg): self._trace("[receiver-thread]", *msg) log("RECEIVERTHREAD: starting to run") io = self._io try: try: while 1: msg = Message.from_io(io) log("received", msg) with self._receivelock: msg.received(self) del msg except (KeyboardInterrupt, GatewayReceivedTerminate): pass except EOFError: log("EOF without prior gateway termination message") self._error = self.exc_info()[1] except Exception: log(self._geterrortext(self.exc_info())) finally: try: log('entering finalization') # wake up and terminate any execution waiting to receive self._channelfactory._finished_receiving() log('terminating execution') self._terminate_execution() log('closing read') self._io.close_read() log('closing write') self._io.close_write() log('leaving finalization') except: # be silent at shutdown pass def _terminate_execution(self): pass def _send(self, msgcode, channelid=0, data=bytes()): message = Message(msgcode, channelid, data) try: message.to_io(self._io) self._trace('sent', message) except (IOError, ValueError): e = sys.exc_info()[1] self._trace('failed to send', message, e) # ValueError might be because the IO is already closed raise IOError("cannot send (already closed?)") def _local_schedulexec(self, channel, sourcetask): channel.close("execution disallowed") # _____________________________________________________________________ # # High Level Interface # _____________________________________________________________________ # def newchannel(self): """ return a new independent channel. """ return self._channelfactory.new() def join(self, timeout=None): """ Wait for receiverthread to terminate. 
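Typically called after asking the other side to terminate (an editor's
sketch)::

    gw.exit()   # request remote termination
    gw.join()   # wait for the local receiver thread to finish
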
""" self._trace("waiting for receiver thread to finish") self._receiverthread.waitfinish() class SlaveGateway(BaseGateway): def _local_schedulexec(self, channel, sourcetask): sourcetask = loads_internal(sourcetask) self._execpool.spawn(self.executetask, ((channel, sourcetask))) def _terminate_execution(self): # called from receiverthread self._trace("shutting down execution pool") self._execpool.shutdown() if not self._execpool.waitall(5.0): self._trace("execution ongoing after 5 secs, trying interrupt_main") # We try hard to terminate execution based on the assumption # that there is only one gateway object running per-process. if sys.platform != "win32": self._trace("sending ourselves a SIGINT") os.kill(os.getpid(), 2) # send ourselves a SIGINT elif interrupt_main is not None: self._trace("calling interrupt_main()") interrupt_main() if not self._execpool.waitall(10.0): self._trace("execution did not finish in 10 secs, " "calling os._exit()") os._exit(1) def serve(self): trace = lambda msg: self._trace("[serve] " + msg) hasprimary = self.execmodel.backend == "thread" self._execpool = self.execmodel.WorkerPool(hasprimary=hasprimary) trace("spawning receiver thread") self._initreceive() try: try: if hasprimary: trace("integrating as main primary exec thread") self._execpool.integrate_as_primary_thread() else: trace("waiting for execution to finish") self._execpool.wait_for_shutdown() finally: trace("execution finished") trace("joining receiver thread") self.join() except KeyboardInterrupt: # in the slave we can't really do anything sensible trace("swallowing keyboardinterrupt, serve finished") def executetask(self, item): try: channel, (source, call_name, kwargs) = item if not ISPY3 and kwargs: # some python2 versions do not accept unicode keyword params # note: Unserializer generally turns py2-str to py3-str objects newkwargs = {} for name, value in kwargs.items(): if isinstance(name, unicode): name = name.encode('ascii') newkwargs[name] = value kwargs = newkwargs loc = {'channel' : channel, '__name__': '__channelexec__'} self._trace("execution starts[%s]: %s" % (channel.id, repr(source)[:50])) channel._executing = True try: co = compile(source+'\n', '', 'exec') do_exec(co, loc) # noqa if call_name: self._trace('calling %s(**%60r)' % (call_name, kwargs)) function = loc[call_name] function(channel, **kwargs) finally: channel._executing = False self._trace("execution finished") except KeyboardInterrupt: channel.close(INTERRUPT_TEXT) raise except: excinfo = self.exc_info() if not isinstance(excinfo[1], EOFError): if not channel.gateway._channelfactory.finished: self._trace("got exception: %r" % (excinfo[1],)) errortext = self._geterrortext(excinfo) channel.close(errortext) return self._trace("ignoring EOFError because receiving finished") channel.close() # # Cross-Python pickling code, tested from test_serializer.py # class DataFormatError(Exception): pass class DumpError(DataFormatError): """Error while serializing an object.""" class LoadError(DataFormatError): """Error while unserializing an object.""" if ISPY3: def bchr(n): return bytes([n]) else: bchr = chr DUMPFORMAT_VERSION = bchr(1) FOUR_BYTE_INT_MAX = 2147483647 FLOAT_FORMAT = "!d" FLOAT_FORMAT_SIZE = struct.calcsize(FLOAT_FORMAT) class _Stop(Exception): pass class Unserializer(object): num2func = {} # is filled after this class definition py2str_as_py3str = True # True py3str_as_py2str = False # false means py2 will get unicode def __init__(self, stream, channel_or_gateway=None, strconfig=None): gateway = getattr(channel_or_gateway, 
'gateway', channel_or_gateway) strconfig = getattr(channel_or_gateway, '_strconfig', strconfig) if strconfig: self.py2str_as_py3str, self.py3str_as_py2str = strconfig self.stream = stream self.channelfactory = getattr(gateway, '_channelfactory', gateway) def load(self, versioned=False): if versioned: ver = self.stream.read(1) if ver != DUMPFORMAT_VERSION: raise LoadError("wrong dumpformat version") self.stack = [] try: while True: opcode = self.stream.read(1) if not opcode: raise EOFError try: loader = self.num2func[opcode] except KeyError: raise LoadError("unkown opcode %r - " "wire protocol corruption?" % (opcode,)) loader(self) except _Stop: if len(self.stack) != 1: raise LoadError("internal unserialization error") return self.stack.pop(0) else: raise LoadError("didn't get STOP") def load_none(self): self.stack.append(None) def load_true(self): self.stack.append(True) def load_false(self): self.stack.append(False) def load_int(self): i = self._read_int4() self.stack.append(i) def load_longint(self): s = self._read_byte_string() self.stack.append(int(s)) if ISPY3: load_long = load_int load_longlong = load_longint else: def load_long(self): i = self._read_int4() self.stack.append(long(i)) def load_longlong(self): l = self._read_byte_string() self.stack.append(long(l)) def load_float(self): binary = self.stream.read(FLOAT_FORMAT_SIZE) self.stack.append(struct.unpack(FLOAT_FORMAT, binary)[0]) def _read_int4(self): return struct.unpack("!i", self.stream.read(4))[0] def _read_byte_string(self): length = self._read_int4() as_bytes = self.stream.read(length) return as_bytes def load_py3string(self): as_bytes = self._read_byte_string() if not ISPY3 and self.py3str_as_py2str: # XXX Should we try to decode into latin-1? self.stack.append(as_bytes) else: self.stack.append(as_bytes.decode("utf-8")) def load_py2string(self): as_bytes = self._read_byte_string() if ISPY3 and self.py2str_as_py3str: s = as_bytes.decode("latin-1") else: s = as_bytes self.stack.append(s) def load_bytes(self): s = self._read_byte_string() self.stack.append(s) def load_unicode(self): self.stack.append(self._read_byte_string().decode("utf-8")) def load_newlist(self): length = self._read_int4() self.stack.append([None] * length) def load_setitem(self): if len(self.stack) < 3: raise LoadError("not enough items for setitem") value = self.stack.pop() key = self.stack.pop() self.stack[-1][key] = value def load_newdict(self): self.stack.append({}) def _load_collection(self, type_): length = self._read_int4() if length: res = type_(self.stack[-length:]) del self.stack[-length:] self.stack.append(res) else: self.stack.append(type_()) def load_buildtuple(self): self._load_collection(tuple) def load_set(self): self._load_collection(set) def load_frozenset(self): self._load_collection(frozenset) def load_stop(self): raise _Stop def load_channel(self): id = self._read_int4() newchannel = self.channelfactory.new(id) self.stack.append(newchannel) # automatically build opcodes and byte-encoding class opcode: """ container for name -> num mappings. """ def _buildopcodes(): l = [] for name, func in Unserializer.__dict__.items(): if name.startswith("load_"): opname = name[5:].upper() l.append((opname, func)) l.sort() for i,(opname, func) in enumerate(l): assert i < 26, "xxx" i = bchr(64+i) Unserializer.num2func[i] = func setattr(opcode, opname, i) _buildopcodes() def dumps(obj): """ return a serialized bytestring of the given obj. The obj and all contained objects must be of a builtin python type (so nested dicts, sets, etc. 
are all ok but not user-level instances). """ return _Serializer().save(obj, versioned=True) def dump(byteio, obj): """ write a serialized bytestring of the given obj to the given stream. """ _Serializer(write=byteio.write).save(obj, versioned=True) def loads(bytestring, py2str_as_py3str=False, py3str_as_py2str=False): """ return the object as deserialized from the given bytestring. py2str_as_py3str: if true then string (str) objects previously dumped on Python2 will be loaded as Python3 strings which really are text objects. py3str_as_py2str: if true then string (str) objects previously dumped on Python3 will be loaded as Python2 strings instead of unicode objects. if the bytestring was dumped with an incompatible protocol version or if the bytestring is corrupted, the ``execnet.DataFormatError`` will be raised. """ io = BytesIO(bytestring) return load(io, py2str_as_py3str=py2str_as_py3str, py3str_as_py2str=py3str_as_py2str) def load(io, py2str_as_py3str=False, py3str_as_py2str=False): """ derserialize an object form the specified stream. Behaviour and parameters are otherwise the same as with ``loads`` """ strconfig=(py2str_as_py3str, py3str_as_py2str) return Unserializer(io, strconfig=strconfig).load(versioned=True) def loads_internal(bytestring, channelfactory=None, strconfig=None): io = BytesIO(bytestring) return Unserializer(io, channelfactory, strconfig).load() def dumps_internal(obj): return _Serializer().save(obj) class _Serializer(object): _dispatch = {} def __init__(self, write=None): if write is None: self._streamlist = [] write = self._streamlist.append self._write = write def save(self, obj, versioned=False): # calling here is not re-entrant but multiple instances # may write to the same stream because of the common platform # atomic-write guaruantee (concurrent writes each happen atomicly) if versioned: self._write(DUMPFORMAT_VERSION) self._save(obj) self._write(opcode.STOP) try: streamlist = self._streamlist except AttributeError: return None return type(streamlist[0])().join(streamlist) def _save(self, obj): tp = type(obj) try: dispatch = self._dispatch[tp] except KeyError: methodname = 'save_' + tp.__name__ meth = getattr(self.__class__, methodname, None) if meth is None: raise DumpError("can't serialize %s" % (tp,)) dispatch = self._dispatch[tp] = meth dispatch(self, obj) def save_NoneType(self, non): self._write(opcode.NONE) def save_bool(self, boolean): if boolean: self._write(opcode.TRUE) else: self._write(opcode.FALSE) def save_bytes(self, bytes_): self._write(opcode.BYTES) self._write_byte_sequence(bytes_) if ISPY3: def save_str(self, s): self._write(opcode.PY3STRING) self._write_unicode_string(s) else: def save_str(self, s): self._write(opcode.PY2STRING) self._write_byte_sequence(s) def save_unicode(self, s): self._write(opcode.UNICODE) self._write_unicode_string(s) def _write_unicode_string(self, s): try: as_bytes = s.encode("utf-8") except UnicodeEncodeError: raise DumpError("strings must be utf-8 encodable") self._write_byte_sequence(as_bytes) def _write_byte_sequence(self, bytes_): self._write_int4(len(bytes_), "string is too long") self._write(bytes_) def _save_integral(self, i, short_op, long_op): if i <= FOUR_BYTE_INT_MAX: self._write(short_op) self._write_int4(i) else: self._write(long_op) self._write_byte_sequence(str(i).rstrip("L").encode("ascii")) def save_int(self, i): self._save_integral(i, opcode.INT, opcode.LONGINT) def save_long(self, l): self._save_integral(l, opcode.LONG, opcode.LONGLONG) def save_float(self, flt): self._write(opcode.FLOAT) 
self._write(struct.pack(FLOAT_FORMAT, flt)) def _write_int4(self, i, error="int must be less than %i" % (FOUR_BYTE_INT_MAX,)): if i > FOUR_BYTE_INT_MAX: raise DumpError(error) self._write(struct.pack("!i", i)) def save_list(self, L): self._write(opcode.NEWLIST) self._write_int4(len(L), "list is too long") for i, item in enumerate(L): self._write_setitem(i, item) def _write_setitem(self, key, value): self._save(key) self._save(value) self._write(opcode.SETITEM) def save_dict(self, d): self._write(opcode.NEWDICT) for key, value in d.items(): self._write_setitem(key, value) def save_tuple(self, tup): for item in tup: self._save(item) self._write(opcode.BUILDTUPLE) self._write_int4(len(tup), "tuple is too long") def _write_set(self, s, op): for item in s: self._save(item) self._write(op) self._write_int4(len(s), "set is too long") def save_set(self, s): self._write_set(s, opcode.SET) def save_frozenset(self, s): self._write_set(s, opcode.FROZENSET) def save_Channel(self, channel): self._write(opcode.CHANNEL) self._write_int4(channel.id) def init_popen_io(execmodel): if not hasattr(os, 'dup'): # jython io = Popen2IO(sys.stdout, sys.stdin, execmodel) import tempfile sys.stdin = tempfile.TemporaryFile('r') sys.stdout = tempfile.TemporaryFile('w') else: try: devnull = os.devnull except AttributeError: if os.name == 'nt': devnull = 'NUL' else: devnull = '/dev/null' # stdin stdin = execmodel.fdopen(os.dup(0), 'r', 1) fd = os.open(devnull, os.O_RDONLY) os.dup2(fd, 0) os.close(fd) # stdout stdout = execmodel.fdopen(os.dup(1), 'w', 1) fd = os.open(devnull, os.O_WRONLY) os.dup2(fd, 1) # stderr for win32 if os.name == 'nt': sys.stderr = execmodel.fdopen(os.dup(2), 'w', 1) os.dup2(fd, 2) os.close(fd) io = Popen2IO(stdout, stdin, execmodel) sys.stdin = execmodel.fdopen(0, 'r', 1) sys.stdout = execmodel.fdopen(1, 'w', 1) return io def serve(io, id): trace("creating slavegateway on %r" %(io,)) SlaveGateway(io=io, id=id, _startcount=2).serve() ceph-deploy-1.4.0/ceph_deploy/lib/remoto/lib/execnet/gateway_bootstrap.py0000644000076500000240000000505412303131044027336 0ustar alfredostaff00000000000000""" code to initialize the remote side of a gateway once the io is created """ import os import inspect import execnet from execnet import gateway_base from execnet.gateway import Gateway importdir = os.path.dirname(os.path.dirname(execnet.__file__)) class HostNotFound(Exception): pass def bootstrap_popen(io, spec): sendexec(io, "import sys", "sys.path.insert(0, %r)" % importdir, "from execnet.gateway_base import serve, init_popen_io, get_execmodel", "sys.stdout.write('1')", "sys.stdout.flush()", "execmodel = get_execmodel(%r)" % spec.execmodel, "serve(init_popen_io(execmodel), id='%s-slave')" % spec.id, ) s = io.read(1) assert s == "1".encode('ascii'), repr(s) def bootstrap_ssh(io, spec): try: sendexec(io, inspect.getsource(gateway_base), "execmodel = get_execmodel(%r)" % spec.execmodel, 'io = init_popen_io(execmodel)', "io.write('1'.encode('ascii'))", "serve(io, id='%s-slave')" % spec.id, ) s = io.read(1) assert s == "1".encode('ascii') except EOFError: ret = io.wait() if ret == 255: raise HostNotFound(io.remoteaddress) def bootstrap_socket(io, id): #XXX: switch to spec from execnet.gateway_socket import SocketIO sendexec(io, inspect.getsource(gateway_base), 'import socket', inspect.getsource(SocketIO), "try: execmodel", "except NameError:", " execmodel = get_execmodel('thread')", "io = SocketIO(clientsock, execmodel)", "io.write('1'.encode('ascii'))", "serve(io, id='%s-slave')" % id, ) s = io.read(1) assert s == 
"1".encode('ascii') def sendexec(io, *sources): source = "\n".join(sources) io.write((repr(source)+ "\n").encode('ascii')) def fix_pid_for_jython_popen(gw): """ fix for jython 2.5.1 """ spec, io = gw.spec, gw._io if spec.popen and not spec.via: #XXX: handle the case of remote being jython # and not having the popen pid if io.popen.pid is None: io.popen.pid = gw.remote_exec( "import os; channel.send(os.getpid())").receive() def bootstrap(io, spec): if spec.popen: bootstrap_popen(io, spec) elif spec.ssh: bootstrap_ssh(io, spec) elif spec.socket: bootstrap_socket(io, spec) else: raise ValueError('unknown gateway type, cant bootstrap') gw = Gateway(io, spec) fix_pid_for_jython_popen(gw) return gw ceph-deploy-1.4.0/ceph_deploy/lib/remoto/lib/execnet/gateway_io.py0000644000076500000240000001450712303131044025733 0ustar alfredostaff00000000000000""" execnet io initialization code creates io instances used for gateway io """ import os import sys try: from execnet.gateway_base import Popen2IO, Message except ImportError: from __main__ import Popen2IO, Message class Popen2IOMaster(Popen2IO): def __init__(self, args, execmodel): self.popen = p = execmodel.PopenPiped(args) Popen2IO.__init__(self, p.stdin, p.stdout, execmodel=execmodel) def wait(self): try: return self.popen.wait() except OSError: pass # subprocess probably dead already def kill(self): killpopen(self.popen) def killpopen(popen): try: if hasattr(popen, 'kill'): popen.kill() else: killpid(popen.pid) except EnvironmentError: sys.stderr.write("ERROR killing: %s\n" %(sys.exc_info()[1])) sys.stderr.flush() def killpid(pid): if hasattr(os, 'kill'): os.kill(pid, 15) elif sys.platform == "win32" or getattr(os, '_name', None) == 'nt': try: import ctypes except ImportError: import subprocess # T: treekill, F: Force cmd = ("taskkill /T /F /PID %d" %(pid)).split() ret = subprocess.call(cmd) if ret != 0: raise EnvironmentError("taskkill returned %r" %(ret,)) else: PROCESS_TERMINATE = 1 handle = ctypes.windll.kernel32.OpenProcess( PROCESS_TERMINATE, False, pid) ctypes.windll.kernel32.TerminateProcess(handle, -1) ctypes.windll.kernel32.CloseHandle(handle) else: raise EnvironmentError("no method to kill %s" %(pid,)) popen_bootstrapline = "import sys;exec(eval(sys.stdin.readline()))" def popen_args(spec): python = spec.python or sys.executable args = str(python).split(' ') args.append('-u') if spec is not None and spec.dont_write_bytecode: args.append("-B") # Slight gymnastics in ordering these arguments because CPython (as of # 2.7.1) ignores -B if you provide `python -c "something" -B` args.extend(['-c', popen_bootstrapline]) return args def ssh_args(spec): remotepython = spec.python or "python" args = ["ssh", "-C" ] if spec.ssh_config is not None: args.extend(['-F', str(spec.ssh_config)]) args.extend(spec.ssh.split()) remotecmd = '%s -c "%s"' % (remotepython, popen_bootstrapline) args.append(remotecmd) return args def create_io(spec, execmodel): if spec.popen: args = popen_args(spec) return Popen2IOMaster(args, execmodel) if spec.ssh: args = ssh_args(spec) io = Popen2IOMaster(args, execmodel) io.remoteaddress = spec.ssh return io # # Proxy Gateway handling code # # master: proxy initiator # forwarder: forwards between master and sub # sub: sub process that is proxied to the initiator RIO_KILL = 1 RIO_WAIT = 2 RIO_REMOTEADDRESS = 3 RIO_CLOSE_WRITE = 4 class ProxyIO(object): """ A Proxy IO object allows to instantiate a Gateway through another "via" gateway. 
A master:ProxyIO object provides an IO object effectively connected to the sub via the forwarder. To achieve this, master:ProxyIO interacts with forwarder:serve_proxy_io() which itself instantiates and interacts with the sub. """ def __init__(self, proxy_channel, execmodel): # after exchanging the control channel we use proxy_channel # for messaging IO self.controlchan = proxy_channel.gateway.newchannel() proxy_channel.send(self.controlchan) self.iochan = proxy_channel self.iochan_file = self.iochan.makefile('r') self.execmodel = execmodel def read(self, nbytes): return self.iochan_file.read(nbytes) def write(self, data): return self.iochan.send(data) def _controll(self, event): self.controlchan.send(event) return self.controlchan.receive() def close_write(self): self._controll(RIO_CLOSE_WRITE) def kill(self): self._controll(RIO_KILL) def wait(self): return self._controll(RIO_WAIT) @property def remoteaddress(self): return self._controll(RIO_REMOTEADDRESS) def __repr__(self): return '<ProxyIO [%s]>' % (self.iochan.gateway.id, ) class PseudoSpec: def __init__(self, vars): self.__dict__.update(vars) def __getattr__(self, name): return None def serve_proxy_io(proxy_channelX): execmodel = proxy_channelX.gateway.execmodel _trace = proxy_channelX.gateway._trace tag = "serve_proxy_io:%s " % proxy_channelX.id def log(*msg): _trace(tag + msg[0], *msg[1:]) spec = PseudoSpec(proxy_channelX.receive()) # create sub IO object which we will proxy back to our proxy initiator sub_io = create_io(spec, execmodel) control_chan = proxy_channelX.receive() log("got control chan", control_chan) # read data from master, forward it to the sub # XXX writing might block, thus blocking the receiver thread def forward_to_sub(data): log("forward data to sub, size %s" % len(data)) sub_io.write(data) proxy_channelX.setcallback(forward_to_sub) def controll(data): if data==RIO_WAIT: control_chan.send(sub_io.wait()) elif data==RIO_KILL: control_chan.send(sub_io.kill()) elif data==RIO_REMOTEADDRESS: control_chan.send(sub_io.remoteaddress) elif data==RIO_CLOSE_WRITE: control_chan.send(sub_io.close_write()) control_chan.setcallback(controll) # write data to the master coming from the sub forward_to_master_file = proxy_channelX.makefile("w") # read bootstrap byte from sub, send it on to master log('reading bootstrap byte from sub', spec.id) initial = sub_io.read(1) assert initial == '1'.encode('ascii'), initial log('forwarding bootstrap byte from sub', spec.id) forward_to_master_file.write(initial) # enter message forwarding loop while True: try: message = Message.from_io(sub_io) except EOFError: log('EOF from sub, terminating proxying loop', spec.id) break message.to_io(forward_to_master_file) # proxy_channelX will be closed from remote_exec's finalization code if __name__ == "__channelexec__": serve_proxy_io(channel) # noqa ceph-deploy-1.4.0/ceph_deploy/lib/remoto/lib/execnet/gateway_socket.py0000644000076500000240000000476512303131044026611 0ustar alfredostaff00000000000000from execnet.gateway_bootstrap import HostNotFound import sys try: bytes except NameError: bytes = str class SocketIO: def __init__(self, sock, execmodel): self.sock = sock self.execmodel = execmodel socket = execmodel.socket try: sock.setsockopt(socket.SOL_IP, socket.IP_TOS, 0x10)# IPTOS_LOWDELAY sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1) except (AttributeError, socket.error): sys.stderr.write("WARNING: cannot set socketoption") def read(self, numbytes): "Read exactly 'numbytes' bytes from the socket."
buf = bytes() while len(buf) < numbytes: t = self.sock.recv(numbytes - len(buf)) if not t: raise EOFError buf += t return buf def write(self, data): self.sock.sendall(data) def close_read(self): try: self.sock.shutdown(0) except self.execmodel.socket.error: pass def close_write(self): try: self.sock.shutdown(1) except self.execmodel.socket.error: pass def wait(self): pass def kill(self): pass def start_via(gateway, hostport=None): """ return a host, port tuple, after instantiating a socketserver on the given gateway """ if hostport is None: host, port = ('localhost', 0) else: host, port = hostport from execnet.script import socketserver # execute the above socketserverbootstrap on the other side channel = gateway.remote_exec(socketserver) channel.send((host, port)) (realhost, realport) = channel.receive() #self._trace("new_remote received" # "port=%r, hostname = %r" %(realport, hostname)) if not realhost or realhost=="0.0.0.0": realhost = "localhost" return realhost, realport def create_io(spec, group, execmodel): assert not spec.python, ( "socket: specifying python executables not yet supported") gateway_id = spec.installvia if gateway_id: host, port = start_via(group[gateway_id]) else: host, port = spec.socket.split(":") port = int(port) socket = execmodel.socket sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) io = SocketIO(sock, execmodel) io.remoteaddress = '%s:%d' % (host, port) try: sock.connect((host, port)) except execmodel.socket.gaierror: raise HostNotFound(str(sys.exc_info()[1])) return io ceph-deploy-1.4.0/ceph_deploy/lib/remoto/lib/execnet/multi.py0000644000076500000240000002347612303131044024742 0ustar alfredostaff00000000000000""" Managing Gateway Groups and interactions with multiple channels. (c) 2008-2014, Holger Krekel and others """ import sys, atexit from execnet import XSpec from execnet import gateway_io, gateway_bootstrap from execnet.gateway_base import reraise, trace, get_execmodel from threading import Lock NO_ENDMARKER_WANTED = object() class Group(object): """ Gateway Groups. """ defaultspec = "popen" def __init__(self, xspecs=(), execmodel="thread"): """ initialize group and make gateways as specified. execmodel can be 'thread' or 'eventlet'. """ self._gateways = [] self._autoidcounter = 0 self._autoidlock = Lock() self._gateways_to_join = [] # we use the same execmodel for all of the Gateway objects # we spawn on our side. Probably we should not allow different # execmodels between different groups, but it's not clear. # Note that "other side" execmodels may differ and is typically # specified by the spec passed to makegateway. self.set_execmodel(execmodel) for xspec in xspecs: self.makegateway(xspec) atexit.register(self._cleanup_atexit) @property def execmodel(self): return self._execmodel @property def remote_execmodel(self): return self._remote_execmodel def set_execmodel(self, execmodel, remote_execmodel=None): """ Set the execution model for local and remote site. execmodel can be one of "thread" or "eventlet" (XXX gevent). It determines the execution model for any newly created gateway. If remote_execmodel is not specified it takes on the value of execmodel. NOTE: Execution models can only be set before any gateway is created.
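A small sketch of the intended ordering (assuming the optional eventlet backend is installed; names are illustrative)::

    group = Group()
    group.set_execmodel('eventlet')  # must happen before any makegateway() call
    gw = group.makegateway('popen')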
""" if self._gateways: raise ValueError("can not set execution models if " "gateways have been created already") if remote_execmodel is None: remote_execmodel = execmodel self._execmodel = get_execmodel(execmodel) self._remote_execmodel = get_execmodel(remote_execmodel) def __repr__(self): idgateways = [gw.id for gw in self] return "" %(idgateways) def __getitem__(self, key): if isinstance(key, int): return self._gateways[key] for gw in self._gateways: if gw == key or gw.id == key: return gw raise KeyError(key) def __contains__(self, key): try: self[key] return True except KeyError: return False def __len__(self): return len(self._gateways) def __iter__(self): return iter(list(self._gateways)) def makegateway(self, spec=None): """create and configure a gateway to a Python interpreter. The ``spec`` string encodes the target gateway type and configuration information. The general format is:: key1=value1//key2=value2//... If you leave out the ``=value`` part a True value is assumed. Valid types: ``popen``, ``ssh=hostname``, ``socket=host:port``. Valid configuration:: id= specifies the gateway id python= specifies which python interpreter to execute execmodel=model 'thread', 'eventlet', 'gevent' model for execution chdir= specifies to which directory to change nice= specifies process priority of new process env:NAME=value specifies a remote environment variable setting. If no spec is given, self.defaultspec is used. """ if not spec: spec = self.defaultspec if not isinstance(spec, XSpec): spec = XSpec(spec) self.allocate_id(spec) if spec.execmodel is None: spec.execmodel = self.remote_execmodel.backend if spec.via: assert not spec.socket master = self[spec.via] proxy_channel = master.remote_exec(gateway_io) proxy_channel.send(vars(spec)) proxy_io_master = gateway_io.ProxyIO(proxy_channel, self.execmodel) gw = gateway_bootstrap.bootstrap(proxy_io_master, spec) elif spec.popen or spec.ssh: io = gateway_io.create_io(spec, execmodel=self.execmodel) gw = gateway_bootstrap.bootstrap(io, spec) elif spec.socket: from execnet import gateway_socket io = gateway_socket.create_io(spec, self, execmodel=self.execmodel) gw = gateway_bootstrap.bootstrap(io, spec) else: raise ValueError("no gateway type found for %r" % (spec._spec,)) gw.spec = spec self._register(gw) if spec.chdir or spec.nice or spec.env: channel = gw.remote_exec(""" import os path, nice, env = channel.receive() if path: if not os.path.exists(path): os.mkdir(path) os.chdir(path) if nice and hasattr(os, 'nice'): os.nice(nice) if env: for name, value in env.items(): os.environ[name] = value """) nice = spec.nice and int(spec.nice) or 0 channel.send((spec.chdir, nice, spec.env)) channel.waitclose() return gw def allocate_id(self, spec): """ (re-entrant) allocate id for the given xspec object. """ if spec.id is None: with self._autoidlock: id = "gw" + str(self._autoidcounter) self._autoidcounter += 1 if id in self: raise ValueError("already have gateway with id %r" %(id,)) spec.id = id def _register(self, gateway): assert not hasattr(gateway, '_group') assert gateway.id assert id not in self self._gateways.append(gateway) gateway._group = self def _unregister(self, gateway): self._gateways.remove(gateway) self._gateways_to_join.append(gateway) def _cleanup_atexit(self): trace("=== atexit cleanup %r ===" %(self,)) self.terminate(timeout=1.0) def terminate(self, timeout=None): """ trigger exit of member gateways and wait for termination of member gateways and associated subprocesses. 
def terminate(self, timeout=None): """ trigger exit of member gateways and wait for termination of member gateways and associated subprocesses. After waiting timeout seconds try to kill local sub processes of popen- and ssh-gateways. Timeout defaults to None meaning open-ended waiting and no kill attempts. """ while self: vias = {} for gw in self: if gw.spec.via: vias[gw.spec.via] = True for gw in self: if gw.id not in vias: gw.exit() def join_wait(gw): gw.join() gw._io.wait() def kill(gw): trace("Gateways did not come down after timeout: %r" % gw) gw._io.kill() safe_terminate(self.execmodel, timeout, [ (lambda: join_wait(gw), lambda: kill(gw)) for gw in self._gateways_to_join]) self._gateways_to_join[:] = [] def remote_exec(self, source, **kwargs): """ remote_exec source on all member gateways and return MultiChannel connecting to all sub processes. """ channels = [] for gw in self: channels.append(gw.remote_exec(source, **kwargs)) return MultiChannel(channels) class MultiChannel: def __init__(self, channels): self._channels = channels def __len__(self): return len(self._channels) def __iter__(self): return iter(self._channels) def __getitem__(self, key): return self._channels[key] def __contains__(self, chan): return chan in self._channels def send_each(self, item): for ch in self._channels: ch.send(item) def receive_each(self, withchannel=False): assert not hasattr(self, '_queue') l = [] for ch in self._channels: obj = ch.receive() if withchannel: l.append((ch, obj)) else: l.append(obj) return l def make_receive_queue(self, endmarker=NO_ENDMARKER_WANTED): try: return self._queue except AttributeError: self._queue = None for ch in self._channels: if self._queue is None: self._queue = ch.gateway.execmodel.queue.Queue() def putreceived(obj, channel=ch): self._queue.put((channel, obj)) if endmarker is NO_ENDMARKER_WANTED: ch.setcallback(putreceived) else: ch.setcallback(putreceived, endmarker=endmarker) return self._queue def waitclose(self): first = None for ch in self._channels: try: ch.waitclose() except ch.RemoteError: if first is None: first = sys.exc_info() if first: reraise(*first) def safe_terminate(execmodel, timeout, list_of_paired_functions): workerpool = execmodel.WorkerPool() def termkill(termfunc, killfunc): termreply = workerpool.spawn(termfunc) try: termreply.get(timeout=timeout) except IOError: killfunc() replylist = [] for termfunc, killfunc in list_of_paired_functions: reply = workerpool.spawn(termkill, termfunc, killfunc) replylist.append(reply) for reply in replylist: reply.get() workerpool.waitall() default_group = Group() makegateway = default_group.makegateway set_execmodel = default_group.set_execmodel ceph-deploy-1.4.0/ceph_deploy/lib/remoto/lib/execnet/rsync.py0000644000076500000240000001604412303131044024737 0ustar alfredostaff00000000000000""" 1:N rsync implementation on top of execnet. (c) 2006-2009, Armin Rigo, Holger Krekel, Maciej Fijalkowski """ import os, stat try: from hashlib import md5 except ImportError: from md5 import md5 try: from queue import Queue except ImportError: from Queue import Queue import execnet.rsync_remote class RSync(object): """ This class allows one to send a directory structure (recursively) to one or multiple remote filesystems. There is limited support for symlinks, which means that symlinks pointing to the sourcetree will be sent "as is" while external symlinks will be just copied (regardless of the existence of such a path on the remote side).
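A hedged usage sketch (the gateway and both paths are hypothetical)::

    rsync = RSync('/tmp/source')
    rsync.add_target(gateway, '/tmp/dest')
    rsync.send()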
""" def __init__(self, sourcedir, callback=None, verbose=True): self._sourcedir = str(sourcedir) self._verbose = verbose assert callback is None or hasattr(callback, '__call__') self._callback = callback self._channels = {} self._receivequeue = Queue() self._links = [] def filter(self, path): return True def _end_of_channel(self, channel): if channel in self._channels: # too early! we must have got an error channel.waitclose() # or else we raise one raise IOError('connection unexpectedly closed: %s ' % ( channel.gateway,)) def _process_link(self, channel): for link in self._links: channel.send(link) # completion marker, this host is done channel.send(42) def _done(self, channel): """ Call all callbacks """ finishedcallback = self._channels.pop(channel) if finishedcallback: finishedcallback() channel.waitclose() def _list_done(self, channel): # sum up all to send if self._callback: s = sum([self._paths[i] for i in self._to_send[channel]]) self._callback("list", s, channel) def _send_item(self, channel, data): """ Send one item """ modified_rel_path, checksum = data modifiedpath = os.path.join(self._sourcedir, *modified_rel_path) try: f = open(modifiedpath, 'rb') data = f.read() except IOError: data = None # provide info to progress callback function modified_rel_path = "/".join(modified_rel_path) if data is not None: self._paths[modified_rel_path] = len(data) else: self._paths[modified_rel_path] = 0 if channel not in self._to_send: self._to_send[channel] = [] self._to_send[channel].append(modified_rel_path) #print "sending", modified_rel_path, data and len(data) or 0, checksum if data is not None: f.close() if checksum is not None and checksum == md5(data).digest(): data = None # not really modified else: self._report_send_file(channel.gateway, modified_rel_path) channel.send(data) def _report_send_file(self, gateway, modified_rel_path): if self._verbose: print("%s <= %s" %(gateway, modified_rel_path)) def send(self, raises=True): """ Sends a sourcedir to all added targets. Flag indicates whether to raise an error or return in case of lack of targets """ if not self._channels: if raises: raise IOError("no targets available, maybe you " "are trying call send() twice?") return # normalize a trailing '/' away self._sourcedir = os.path.dirname(os.path.join(self._sourcedir, 'x')) # send directory structure and file timestamps/sizes self._send_directory_structure(self._sourcedir) # paths and to_send are only used for doing # progress-related callbacks self._paths = {} self._to_send = {} # send modified file to clients while self._channels: channel, req = self._receivequeue.get() if req is None: self._end_of_channel(channel) else: command, data = req if command == "links": self._process_link(channel) elif command == "done": self._done(channel) elif command == "ack": if self._callback: self._callback("ack", self._paths[data], channel) elif command == "list_done": self._list_done(channel) elif command == "send": self._send_item(channel, data) del data else: assert "Unknown command %s" % command def add_target(self, gateway, destdir, finishedcallback=None, **options): """ Adds a remote target specified via a gateway and a remote destination directory. 
""" for name in options: assert name in ('delete',) def itemcallback(req): self._receivequeue.put((channel, req)) channel = gateway.remote_exec(execnet.rsync_remote) channel.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False) channel.setcallback(itemcallback, endmarker = None) channel.send((str(destdir), options)) self._channels[channel] = finishedcallback def _broadcast(self, msg): for channel in self._channels: channel.send(msg) def _send_link(self, linktype, basename, linkpoint): self._links.append((linktype, basename, linkpoint)) def _send_directory(self, path): # dir: send a list of entries names = [] subpaths = [] for name in os.listdir(path): p = os.path.join(path, name) if self.filter(p): names.append(name) subpaths.append(p) mode = os.lstat(path).st_mode self._broadcast([mode] + names) for p in subpaths: self._send_directory_structure(p) def _send_link_structure(self, path): linkpoint = os.readlink(path) basename = path[len(self._sourcedir) + 1:] if linkpoint.startswith(self._sourcedir): self._send_link("linkbase", basename, linkpoint[len(self._sourcedir) + 1:]) else: # relative or absolute link, just send it self._send_link("link", basename, linkpoint) self._broadcast(None) def _send_directory_structure(self, path): try: st = os.lstat(path) except OSError: self._broadcast((None, 0, 0)) return if stat.S_ISREG(st.st_mode): # regular file: send a mode/timestamp/size pair self._broadcast((st.st_mode, st.st_mtime, st.st_size)) elif stat.S_ISDIR(st.st_mode): self._send_directory(path) elif stat.S_ISLNK(st.st_mode): self._send_link_structure(path) else: raise ValueError("cannot sync %r" % (path,)) ceph-deploy-1.4.0/ceph_deploy/lib/remoto/lib/execnet/rsync_remote.py0000644000076500000240000000702412303131044026310 0ustar alfredostaff00000000000000""" (c) 2006-2013, Armin Rigo, Holger Krekel, Maciej Fijalkowski """ def serve_rsync(channel): import os, stat, shutil try: from hashlib import md5 except ImportError: from md5 import md5 destdir, options = channel.receive() modifiedfiles = [] def remove(path): assert path.startswith(destdir) try: os.unlink(path) except OSError: # assume it's a dir shutil.rmtree(path) def receive_directory_structure(path, relcomponents): try: st = os.lstat(path) except OSError: st = None msg = channel.receive() if isinstance(msg, list): if st and not stat.S_ISDIR(st.st_mode): os.unlink(path) st = None if not st: os.makedirs(path) mode = msg.pop(0) if mode: os.chmod(path, mode) entrynames = {} for entryname in msg: destpath = os.path.join(path, entryname) receive_directory_structure(destpath, relcomponents + [entryname]) entrynames[entryname] = True if options.get('delete'): for othername in os.listdir(path): if othername not in entrynames: otherpath = os.path.join(path, othername) remove(otherpath) elif msg is not None: assert isinstance(msg, tuple) checksum = None if st: if stat.S_ISREG(st.st_mode): msg_mode, msg_mtime, msg_size = msg if msg_size != st.st_size: pass elif msg_mtime != st.st_mtime: f = open(path, 'rb') checksum = md5(f.read()).digest() f.close() elif msg_mode and msg_mode != st.st_mode: os.chmod(path, msg_mode) return else: return # already fine else: remove(path) channel.send(("send", (relcomponents, checksum))) modifiedfiles.append((path, msg)) receive_directory_structure(destdir, []) STRICT_CHECK = False # seems most useful this way for py.test channel.send(("list_done", None)) for path, (mode, time, size) in modifiedfiles: data = channel.receive() channel.send(("ack", path[len(destdir) + 1:])) if data is not None: if STRICT_CHECK and 
len(data) != size: raise IOError('file modified during rsync: %r' % (path,)) f = open(path, 'wb') f.write(data) f.close() try: if mode: os.chmod(path, mode) os.utime(path, (time, time)) except OSError: pass del data channel.send(("links", None)) msg = channel.receive() while msg != 42: # we get symlink _type, relpath, linkpoint = msg path = os.path.join(destdir, relpath) try: remove(path) except OSError: pass if _type == "linkbase": src = os.path.join(destdir, linkpoint) else: assert _type == "link", _type src = linkpoint os.symlink(src, path) msg = channel.receive() channel.send(("done", None)) if __name__ == '__channelexec__': serve_rsync(channel) # noqa ceph-deploy-1.4.0/ceph_deploy/lib/remoto/lib/execnet/script/0000755000076500000240000000000012312561302024532 5ustar alfredostaff00000000000000ceph-deploy-1.4.0/ceph_deploy/lib/remoto/lib/execnet/script/__init__.py0000644000076500000240000000000212303131044026627 0ustar alfredostaff00000000000000# ceph-deploy-1.4.0/ceph_deploy/lib/remoto/lib/execnet/script/loop_socketserver.py0000644000076500000240000000063512303131044030654 0ustar alfredostaff00000000000000 import os, sys import subprocess if __name__ == '__main__': directory = os.path.dirname(os.path.abspath(sys.argv[0])) script = os.path.join(directory, 'socketserver.py') while 1: cmdlist = ["python", script] cmdlist.extend(sys.argv[1:]) text = "starting subcommand: " + " ".join(cmdlist) print(text) process = subprocess.Popen(cmdlist) process.wait() ceph-deploy-1.4.0/ceph_deploy/lib/remoto/lib/execnet/script/quitserver.py0000644000076500000240000000044112303131044027310 0ustar alfredostaff00000000000000""" send a "quit" signal to a remote server """ import sys import socket hostport = sys.argv[1] host, port = hostport.split(':') hostport = (host, int(port)) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect(hostport) sock.sendall('"raise KeyboardInterrupt"\n') ceph-deploy-1.4.0/ceph_deploy/lib/remoto/lib/execnet/script/shell.py0000755000076500000240000000476712303131044026230 0ustar alfredostaff00000000000000#! 
/usr/bin/env python """ a remote python shell for injection into startserver.py """ import sys, os, socket, select try: clientsock except NameError: print("client side starting") host, port = sys.argv[1].split(':') port = int(port) myself = open(os.path.abspath(sys.argv[0]), 'rU').read() sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((host, port)) sock.sendall(repr(myself)+'\n') print("send boot string") inputlist = [ sock, sys.stdin ] try: while 1: r,w,e = select.select(inputlist, [], []) if sys.stdin in r: line = raw_input() sock.sendall(line + '\n') if sock in r: line = sock.recv(4096) sys.stdout.write(line) sys.stdout.flush() except: import traceback print(traceback.print_exc()) sys.exit(1) print("server side starting") # server side # from traceback import print_exc from threading import Thread class promptagent(Thread): def __init__(self, clientsock): Thread.__init__(self) self.clientsock = clientsock def run(self): print("Entering thread prompt loop") clientfile = self.clientsock.makefile('w') filein = self.clientsock.makefile('r') loc = self.clientsock.getsockname() while 1: try: clientfile.write('%s %s >>> ' % loc) clientfile.flush() line = filein.readline() if len(line)==0: raise EOFError("nothing") #print >>sys.stderr,"got line: " + line if line.strip(): oldout, olderr = sys.stdout, sys.stderr sys.stdout, sys.stderr = clientfile, clientfile try: try: exec(compile(line + '\n','', 'single')) except: print_exc() finally: sys.stdout=oldout sys.stderr=olderr clientfile.flush() except EOFError: #e = sys.exc_info()[1] sys.stderr.write("connection closed, prompt thread returns") break #print >>sys.stdout, "".join(apply(format_exception,sys.exc_info())) self.clientsock.close() prompter = promptagent(clientsock) # noqa prompter.start() print("promptagent - thread started") ceph-deploy-1.4.0/ceph_deploy/lib/remoto/lib/execnet/script/socketserver.py0000755000076500000240000000714212303131044027626 0ustar alfredostaff00000000000000#! /usr/bin/env python """ start socket based minimal readline exec server it can be executed in 2 modes of operation 1. as a normal script that listens for new connections 2. via existing_gateway.remote_exec (as imported module) """ # this part of the program only executes on the server side # progname = 'socket_readline_exec_server-1.2' import sys, os def get_fcntl(): try: import fcntl except ImportError: fcntl = None return fcntl fcntl = get_fcntl() debug = 0 if debug: # and not os.isatty(sys.stdin.fileno()): f = open('/tmp/execnet-socket-pyout.log', 'w') old = sys.stdout, sys.stderr sys.stdout = sys.stderr = f def print_(*args): print(" ".join(str(arg) for arg in args)) if sys.version_info > (3, 0): exec("""def exec_(source, locs): exec(source, locs)""") else: exec("""def exec_(source, locs): exec source in locs""") def exec_from_one_connection(serversock): print_(progname, 'Entering Accept loop', serversock.getsockname()) clientsock,address = serversock.accept() print_(progname, 'got new connection from %s %s' % address) clientfile = clientsock.makefile('rb') print_("reading line") # rstrip so that we can use \r\n for telnet testing source = clientfile.readline().rstrip() clientfile.close() g = {'clientsock' : clientsock, 'address' : address, 'execmodel': execmodel} source = eval(source) if source: co = compile(source+'\n', source, 'exec') print_(progname, 'compiled source, executing') try: exec_(co, g) # noqa finally: print_(progname, 'finished executing code') # background thread might hold a reference to this (!?)
#clientsock.close() def bind_and_listen(hostport, execmodel): socket = execmodel.socket if isinstance(hostport, str): host, port = hostport.split(':') hostport = (host, int(port)) serversock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # set close-on-exec if hasattr(fcntl, 'FD_CLOEXEC'): old = fcntl.fcntl(serversock.fileno(), fcntl.F_GETFD) fcntl.fcntl(serversock.fileno(), fcntl.F_SETFD, old | fcntl.FD_CLOEXEC) # allow the address to be re-used in a reasonable amount of time if os.name == 'posix' and sys.platform != 'cygwin': serversock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) serversock.bind(hostport) serversock.listen(5) return serversock def startserver(serversock, loop=False): try: while 1: try: exec_from_one_connection(serversock) except (KeyboardInterrupt, SystemExit): raise except: if debug: import traceback traceback.print_exc() else: excinfo = sys.exc_info() print_("got exception", excinfo[1]) if not loop: break finally: print_("leaving socketserver execloop") serversock.shutdown(2) if __name__ == '__main__': import sys if len(sys.argv)>1: hostport = sys.argv[1] else: hostport = ':8888' from execnet.gateway_base import get_execmodel execmodel = get_execmodel("thread") serversock = bind_and_listen(hostport, execmodel) startserver(serversock, loop=False) elif __name__=='__channelexec__': execmodel = channel.gateway.execmodel # noqa bindname = channel.receive() # noqa sock = bind_and_listen(bindname, execmodel) port = sock.getsockname() channel.send(port) # noqa startserver(sock) ceph-deploy-1.4.0/ceph_deploy/lib/remoto/lib/execnet/script/socketserverservice.py0000644000076500000240000000643712303131044031212 0ustar alfredostaff00000000000000""" A windows service wrapper for the py.execnet socketserver. To use, run: python socketserverservice.py register net start ExecNetSocketServer """ import sys import win32serviceutil import win32service import win32event import win32evtlogutil import servicemanager import threading import socketserver appname = 'ExecNetSocketServer' class SocketServerService(win32serviceutil.ServiceFramework): _svc_name_ = appname _svc_display_name_ = "%s" % appname _svc_deps_ = ["EventLog"] def __init__(self, args): # The exe-file has messages for the Event Log Viewer. # Register the exe-file as event source. # # Probably it would be better if this is done at installation time, # so that it also could be removed if the service is uninstalled. # Unfortunately it cannot be done in the 'if __name__ == "__main__"' # block below, because the 'frozen' exe-file does not run this code. # win32evtlogutil.AddSourceToRegistry(self._svc_display_name_, servicemanager.__file__, "Application") win32serviceutil.ServiceFramework.__init__(self, args) self.hWaitStop = win32event.CreateEvent(None, 0, 0, None) self.WAIT_TIME = 1000 # in milliseconds def SvcStop(self): self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING) win32event.SetEvent(self.hWaitStop) def SvcDoRun(self): # Redirect stdout and stderr to prevent "IOError: [Errno 9] # Bad file descriptor". Windows services don't have functional # output streams. sys.stdout = sys.stderr = open('nul', 'w') # Write a 'started' event to the event log... 
win32evtlogutil.ReportEvent(self._svc_display_name_, servicemanager.PYS_SERVICE_STARTED, 0, # category servicemanager.EVENTLOG_INFORMATION_TYPE, (self._svc_name_, '')) print("Begin: %s" % (self._svc_display_name_)) hostport = ':8888' print('Starting py.execnet SocketServer on %s' % hostport) serversock = socketserver.bind_and_listen(hostport) thread = threading.Thread(target=socketserver.startserver, args=(serversock,), kwargs={'loop':True}) thread.setDaemon(True) thread.start() # wait to be stopped or self.WAIT_TIME to pass while True: result = win32event.WaitForSingleObject(self.hWaitStop, self.WAIT_TIME) if result == win32event.WAIT_OBJECT_0: break # write a 'stopped' event to the event log. win32evtlogutil.ReportEvent(self._svc_display_name_, servicemanager.PYS_SERVICE_STOPPED, 0, # category servicemanager.EVENTLOG_INFORMATION_TYPE, (self._svc_name_, '')) print("End: %s" % appname) if __name__ == '__main__': # Note that this code will not be run in the 'frozen' exe-file!!! win32serviceutil.HandleCommandLine(SocketServerService) ceph-deploy-1.4.0/ceph_deploy/lib/remoto/lib/execnet/script/xx.py0000644000076500000240000000024412303131044025541 0ustar alfredostaff00000000000000import rlcompleter2 rlcompleter2.setup() import register, sys try: hostport = sys.argv[1] except: hostport = ':8888' gw = register.ServerGateway(hostport) ceph-deploy-1.4.0/ceph_deploy/lib/remoto/lib/execnet/xspec.py0000644000076500000240000000340012303131044024713 0ustar alfredostaff00000000000000""" (c) 2008-2013, holger krekel """ class XSpec: """ Execution Specification: key1=value1//key2=value2 ... * keys need to be unique within the specification scope * neither key nor value are allowed to contain "//" * keys are not allowed to contain "=" * keys are not allowed to start with underscore * if no "=value" is given, assume a boolean True value """ # XXX allow customization, to only allow specific key names popen = ssh = socket = python = chdir = nice = \ dont_write_bytecode = execmodel = None def __init__(self, string): self._spec = string self.env = {} for keyvalue in string.split("//"): i = keyvalue.find("=") if i == -1: key, value = keyvalue, True else: key, value = keyvalue[:i], keyvalue[i+1:] if key[0] == "_": raise AttributeError("%r not a valid XSpec key" % key) if key in self.__dict__: raise ValueError("duplicate key: %r in %r" %(key, string)) if key.startswith("env:"): self.env[key[4:]] = value else: setattr(self, key, value) def __getattr__(self, name): if name[0] == "_": raise AttributeError(name) return None def __repr__(self): return "<XSpec %r>" %(self._spec,) def __str__(self): return self._spec def __hash__(self): return hash(self._spec) def __eq__(self, other): return self._spec == getattr(other, '_spec', None) def __ne__(self, other): return self._spec != getattr(other, '_spec', None) def _samefilesystem(self): return bool(self.popen and not self.chdir)
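# A small parsing sketch (the spec string is hypothetical):
#   spec = XSpec('ssh=user@node1//chdir=/tmp//env:FOO=bar')
#   assert spec.ssh == 'user@node1'
#   assert spec.chdir == '/tmp'
#   assert spec.env['FOO'] == 'bar'
#   assert spec.popen is None  # unset keys read as None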
ceph-deploy-1.4.0/ceph_deploy/lib/remoto/log.py0000644000076500000240000000157712303131044022166 0ustar alfredostaff00000000000000 def reporting(conn, result, timeout=None): timeout = timeout or conn.global_timeout # -1 a.k.a. wait forever log_map = { 'debug': conn.logger.debug, 'error': conn.logger.error, 'warning': conn.logger.warning } while True: try: received = result.receive(timeout) level_received, message = list(received.items())[0] log_map[level_received](message.strip('\n')) except EOFError: break except Exception as err: # the things we need to do here :( # because execnet magic, we cannot catch this as # `except TimeoutError` if err.__class__.__name__ == 'TimeoutError': msg = 'No data was received after %s seconds, disconnecting...' % timeout conn.logger.warning(msg) break raise ceph-deploy-1.4.0/ceph_deploy/lib/remoto/process.py0000644000076500000240000001122512303131044023052 0ustar alfredostaff00000000000000import traceback from .log import reporting from .util import admin_command, RemoteError def _remote_run(channel, cmd, **kw): import subprocess import sys stop_on_nonzero = kw.pop('stop_on_nonzero', True) process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, **kw ) if process.stderr: while True: err = process.stderr.readline() if err == '' and process.poll() is not None: break if err != '': channel.send({'warning':err}) sys.stderr.flush() if process.stdout: while True: out = process.stdout.readline() if out == '' and process.poll() is not None: break if out != '': channel.send({'debug':out}) sys.stdout.flush() returncode = process.wait() if returncode != 0: if stop_on_nonzero: raise RuntimeError("command returned non-zero exit status: %s" % returncode) else: channel.send({'warning': "command returned non-zero exit status: %s" % returncode}) def run(conn, command, exit=False, timeout=None, **kw): """ A real-time-logging implementation of a remote subprocess.Popen call where a command is just executed on the remote end and no other handling is done. :param conn: A connection object :param command: The command to pass in to the remote subprocess.Popen :param exit: If this call should close the connection at the end :param timeout: How many seconds to wait after no remote data is received (defaults to wait forever) """ stop_on_error = kw.pop('stop_on_error', True) kw.setdefault( 'env', { 'PATH': '/usr/local/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin' } ) timeout = timeout or conn.global_timeout conn.logger.info('Running command: %s' % ' '.join(admin_command(conn.sudo, command))) result = conn.execute(_remote_run, cmd=command, **kw) try: reporting(conn, result, timeout) except Exception: remote_trace = traceback.format_exc() remote_error = RemoteError(remote_trace) if remote_error.exception_name == 'RuntimeError': conn.logger.error(remote_error.exception_line) else: for tb_line in remote_trace.split('\n'): conn.logger.error(tb_line) if stop_on_error: raise RuntimeError( 'Failed to execute command: %s' % ' '.join(command) ) if exit: conn.exit() def _remote_check(channel, cmd, **kw): import subprocess process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kw ) stdout = [line.strip('\n') for line in process.stdout.readlines()] stderr = [line.strip('\n') for line in process.stderr.readlines()] channel.send((stdout, stderr, process.wait())) def check(conn, command, exit=False, timeout=None, **kw): """ Execute a remote command with ``subprocess.Popen`` but report back the results in a tuple with three items: stdout, stderr, and exit status. This helper function *does not* provide any logging as it is the caller's responsibility to do so.
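A hedged usage sketch (``conn`` stands in for an already-created remoto connection)::

    stdout, stderr, code = check(conn, ['ls', '/etc/ceph'])
    if code != 0:
        conn.logger.error('\n'.join(stderr))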
""" stop_on_error = kw.pop('stop_on_error', True) timeout = timeout or conn.global_timeout kw.setdefault( 'env', { 'PATH': '/usr/local/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin' } ) conn.logger.info('Running command: %s' % ' '.join(admin_command(conn.sudo, command))) result = conn.execute(_remote_check, cmd=command, **kw) try: return result.receive(timeout) except Exception as err: # the things we need to do here :( # because execnet magic, we cannot catch this as # `except TimeoutError` if err.__class__.__name__ == 'TimeoutError': msg = 'No data was received after %s seconds, disconnecting...' % timeout conn.logger.warning(msg) return else: remote_trace = traceback.format_exc() remote_error = RemoteError(remote_trace) if remote_error.exception_name == 'RuntimeError': conn.logger.error(remote_error.exception_line) else: for tb_line in remote_trace.split('\n'): conn.logger.error(tb_line) if stop_on_error: raise RuntimeError( 'Failed to execute command: %s' % ' '.join(command) ) if exit: conn.exit() ceph-deploy-1.4.0/ceph_deploy/lib/remoto/util.py0000644000076500000240000000200612303131044022346 0ustar alfredostaff00000000000000 def admin_command(sudo, command): """ If sudo is needed, make sure the command is prepended correctly, otherwise return the command as it came. :param sudo: A boolean representing the intention of having a sudo command (or not) :param command: A list of the actual command to execute with Popen. """ if sudo: if not isinstance(command, list): command = [command] return ['sudo'] + [cmd for cmd in command] return command class RemoteError(object): def __init__(self, traceback): self.orig_traceback = traceback self.exception_line = '' self.exception_name = self.get_exception_name() def get_exception_name(self): for tb_line in reversed(self.orig_traceback.split('\n')): if tb_line: for word in tb_line.split(): if word.endswith(':'): # exception! self.exception_line = tb_line return word.strip().strip(':') ceph-deploy-1.4.0/ceph_deploy/mds.py0000644000076500000240000001273712306157710020130 0ustar alfredostaff00000000000000from cStringIO import StringIO import errno import logging import os from ceph_deploy import conf from ceph_deploy import exc from ceph_deploy import hosts from ceph_deploy.lib.remoto import process from ceph_deploy.cliutil import priority LOG = logging.getLogger(__name__) def get_bootstrap_mds_key(cluster): """ Read the bootstrap-mds key for `cluster`. 
""" path = '{cluster}.bootstrap-mds.keyring'.format(cluster=cluster) try: with file(path, 'rb') as f: return f.read() except IOError: raise RuntimeError('bootstrap-mds keyring not found; run \'gatherkeys\'') def create_mds(conn, name, cluster, init): path = '/var/lib/ceph/mds/{cluster}-{name}'.format( cluster=cluster, name=name ) conn.remote_module.safe_mkdir(path) bootstrap_keyring = '/var/lib/ceph/bootstrap-mds/{cluster}.keyring'.format( cluster=cluster ) keypath = os.path.join(path, 'keyring') stdout, stderr, returncode = process.check( conn, [ 'ceph', '--cluster', cluster, '--name', 'client.bootstrap-mds', '--keyring', bootstrap_keyring, 'auth', 'get-or-create', 'mds.{name}'.format(name=name), 'osd', 'allow rwx', 'mds', 'allow', 'mon', 'allow profile mds', '-o', os.path.join(keypath), ] ) if returncode > 0 and returncode != errno.EACCES: for line in stderr: conn.logger.error(line) for line in stdout: # yes stdout as err because this is an error conn.logger.error(line) conn.logger.error('exit code from command was: %s' % returncode) raise RuntimeError('could not create mds') process.check( conn, [ 'ceph', '--cluster', cluster, '--name', 'client.bootstrap-mds', '--keyring', bootstrap_keyring, 'auth', 'get-or-create', 'mds.{name}'.format(name=name), 'osd', 'allow *', 'mds', 'allow', 'mon', 'allow rwx', '-o', os.path.join(keypath), ] ) conn.remote_module.touch_file(os.path.join(path, 'done')) conn.remote_module.touch_file(os.path.join(path, init)) if init == 'upstart': process.run( conn, [ 'initctl', 'emit', 'ceph-mds', 'cluster={cluster}'.format(cluster=cluster), 'id={name}'.format(name=name), ], timeout=7 ) elif init == 'sysvinit': process.run( conn, [ 'service', 'ceph', 'start', 'mds.{name}'.format(name=name), ], timeout=7 ) def mds_create(args): cfg = conf.ceph.load(args) LOG.debug( 'Deploying mds, cluster %s hosts %s', args.cluster, ' '.join(':'.join(x or '' for x in t) for t in args.mds), ) if not args.mds: raise exc.NeedHostError() key = get_bootstrap_mds_key(cluster=args.cluster) bootstrapped = set() errors = 0 for hostname, name in args.mds: try: distro = hosts.get(hostname, username=args.username) rlogger = distro.conn.logger LOG.info( 'Distro info: %s %s %s', distro.name, distro.release, distro.codename ) LOG.debug('remote host will use %s', distro.init) if hostname not in bootstrapped: bootstrapped.add(hostname) LOG.debug('deploying mds bootstrap to %s', hostname) conf_data = StringIO() cfg.write(conf_data) distro.conn.remote_module.write_conf( args.cluster, conf_data.getvalue(), args.overwrite_conf, ) path = '/var/lib/ceph/bootstrap-mds/{cluster}.keyring'.format( cluster=args.cluster, ) if not distro.conn.remote_module.path_exists(path): rlogger.warning('mds keyring does not exist yet, creating one') distro.conn.remote_module.write_keyring(path, key) create_mds(distro.conn, name, args.cluster, distro.init) distro.conn.exit() except RuntimeError as e: LOG.error(e) errors += 1 if errors: raise exc.GenericError('Failed to create %d MDSs' % errors) def mds(args): if args.subcommand == 'create': mds_create(args) else: LOG.error('subcommand %s not implemented', args.subcommand) def colon_separated(s): host = s name = s if s.count(':') == 1: (host, name) = s.split(':') return (host, name) @priority(30) def make(parser): """ Deploy ceph MDS on remote hosts. 
""" parser.add_argument( 'subcommand', metavar='SUBCOMMAND', choices=[ 'create', 'destroy', ], help='create or destroy', ) parser.add_argument( 'mds', metavar='HOST[:NAME]', nargs='*', type=colon_separated, help='host (and optionally the daemon name) to deploy on', ) parser.set_defaults( func=mds, ) ceph-deploy-1.4.0/ceph_deploy/memoize.py0000644000076500000240000000106412236715242021003 0ustar alfredostaff00000000000000import functools class NotFound(object): """ Sentinel object to say call was not memoized. Supposed to be faster than throwing exceptions on cache miss. """ def __str__(self): return self.__class__.__name__ NotFound = NotFound() def memoize(f): cache = {} @functools.wraps(f) def wrapper(*args, **kwargs): key = (args, tuple(sorted(kwargs.iteritems()))) val = cache.get(key, NotFound) if val is NotFound: val = cache[key] = f(*args, **kwargs) return val return wrapper ceph-deploy-1.4.0/ceph_deploy/misc.py0000644000076500000240000000103212245141667020270 0ustar alfredostaff00000000000000 def mon_hosts(mons): """ Iterate through list of MON hosts, return tuples of (name, host). """ for m in mons: if m.count(':'): (name, host) = m.split(':') else: name = m host = m if name.count('.') > 0: name = name.split('.')[0] yield (name, host) def remote_shortname(socket): """ Obtains remote hostname of the socket and cuts off the domain part of its FQDN. """ return socket.gethostname().split('.', 1)[0] ceph-deploy-1.4.0/ceph_deploy/mon.py0000644000076500000240000003561312306157710020134 0ustar alfredostaff00000000000000import argparse import json import logging import re import os from textwrap import dedent import time from ceph_deploy import conf, exc, admin from ceph_deploy.cliutil import priority from ceph_deploy.util import paths, net from ceph_deploy.lib.remoto import process from ceph_deploy import hosts from ceph_deploy.misc import mon_hosts from ceph_deploy.connection import get_connection from ceph_deploy import gatherkeys LOG = logging.getLogger(__name__) def mon_status_check(conn, logger, hostname, args): """ A direct check for JSON output on the monitor status. For newer versions of Ceph (dumpling and newer) a new mon_status command was added ( `ceph daemon mon mon_status` ) and should be revisited if the output changes as this check depends on that availability. """ asok_path = paths.mon.asok(args.cluster, hostname) out, err, code = process.check( conn, [ 'ceph', '--cluster={cluster}'.format(cluster=args.cluster), '--admin-daemon', asok_path, 'mon_status', ], ) for line in err: logger.error(line) try: return json.loads(''.join(out)) except ValueError: return {} def catch_mon_errors(conn, logger, hostname, cfg, args): """ Make sure we are able to catch up common mishaps with monitors and use that state of a monitor to determine what is missing and warn apropriately about it. 
""" monmap = mon_status_check(conn, logger, hostname, args).get('monmap', {}) mon_initial_members = cfg.safe_get('global', 'mon_initial_members') public_addr = cfg.safe_get('global', 'public_addr') public_network = cfg.safe_get('global', 'public_network') mon_in_monmap = [ mon.get('name') for mon in monmap.get('mons', [{}]) if mon.get('name') == hostname ] if mon_initial_members is None or not hostname in mon_initial_members: logger.warning('%s is not defined in `mon initial members`', hostname) if not mon_in_monmap: logger.warning('monitor %s does not exist in monmap', hostname) if not public_addr and not public_network: logger.warning('neither `public_addr` nor `public_network` keys are defined for monitors') logger.warning('monitors may not be able to form quorum') def mon_status(conn, logger, hostname, args, silent=False): """ run ``ceph daemon mon.`hostname` mon_status`` on the remote end and provide not only the output, but be able to return a boolean status of what is going on. ``False`` represents a monitor that is not doing OK even if it is up and running, while ``True`` would mean the monitor is up and running correctly. """ mon = 'mon.%s' % hostname try: out = mon_status_check(conn, logger, hostname, args) if not out: logger.warning('monitor: %s, might not be running yet' % mon) return False if not silent: logger.debug('*'*80) logger.debug('status for monitor: %s' % mon) for line in json.dumps(out, indent=2, sort_keys=True).split('\n'): logger.debug(line) logger.debug('*'*80) if out['rank'] >= 0: logger.info('monitor: %s is running' % mon) return True logger.info('monitor: %s is not running' % mon) return False except RuntimeError: logger.info('monitor: %s is not running' % mon) return False def mon_add(args): cfg = conf.ceph.load(args) if not args.mon: raise exc.NeedHostError() mon_host = args.mon[0] try: with file('{cluster}.mon.keyring'.format(cluster=args.cluster), 'rb') as f: monitor_keyring = f.read() except IOError: raise RuntimeError( 'mon keyring not found; run \'new\' to create a new cluster' ) LOG.info('ensuring configuration of new mon host: %s', mon_host) args.client = [mon_host] admin.admin(args) LOG.debug( 'Adding mon to cluster %s, host %s', args.cluster, mon_host, ) mon_section = 'mon.%s' % mon_host cfg_mon_addr = cfg.safe_get(mon_section, 'mon addr') if args.address: LOG.debug('using mon address via --address %s' % args.address) mon_ip = args.address elif cfg_mon_addr: LOG.debug('using mon address via configuration: %s' % cfg_mon_addr) mon_ip = cfg_mon_addr else: mon_ip = net.get_nonlocal_ip(mon_host) LOG.debug('using mon address by resolving host: %s' % mon_ip) try: LOG.debug('detecting platform for host %s ...', mon_host) distro = hosts.get(mon_host, username=args.username) LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename) rlogger = logging.getLogger(mon_host) # ensure remote hostname is good to go hostname_is_compatible(distro.conn, rlogger, mon_host) rlogger.debug('adding mon to %s', mon_host) args.address = mon_ip distro.mon.add(distro, args, monitor_keyring) # tell me the status of the deployed mon time.sleep(2) # give some room to start catch_mon_errors(distro.conn, rlogger, mon_host, cfg, args) mon_status(distro.conn, rlogger, mon_host, args) distro.conn.exit() except RuntimeError as e: LOG.error(e) raise exc.GenericError('Failed to add monitor to host: %s' % mon_host) def mon_create(args): cfg = conf.ceph.load(args) if not args.mon: mon_initial_members = cfg.safe_get('global', 'mon_initial_members') args.mon = 
re.split(r'[,\s]+', mon_initial_members) if not args.mon: raise exc.NeedHostError() try: with file('{cluster}.mon.keyring'.format(cluster=args.cluster), 'rb') as f: monitor_keyring = f.read() except IOError: raise RuntimeError('mon keyring not found; run \'new\' to create a new cluster') LOG.debug( 'Deploying mon, cluster %s hosts %s', args.cluster, ' '.join(args.mon), ) errors = 0 for (name, host) in mon_hosts(args.mon): try: # TODO add_bootstrap_peer_hint LOG.debug('detecting platform for host %s ...', name) distro = hosts.get(host, username=args.username) LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename) rlogger = logging.getLogger(name) # ensure remote hostname is good to go hostname_is_compatible(distro.conn, rlogger, name) rlogger.debug('deploying mon to %s', name) distro.mon.create(distro, args, monitor_keyring) # tell me the status of the deployed mon time.sleep(2) # give some room to start mon_status(distro.conn, rlogger, name, args) catch_mon_errors(distro.conn, rlogger, name, cfg, args) distro.conn.exit() except RuntimeError as e: LOG.error(e) errors += 1 if errors: raise exc.GenericError('Failed to create %d monitors' % errors) def hostname_is_compatible(conn, logger, provided_hostname): """ Make sure that the host that we are connecting to has the same hostname as reported by the remote host, otherwise mons can fail to reach quorum. """ logger.debug('determining if provided host has same hostname in remote') remote_hostname = conn.remote_module.shortname() if remote_hostname == provided_hostname: return logger.warning('*'*80) logger.warning('provided hostname must match remote hostname') logger.warning('provided hostname: %s' % provided_hostname) logger.warning('remote hostname: %s' % remote_hostname) logger.warning('monitors may not reach quorum and create-keys will not complete') logger.warning('*'*80) def destroy_mon(conn, cluster, hostname): import datetime import time retries = 5 path = paths.mon.path(cluster, hostname) if conn.remote_module.path_exists(path): # remove from cluster process.run( conn, [ 'ceph', '--cluster={cluster}'.format(cluster=cluster), '-n', 'mon.', '-k', '{path}/keyring'.format(path=path), 'mon', 'remove', hostname, ], timeout=7, ) # stop if conn.remote_module.path_exists(os.path.join(path, 'upstart')): status_args = [ 'initctl', 'status', 'ceph-mon', 'cluster={cluster}'.format(cluster=cluster), 'id={hostname}'.format(hostname=hostname), ] elif conn.remote_module.path_exists(os.path.join(path, 'sysvinit')): status_args = [ 'service', 'ceph', 'status', 'mon.{hostname}'.format(hostname=hostname), ] while retries: conn.logger.info('polling the daemon to verify it stopped') if is_running(conn, status_args): time.sleep(5) retries -= 1 if retries <= 0: raise RuntimeError('ceph-mon daemon did not stop') else: break # archive old monitor directory fn = '{cluster}-{hostname}-{stamp}'.format( hostname=hostname, cluster=cluster, stamp=datetime.datetime.utcnow().strftime("%Y-%m-%dZ%H:%M:%S"), ) process.run( conn, [ 'mkdir', '-p', '/var/lib/ceph/mon-removed', ], ) conn.remote_module.make_mon_removed_dir(path, fn) def mon_destroy(args): errors = 0 for (name, host) in mon_hosts(args.mon): try: LOG.debug('Removing mon from %s', name) distro = hosts.get(host, username=args.username) hostname = distro.conn.remote_module.shortname() destroy_mon( distro.conn, args.cluster, hostname, ) distro.conn.exit() except RuntimeError as e: LOG.error(e) errors += 1 if errors: raise exc.GenericError('Failed to destroy %d monitors' % errors)
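# For reference, the status probe issued by mon_status_check() boils down to
# this admin-socket call (cluster name and hostname are hypothetical):
#   ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node1.asok mon_status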
mon_create_initial(args): cfg = conf.ceph.load(args) cfg_initial_members = cfg.safe_get('global', 'mon_initial_members') if cfg_initial_members is None: raise RuntimeError('No `mon initial members` defined in config') mon_initial_members = re.split(r'[,\s]+', cfg_initial_members) # create them normally through mon_create mon_create(args) # make the sets to be able to compare later mon_in_quorum = set([]) mon_members = set([host for host in mon_initial_members]) for host in mon_initial_members: mon_name = 'mon.%s' % host LOG.info('processing monitor %s', mon_name) sleeps = [20, 20, 15, 10, 10, 5] tries = 5 rlogger = logging.getLogger(host) rconn = get_connection(host, username=args.username, logger=rlogger) while tries: status = mon_status_check(rconn, rlogger, host, args) has_reached_quorum = status.get('state', '') in ['peon', 'leader'] if not has_reached_quorum: LOG.warning('%s monitor is not yet in quorum, tries left: %s' % (mon_name, tries)) tries -= 1 sleep_seconds = sleeps.pop() LOG.warning('waiting %s seconds before retrying', sleep_seconds) time.sleep(sleep_seconds) else: mon_in_quorum.add(host) LOG.info('%s monitor has reached quorum!', mon_name) break rconn.exit() if mon_in_quorum == mon_members: LOG.info('all initial monitors are running and have formed quorum') LOG.info('Running gatherkeys...') gatherkeys.gatherkeys(args) else: LOG.error('Some monitors have still not reached quorum:') for host in mon_members - mon_in_quorum: LOG.error('%s', host) raise SystemExit('cluster may not be in a healthy state') def mon(args): if args.subcommand == 'create': mon_create(args) elif args.subcommand == 'add': mon_add(args) elif args.subcommand == 'destroy': mon_destroy(args) elif args.subcommand == 'create-initial': mon_create_initial(args) else: LOG.error('subcommand %s not implemented', args.subcommand) @priority(30) def make(parser): """ Deploy Ceph monitors on remote hosts. """ sub_command_help = dedent(""" Subcommands: create-initial Will deploy monitors defined in `mon initial members`, wait until they form quorum, and then run gatherkeys, reporting the monitor status along the way. If monitors don't form quorum, the command will eventually time out. create Deploy monitors by specifying them like: ceph-deploy mon create node1 node2 node3 If no hosts are passed, it will default to the `mon initial members` defined in the configuration. add Add a monitor to an existing cluster: ceph-deploy mon add node1 Or: ceph-deploy mon add node1 --address 192.168.1.10 If the section for the monitor exists and defines a `mon addr`, that will be used; otherwise it will fall back to resolving the hostname to an IP. If `--address` is used, it will override all other options. destroy Completely remove monitors on a remote host. Requires hostname(s) as arguments. """) parser.formatter_class = argparse.RawDescriptionHelpFormatter parser.description = sub_command_help parser.add_argument( 'subcommand', choices=[ 'add', 'create', 'create-initial', 'destroy', ], ) parser.add_argument( '--address', nargs='?', dest='address', ) parser.add_argument( 'mon', nargs='*', ) parser.set_defaults( func=mon, ) # # Helpers # def is_running(conn, args): """ Run a command to check the status of a mon and return a boolean. We depend heavily on the format of the output; if that ever changes, we need to modify this. 
The output of the status command should be similar to:: mon.mira094: running {"version":"0.61.5"} or when it fails:: mon.mira094: dead {"version":"0.61.5"} mon.mira094: not running {"version":"0.61.5"} """ stdout, stderr, _ = process.check( conn, args ) result_string = ' '.join(stdout) for run_check in [': running', ' start/running']: if run_check in result_string: return True return False ceph-deploy-1.4.0/ceph_deploy/new.py0000644000076500000240000001172612306157710020133 0ustar alfredostaff00000000000000import errno import logging import os import uuid import struct import time import base64 from ceph_deploy.cliutil import priority from ceph_deploy import conf, hosts, exc from ceph_deploy.util import arg_validators, ssh, net from ceph_deploy.misc import mon_hosts from ceph_deploy.lib.remoto import process from ceph_deploy.connection import get_local_connection LOG = logging.getLogger(__name__) def generate_auth_key(): key = os.urandom(16) header = struct.pack( '>> mon.path('mycluster', 'hostname') /var/lib/ceph/mon/mycluster-myhostname """ return "%s%s" % (base(cluster), hostname) def done(cluster, hostname): """ Example usage:: >>> mon.done('mycluster', 'hostname') /var/lib/ceph/mon/mycluster-myhostname/done """ return join(path(cluster, hostname), 'done') def init(cluster, hostname, init): """ Example usage:: >>> mon.init('mycluster', 'hostname', 'init') /var/lib/ceph/mon/mycluster-myhostname/init """ return join(path(cluster, hostname), init) def keyring(cluster, hostname): """ Example usage:: >>> mon.keyring('mycluster', 'myhostname') /var/lib/ceph/tmp/mycluster-myhostname.mon.keyring """ keyring_file = '%s-%s.mon.keyring' % (cluster, hostname) return join(constants.tmp_path, keyring_file) def asok(cluster, hostname): """ Example usage:: >>> mon.asok('mycluster', 'myhostname') /var/run/ceph/mycluster-mon.myhostname.asok """ asok_file = '%s-mon.%s.asok' % (cluster, hostname) return join(constants.base_run_path, asok_file) def monmap(cluster, hostname): """ Example usage:: >>> mon.monmap('mycluster', 'myhostname') /var/lib/ceph/tmp/mycluster.myhostname.monmap """ mon_map_file = '%s.%s.monmap' % (cluster, hostname) return join(constants.tmp_path, mon_map_file) ceph-deploy-1.4.0/ceph_deploy/util/pkg_managers.py0000644000076500000240000000531612300743144022747 0ustar alfredostaff00000000000000from ceph_deploy.lib.remoto import process def apt(conn, packages, *a, **kw): if isinstance(packages, str): packages = [packages] cmd = [ 'env', 'DEBIAN_FRONTEND=noninteractive', 'apt-get', '-q', 'install', '--assume-yes', ] cmd.extend(packages) return process.run( conn, cmd, *a, **kw ) def apt_remove(conn, packages, *a, **kw): if isinstance(packages, str): packages = [packages] purge = kw.pop('purge', False) cmd = [ 'apt-get', '-q', 'remove', '-f', '-y', '--force-yes', ] if purge: cmd.append('--purge') cmd.append('--') cmd.extend(packages) return process.run( conn, cmd, *a, **kw ) def apt_update(conn): cmd = [ 'apt-get', '-q', 'update', ] return process.run( conn, cmd, ) def yum(conn, packages, *a, **kw): if isinstance(packages, str): packages = [packages] cmd = [ 'yum', '-y', '-q', 'install', ] cmd.extend(packages) return process.run( conn, cmd, *a, **kw ) def yum_remove(conn, packages, *a, **kw): cmd = [ 'yum', '-y', '-q', 'remove', ] if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) return process.run( conn, cmd, *a, **kw ) def yum_clean(conn, item=None): item = item or 'all' cmd = [ 'yum', 'clean', item, ] return process.run( conn, cmd, ) 
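# Usage sketch (illustrative only, not part of the upstream module): every
# helper in this file takes a remoto-style connection plus a package name or
# list of names, builds the distro-appropriate command line, and delegates to
# process.run(). Assuming a reachable Debian-based host named 'node1':
#
#     from ceph_deploy import hosts
#     from ceph_deploy.util import pkg_managers
#
#     distro = hosts.get('node1')                        # detect platform, connect
#     pkg_managers.apt_update(distro.conn)               # refresh package indexes
#     pkg_managers.apt(distro.conn, ['ceph', 'ceph-mds'])  # install packages
#     distro.conn.exit()                                 # close the remote connection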
def rpm(conn, rpm_args=None, *a, **kw): """ A minimal front end for ``rpm``. Extra flags can be passed in via ``rpm_args`` as an iterable. """ rpm_args = rpm_args or [] cmd = [ 'rpm', '-Uvh', ] cmd.extend(rpm_args) return process.run( conn, cmd, *a, **kw ) def zypper(conn, packages, *a, **kw): if isinstance(packages, str): packages = [packages] cmd = [ 'zypper', '--non-interactive', '--quiet', 'install', ] cmd.extend(packages) return process.run( conn, cmd, *a, **kw ) def zypper_remove(conn, packages, *a, **kw): cmd = [ 'zypper', '--non-interactive', '--quiet', 'remove', ] if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) return process.run( conn, cmd, *a, **kw ) ceph-deploy-1.4.0/ceph_deploy/util/ssh.py0000644000076500000240000000214012245141667021110 0ustar alfredostaff00000000000000import logging from ceph_deploy.lib.remoto import process from ceph_deploy.lib.remoto.connection import needs_ssh from ceph_deploy.connection import get_local_connection def can_connect_passwordless(hostname): """ Ensure that the current host can SSH to the remote host, using the ``BatchMode`` option to prevent a password prompt. If it cannot, the attempt will error with an exit status of 255 and a ``Permission denied`` message. """ # Ensure we are not doing this for local hosts if not needs_ssh(hostname): return True logger = logging.getLogger(hostname) with get_local_connection(logger) as conn: # Check to see if we can login, disabling password prompts command = ['ssh', '-CT', '-o', 'BatchMode=yes', hostname] out, err, retval = process.check(conn, command, stop_on_error=False) expected_error = 'Permission denied ' has_key_error = False for line in err: if expected_error in line: has_key_error = True if retval == 255 and has_key_error: return False return True ceph-deploy-1.4.0/ceph_deploy/util/templates.py0000644000076500000240000000077412312111556022311 0ustar alfredostaff00000000000000 ceph_repo = """ [ceph] name=Ceph packages for $basearch baseurl={repo_url}/$basearch enabled=1 gpgcheck=1 type=rpm-md gpgkey={gpg_url} [ceph-noarch] name=Ceph noarch packages baseurl={repo_url}/noarch enabled=1 gpgcheck=1 type=rpm-md gpgkey={gpg_url} [ceph-source] name=Ceph source packages baseurl={repo_url}/SRPMS enabled=0 gpgcheck=1 type=rpm-md gpgkey={gpg_url} """ custom_repo = """ [{repo_name}] name={name} baseurl={baseurl} enabled={enabled} gpgcheck={gpgcheck} type={_type} gpgkey={gpgkey} """ ceph-deploy-1.4.0/ceph_deploy/validate.py0000644000076500000240000000056612236715242021131 0ustar alfredostaff00000000000000import argparse import re ALPHANUMERIC_RE = re.compile(r'^[a-zA-Z][a-zA-Z0-9]*$') def alphanumeric(s): """ Enforces the string to be alphanumeric with a leading alpha character. 
""" if not ALPHANUMERIC_RE.match(s): raise argparse.ArgumentTypeError( 'argument must start with a letter and contain only letters and numbers', ) return s ceph-deploy-1.4.0/ceph_deploy.egg-info/0000755000076500000240000000000012312561302020444 5ustar alfredostaff00000000000000ceph-deploy-1.4.0/ceph_deploy.egg-info/dependency_links.txt0000644000076500000240000000000112312561301024511 0ustar alfredostaff00000000000000 ceph-deploy-1.4.0/ceph_deploy.egg-info/entry_points.txt0000644000076500000240000000105412312561301023741 0ustar alfredostaff00000000000000[console_scripts] ceph-deploy = ceph_deploy.cli:main [ceph_deploy.cli] purgedata = ceph_deploy.install:make_purge_data pkg = ceph_deploy.pkg:make mds = ceph_deploy.mds:make forgetkeys = ceph_deploy.forgetkeys:make purge = ceph_deploy.install:make_purge admin = ceph_deploy.admin:make mon = ceph_deploy.mon:make install = ceph_deploy.install:make gatherkeys = ceph_deploy.gatherkeys:make new = ceph_deploy.new:make disk = ceph_deploy.osd:make_disk config = ceph_deploy.config:make osd = ceph_deploy.osd:make uninstall = ceph_deploy.install:make_uninstall ceph-deploy-1.4.0/ceph_deploy.egg-info/PKG-INFO0000644000076500000240000003527312312561301021552 0ustar alfredostaff00000000000000Metadata-Version: 1.0 Name: ceph-deploy Version: 1.4.0 Summary: Deploy Ceph with minimal infrastructure Home-page: https://github.com/ceph/ceph-deploy Author: Inktank Author-email: ceph-devel@vger.kernel.org License: MIT Description: ======================================================== ceph-deploy -- Deploy Ceph with minimal infrastructure ======================================================== ``ceph-deploy`` is a way to deploy Ceph relying on just SSH access to the servers, ``sudo``, and some Python. It runs fully on your workstation, requiring no servers, databases, or anything like that. If you set up and tear down Ceph clusters a lot, and want minimal extra bureaucracy, this is for you. .. _what this tool is not: What this tool is not --------------------- It is not a generic deployment system, it is only for Ceph, and is designed for users who want to quickly get Ceph running with sensible initial settings without the overhead of installing Chef, Puppet or Juju. It does not handle client configuration beyond pushing the Ceph config file and users who want fine-control over security settings, partitions or directory locations should use a tool such as Chef or Puppet. Installation ============ Depending on what type of usage you are going to have with ``ceph-deploy`` you might want to look into the different ways to install it. For automation, you might want to ``bootstrap`` directly. Regular users of ``ceph-deploy`` would probably install from the OS packages or from the Python Package Index. Python Package Index -------------------- If you are familiar with Python install tools (like ``pip`` and ``easy_install``) you can easily install ``ceph-deploy`` like:: pip install ceph-deploy or:: easy_install ceph-deploy It should grab all the dependencies for you and install into the current user's environment. We highly recommend using ``virtualenv`` and installing dependencies in a contained way. DEB --- The DEB repo can be found at http://ceph.com/packages/ceph-extras/debian/ But they can also be found for ``ceph`` releases in the ``ceph`` repos like:: ceph.com/debian-{release} ceph.com/debian-testing RPM --- The RPM repos can be found at http://ceph.com/packages/ceph-extras/rpm/ Make sure you add the proper one for your distribution. 
But they can also be found for ``ceph`` releases in the ``ceph`` repos like:: ceph.com/rpm-{release} ceph.com/rpm-testing bootstrapping ------------- To get the source tree ready for use, run this once:: ./bootstrap You can symlink the ``ceph-deploy`` script in this somewhere convenient (like ``~/bin``), or add the current directory to ``PATH``, or just always type the full path to ``ceph-deploy``. SSH and Remote Connections ========================== ``ceph-deploy`` will attempt to connect via SSH to hosts when the hostnames do not match the current host's hostname. For example, if you are connecting to host ``node1`` it will attempt an SSH connection as long as the current host's hostname is *not* ``node1``. ceph-deploy at a minimum requires that the machine from which the script is being run can ssh as root without password into each Ceph node. To enable this generate a new ssh keypair for the root user with no passphrase and place the public key (``id_rsa.pub`` or ``id_dsa.pub``) in:: /root/.ssh/authorized_keys and ensure that the following lines are in the sshd config:: PermitRootLogin yes PermitEmptyPasswords yes The machine running ceph-deploy does not need to have the Ceph packages installed unless it needs to admin the cluster directly using the ``ceph`` command line tool. usernames --------- When not specified the connection will be done with the same username as the one executing ``ceph-deploy``. This is useful if the same username is shared in all the nodes but can be cumbersome if that is not the case. A way to avoid this is to define the correct usernames to connect with in the SSH config, but you can also use the ``--username`` flag as well:: ceph-deploy --username ceph install node1 ``ceph-deploy`` then in turn would use ``ceph@node1`` to connect to that host. This would be the same expectation for any action that warrants a connection to a remote host. Managing an existing cluster ============================ You can use ceph-deploy to provision nodes for an existing cluster. To grab a copy of the cluster configuration file (normally ``ceph.conf``):: ceph-deploy config pull HOST You will usually also want to gather the encryption keys used for that cluster:: ceph-deploy gatherkeys MONHOST At this point you can skip the steps below that create a new cluster (you already have one) and optionally skip installation and/or monitor creation, depending on what you are trying to accomplish. Creating a new cluster ====================== Creating a new configuration ---------------------------- To create a new configuration file and secret key, decide what hosts will run ``ceph-mon``, and run:: ceph-deploy new MON [MON..] listing the hostnames of the monitors. Each ``MON`` can be * a simple hostname. It must be DNS resolvable without the fully qualified domain name. * a fully qualified domain name. The hostname is assumed to be the leading component up to the first ``.``. * a ``HOST:FQDN`` pair, of both the hostname and a fully qualified domain name or IP address. For example, ``foo``, ``foo.example.com``, ``foo:something.example.com``, and ``foo:1.2.3.4`` are all valid. Note, however, that the hostname should match that configured on the host ``foo``. The above will create a ``ceph.conf`` and ``ceph.mon.keyring`` in your current directory. Edit initial cluster configuration ---------------------------------- You want to review the generated ``ceph.conf`` file and make sure that the ``mon_host`` setting contains the IP addresses you would like the monitors to bind to. 
These are the IPs that clients will initially contact to authenticate to the cluster, and they need to be reachable both by external client-facing hosts and internal cluster daemons. Installing packages =================== To install the Ceph software on the servers, run:: ceph-deploy install HOST [HOST..] This installs the current default *stable* release. You can choose a different release track with command line options, for example to use a release candidate:: ceph-deploy install --testing HOST Or to test a development branch:: ceph-deploy install --dev=wip-mds-now-works-no-kidding HOST [HOST..] Proxy or Firewall Installs -------------------------- If attempting to install behind a firewall or through a proxy you can use the ``--no-adjust-repos`` that will tell ceph-deploy to skip any changes to the distro's repository in order to install the packages and it will go straight to package installation. That will allow an environment without internet access to point to *its own repositories*. This means that those repositories will need to be properly setup (and mirrored with all the necessary dependencies) before attempting an install. Another alternative is to set the `wget` env variables to point to the right hosts, for example:: http_proxy=http://host:port ftp_proxy=http://host:port https_proxy=http://host:port Deploying monitors ================== To actually deploy ``ceph-mon`` to the hosts you chose, run:: ceph-deploy mon create HOST [HOST..] Without explicit hosts listed, hosts in ``mon_initial_members`` in the config file are deployed. That is, the hosts you passed to ``ceph-deploy new`` are the default value here. Gather keys =========== To gather authenticate keys (for administering the cluster and bootstrapping new nodes) to the local directory, run:: ceph-deploy gatherkeys HOST [HOST...] where ``HOST`` is one of the monitor hosts. Once these keys are in the local directory, you can provision new OSDs etc. Deploying OSDs ============== To prepare a node for running OSDs, run:: ceph-deploy osd create HOST:DISK[:JOURNAL] [HOST:DISK[:JOURNAL] ...] After that, the hosts will be running OSDs for the given data disks. If you specify a raw disk (e.g., ``/dev/sdb``), partitions will be created and GPT labels will be used to mark and automatically activate OSD volumes. If an existing partition is specified, the partition table will not be modified. If you want to destroy the existing partition table on DISK first, you can include the ``--zap-disk`` option. If there is already a prepared disk or directory that is ready to become an OSD, you can also do:: ceph-deploy osd activate HOST:DIR[:JOURNAL] [...] This is useful when you are managing the mounting of volumes yourself. Admin hosts =========== To prepare a host with a ``ceph.conf`` and ``ceph.client.admin.keyring`` keyring so that it can administer the cluster, run:: ceph-deploy admin HOST [HOST ...] Forget keys =========== The ``new`` and ``gatherkeys`` put some Ceph authentication keys in keyrings in the local directory. If you are worried about them being there for security reasons, run:: ceph-deploy forgetkeys and they will be removed. If you need them again later to deploy additional nodes, simply re-run:: ceph-deploy gatherkeys HOST [HOST...] and they will be retrieved from an existing monitor node. Multiple clusters ================= All of the above commands take a ``--cluster=NAME`` option, allowing you to manage multiple clusters conveniently from one workstation. 
For example:: ceph-deploy --cluster=us-west new vi us-west.conf ceph-deploy --cluster=us-west mon FAQ === Before anything --------------- Make sure you have the latest version of ``ceph-deploy``. It is actively developed and releases are coming weekly (on average). The most recent versions of ``ceph-deploy`` will have a ``--version`` flag you can use, otherwise check with your package manager and update if there is anything new. Why is feature X not implemented? --------------------------------- Usually, features are added when/if it is sensible for someone that wants to get started with ceph and said feature would make sense in that context. If you believe this is the case and you've read "`what this tool is not`_" and still think feature ``X`` should exist in ceph-deploy, open a feature request in the ceph tracker: http://tracker.ceph.com/projects/devops/issues A command gave me an error, what is going on? --------------------------------------------- Most of the commands for ``ceph-deploy`` are meant to be run remotely in a host that you have configured when creating the initial config. If a given command is not working as expected try to run the command that failed in the remote host and assert the behavior there. If the behavior in the remote host is the same, then it is probably not something wrong with ``ceph-deploy`` per-se. Make sure you capture the output of both the ``ceph-deploy`` output and the output of the command in the remote host. Issues with monitors -------------------- If your monitors are not starting, make sure that the ``{hostname}`` you used when you ran ``ceph-deploy mon create {hostname}`` match the actual ``hostname -s`` in the remote host. Newer versions of ``ceph-deploy`` should warn you if the results are different but that might prevent the monitors from reaching quorum. Developing ceph-deploy ====================== Now that you have cracked your teeth on Ceph, you might find that you want to contribute to ceph-deploy. Resources --------- Bug tracking: http://tracker.ceph.com/projects/devops/issues Mailing list and IRC info is the same as ceph http://ceph.com/resources/mailing-list-irc/ Submitting Patches ------------------ Please add test cases to cover any code you add. You can test your changes by running ``tox`` (You will also need ``mock`` and ``pytest`` ) from inside the git clone When creating a commit message please use ``git commit -s`` or otherwise add ``Signed-off-by: Your Name `` to your commit message. Patches can then be submitted by a pull request on GitHub. 
Keywords: ceph deploy Platform: UNKNOWN ceph-deploy-1.4.0/ceph_deploy.egg-info/requires.txt0000644000076500000240000000001212312561301023034 0ustar alfredostaff00000000000000setuptoolsceph-deploy-1.4.0/ceph_deploy.egg-info/SOURCES.txt0000644000076500000240000000714412312561302022336 0ustar alfredostaff00000000000000LICENSE MANIFEST.in README.rst setup.cfg setup.py vendor.py ceph_deploy/__init__.py ceph_deploy/admin.py ceph_deploy/cli.py ceph_deploy/cliutil.py ceph_deploy/config.py ceph_deploy/connection.py ceph_deploy/exc.py ceph_deploy/forgetkeys.py ceph_deploy/gatherkeys.py ceph_deploy/install.py ceph_deploy/mds.py ceph_deploy/memoize.py ceph_deploy/misc.py ceph_deploy/mon.py ceph_deploy/new.py ceph_deploy/osd.py ceph_deploy/pkg.py ceph_deploy/validate.py ceph_deploy.egg-info/PKG-INFO ceph_deploy.egg-info/SOURCES.txt ceph_deploy.egg-info/dependency_links.txt ceph_deploy.egg-info/entry_points.txt ceph_deploy.egg-info/requires.txt ceph_deploy.egg-info/top_level.txt ceph_deploy/conf/__init__.py ceph_deploy/conf/ceph.py ceph_deploy/conf/cephdeploy.py ceph_deploy/hosts/__init__.py ceph_deploy/hosts/common.py ceph_deploy/hosts/remotes.py ceph_deploy/hosts/centos/__init__.py ceph_deploy/hosts/centos/install.py ceph_deploy/hosts/centos/pkg.py ceph_deploy/hosts/centos/uninstall.py ceph_deploy/hosts/centos/mon/__init__.py ceph_deploy/hosts/centos/mon/create.py ceph_deploy/hosts/debian/__init__.py ceph_deploy/hosts/debian/install.py ceph_deploy/hosts/debian/pkg.py ceph_deploy/hosts/debian/uninstall.py ceph_deploy/hosts/debian/mon/__init__.py ceph_deploy/hosts/debian/mon/create.py ceph_deploy/hosts/fedora/__init__.py ceph_deploy/hosts/fedora/install.py ceph_deploy/hosts/fedora/uninstall.py ceph_deploy/hosts/fedora/mon/__init__.py ceph_deploy/hosts/fedora/mon/create.py ceph_deploy/hosts/suse/__init__.py ceph_deploy/hosts/suse/install.py ceph_deploy/hosts/suse/pkg.py ceph_deploy/hosts/suse/uninstall.py ceph_deploy/hosts/suse/mon/__init__.py ceph_deploy/hosts/suse/mon/create.py ceph_deploy/lib/__init__.py ceph_deploy/lib/remoto/__init__.py ceph_deploy/lib/remoto/connection.py ceph_deploy/lib/remoto/exc.py ceph_deploy/lib/remoto/file_sync.py ceph_deploy/lib/remoto/log.py ceph_deploy/lib/remoto/process.py ceph_deploy/lib/remoto/util.py ceph_deploy/lib/remoto/lib/__init__.py ceph_deploy/lib/remoto/lib/execnet/__init__.py ceph_deploy/lib/remoto/lib/execnet/apipkg.py ceph_deploy/lib/remoto/lib/execnet/deprecated.py ceph_deploy/lib/remoto/lib/execnet/gateway.py ceph_deploy/lib/remoto/lib/execnet/gateway_base.py ceph_deploy/lib/remoto/lib/execnet/gateway_bootstrap.py ceph_deploy/lib/remoto/lib/execnet/gateway_io.py ceph_deploy/lib/remoto/lib/execnet/gateway_socket.py ceph_deploy/lib/remoto/lib/execnet/multi.py ceph_deploy/lib/remoto/lib/execnet/rsync.py ceph_deploy/lib/remoto/lib/execnet/rsync_remote.py ceph_deploy/lib/remoto/lib/execnet/xspec.py ceph_deploy/lib/remoto/lib/execnet/script/__init__.py ceph_deploy/lib/remoto/lib/execnet/script/loop_socketserver.py ceph_deploy/lib/remoto/lib/execnet/script/quitserver.py ceph_deploy/lib/remoto/lib/execnet/script/shell.py ceph_deploy/lib/remoto/lib/execnet/script/socketserver.py ceph_deploy/lib/remoto/lib/execnet/script/socketserverservice.py ceph_deploy/lib/remoto/lib/execnet/script/xx.py ceph_deploy/tests/__init__.py ceph_deploy/tests/conftest.py ceph_deploy/tests/directory.py ceph_deploy/tests/fakes.py ceph_deploy/tests/test_cli.py ceph_deploy/tests/test_cli_install.py ceph_deploy/tests/test_cli_mon.py ceph_deploy/tests/test_cli_new.py 
ceph_deploy/tests/test_cli_osd.py ceph_deploy/tests/test_conf.py ceph_deploy/tests/test_mon.py ceph_deploy/tests/test_remotes.py ceph_deploy/util/__init__.py ceph_deploy/util/arg_validators.py ceph_deploy/util/constants.py ceph_deploy/util/decorators.py ceph_deploy/util/log.py ceph_deploy/util/net.py ceph_deploy/util/pkg_managers.py ceph_deploy/util/ssh.py ceph_deploy/util/templates.py ceph_deploy/util/paths/__init__.py ceph_deploy/util/paths/mon.py scripts/ceph-deployceph-deploy-1.4.0/ceph_deploy.egg-info/top_level.txt0000644000076500000240000000001412312561301023170 0ustar alfredostaff00000000000000ceph_deploy ceph-deploy-1.4.0/LICENSE0000644000076500000240000000205112236715242015473 0ustar alfredostaff00000000000000Copyright (c) 2012 Inktank Storage, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ceph-deploy-1.4.0/MANIFEST.in0000644000076500000240000000014312245141667016230 0ustar alfredostaff00000000000000include *.rst include LICENSE include scripts/ceph-deploy include vendor.py prune ceph_deploy/test ceph-deploy-1.4.0/PKG-INFO0000644000076500000240000003527312312561302015566 0ustar alfredostaff00000000000000Metadata-Version: 1.0 Name: ceph-deploy Version: 1.4.0 Summary: Deploy Ceph with minimal infrastructure Home-page: https://github.com/ceph/ceph-deploy Author: Inktank Author-email: ceph-devel@vger.kernel.org License: MIT Description: ======================================================== ceph-deploy -- Deploy Ceph with minimal infrastructure ======================================================== ``ceph-deploy`` is a way to deploy Ceph relying on just SSH access to the servers, ``sudo``, and some Python. It runs fully on your workstation, requiring no servers, databases, or anything like that. If you set up and tear down Ceph clusters a lot, and want minimal extra bureaucracy, this is for you. .. _what this tool is not: What this tool is not --------------------- It is not a generic deployment system, it is only for Ceph, and is designed for users who want to quickly get Ceph running with sensible initial settings without the overhead of installing Chef, Puppet or Juju. It does not handle client configuration beyond pushing the Ceph config file and users who want fine-control over security settings, partitions or directory locations should use a tool such as Chef or Puppet. Installation ============ Depending on what type of usage you are going to have with ``ceph-deploy`` you might want to look into the different ways to install it. For automation, you might want to ``bootstrap`` directly. 
Regular users of ``ceph-deploy`` would probably install from the OS packages or from the Python Package Index. Python Package Index -------------------- If you are familiar with Python install tools (like ``pip`` and ``easy_install``) you can easily install ``ceph-deploy`` like:: pip install ceph-deploy or:: easy_install ceph-deploy It should grab all the dependencies for you and install into the current user's environment. We highly recommend using ``virtualenv`` and installing dependencies in a contained way. DEB --- The DEB repo can be found at http://ceph.com/packages/ceph-extras/debian/ But they can also be found for ``ceph`` releases in the ``ceph`` repos like:: ceph.com/debian-{release} ceph.com/debian-testing RPM --- The RPM repos can be found at http://ceph.com/packages/ceph-extras/rpm/ Make sure you add the proper one for your distribution. But they can also be found for ``ceph`` releases in the ``ceph`` repos like:: ceph.com/rpm-{release} ceph.com/rpm-testing bootstrapping ------------- To get the source tree ready for use, run this once:: ./bootstrap You can symlink the ``ceph-deploy`` script in this somewhere convenient (like ``~/bin``), or add the current directory to ``PATH``, or just always type the full path to ``ceph-deploy``. SSH and Remote Connections ========================== ``ceph-deploy`` will attempt to connect via SSH to hosts when the hostnames do not match the current host's hostname. For example, if you are connecting to host ``node1`` it will attempt an SSH connection as long as the current host's hostname is *not* ``node1``. ceph-deploy at a minimum requires that the machine from which the script is being run can ssh as root without password into each Ceph node. To enable this generate a new ssh keypair for the root user with no passphrase and place the public key (``id_rsa.pub`` or ``id_dsa.pub``) in:: /root/.ssh/authorized_keys and ensure that the following lines are in the sshd config:: PermitRootLogin yes PermitEmptyPasswords yes The machine running ceph-deploy does not need to have the Ceph packages installed unless it needs to admin the cluster directly using the ``ceph`` command line tool. usernames --------- When not specified the connection will be done with the same username as the one executing ``ceph-deploy``. This is useful if the same username is shared in all the nodes but can be cumbersome if that is not the case. A way to avoid this is to define the correct usernames to connect with in the SSH config, but you can also use the ``--username`` flag as well:: ceph-deploy --username ceph install node1 ``ceph-deploy`` then in turn would use ``ceph@node1`` to connect to that host. This would be the same expectation for any action that warrants a connection to a remote host. Managing an existing cluster ============================ You can use ceph-deploy to provision nodes for an existing cluster. To grab a copy of the cluster configuration file (normally ``ceph.conf``):: ceph-deploy config pull HOST You will usually also want to gather the encryption keys used for that cluster:: ceph-deploy gatherkeys MONHOST At this point you can skip the steps below that create a new cluster (you already have one) and optionally skip installation and/or monitor creation, depending on what you are trying to accomplish. Creating a new cluster ====================== Creating a new configuration ---------------------------- To create a new configuration file and secret key, decide what hosts will run ``ceph-mon``, and run:: ceph-deploy new MON [MON..] 
listing the hostnames of the monitors. Each ``MON`` can be * a simple hostname. It must be DNS resolvable without the fully qualified domain name. * a fully qualified domain name. The hostname is assumed to be the leading component up to the first ``.``. * a ``HOST:FQDN`` pair, of both the hostname and a fully qualified domain name or IP address. For example, ``foo``, ``foo.example.com``, ``foo:something.example.com``, and ``foo:1.2.3.4`` are all valid. Note, however, that the hostname should match that configured on the host ``foo``. The above will create a ``ceph.conf`` and ``ceph.mon.keyring`` in your current directory. Edit initial cluster configuration ---------------------------------- You want to review the generated ``ceph.conf`` file and make sure that the ``mon_host`` setting contains the IP addresses you would like the monitors to bind to. These are the IPs that clients will initially contact to authenticate to the cluster, and they need to be reachable both by external client-facing hosts and internal cluster daemons. Installing packages =================== To install the Ceph software on the servers, run:: ceph-deploy install HOST [HOST..] This installs the current default *stable* release. You can choose a different release track with command line options, for example to use a release candidate:: ceph-deploy install --testing HOST Or to test a development branch:: ceph-deploy install --dev=wip-mds-now-works-no-kidding HOST [HOST..] Proxy or Firewall Installs -------------------------- If attempting to install behind a firewall or through a proxy you can use the ``--no-adjust-repos`` that will tell ceph-deploy to skip any changes to the distro's repository in order to install the packages and it will go straight to package installation. That will allow an environment without internet access to point to *its own repositories*. This means that those repositories will need to be properly setup (and mirrored with all the necessary dependencies) before attempting an install. Another alternative is to set the `wget` env variables to point to the right hosts, for example:: http_proxy=http://host:port ftp_proxy=http://host:port https_proxy=http://host:port Deploying monitors ================== To actually deploy ``ceph-mon`` to the hosts you chose, run:: ceph-deploy mon create HOST [HOST..] Without explicit hosts listed, hosts in ``mon_initial_members`` in the config file are deployed. That is, the hosts you passed to ``ceph-deploy new`` are the default value here. Gather keys =========== To gather authenticate keys (for administering the cluster and bootstrapping new nodes) to the local directory, run:: ceph-deploy gatherkeys HOST [HOST...] where ``HOST`` is one of the monitor hosts. Once these keys are in the local directory, you can provision new OSDs etc. Deploying OSDs ============== To prepare a node for running OSDs, run:: ceph-deploy osd create HOST:DISK[:JOURNAL] [HOST:DISK[:JOURNAL] ...] After that, the hosts will be running OSDs for the given data disks. If you specify a raw disk (e.g., ``/dev/sdb``), partitions will be created and GPT labels will be used to mark and automatically activate OSD volumes. If an existing partition is specified, the partition table will not be modified. If you want to destroy the existing partition table on DISK first, you can include the ``--zap-disk`` option. If there is already a prepared disk or directory that is ready to become an OSD, you can also do:: ceph-deploy osd activate HOST:DIR[:JOURNAL] [...] 
This is useful when you are managing the mounting of volumes yourself. Admin hosts =========== To prepare a host with a ``ceph.conf`` and ``ceph.client.admin.keyring`` keyring so that it can administer the cluster, run:: ceph-deploy admin HOST [HOST ...] Forget keys =========== The ``new`` and ``gatherkeys`` put some Ceph authentication keys in keyrings in the local directory. If you are worried about them being there for security reasons, run:: ceph-deploy forgetkeys and they will be removed. If you need them again later to deploy additional nodes, simply re-run:: ceph-deploy gatherkeys HOST [HOST...] and they will be retrieved from an existing monitor node. Multiple clusters ================= All of the above commands take a ``--cluster=NAME`` option, allowing you to manage multiple clusters conveniently from one workstation. For example:: ceph-deploy --cluster=us-west new vi us-west.conf ceph-deploy --cluster=us-west mon FAQ === Before anything --------------- Make sure you have the latest version of ``ceph-deploy``. It is actively developed and releases are coming weekly (on average). The most recent versions of ``ceph-deploy`` will have a ``--version`` flag you can use, otherwise check with your package manager and update if there is anything new. Why is feature X not implemented? --------------------------------- Usually, features are added when/if it is sensible for someone that wants to get started with ceph and said feature would make sense in that context. If you believe this is the case and you've read "`what this tool is not`_" and still think feature ``X`` should exist in ceph-deploy, open a feature request in the ceph tracker: http://tracker.ceph.com/projects/devops/issues A command gave me an error, what is going on? --------------------------------------------- Most of the commands for ``ceph-deploy`` are meant to be run remotely in a host that you have configured when creating the initial config. If a given command is not working as expected try to run the command that failed in the remote host and assert the behavior there. If the behavior in the remote host is the same, then it is probably not something wrong with ``ceph-deploy`` per-se. Make sure you capture the output of both the ``ceph-deploy`` output and the output of the command in the remote host. Issues with monitors -------------------- If your monitors are not starting, make sure that the ``{hostname}`` you used when you ran ``ceph-deploy mon create {hostname}`` match the actual ``hostname -s`` in the remote host. Newer versions of ``ceph-deploy`` should warn you if the results are different but that might prevent the monitors from reaching quorum. Developing ceph-deploy ====================== Now that you have cracked your teeth on Ceph, you might find that you want to contribute to ceph-deploy. Resources --------- Bug tracking: http://tracker.ceph.com/projects/devops/issues Mailing list and IRC info is the same as ceph http://ceph.com/resources/mailing-list-irc/ Submitting Patches ------------------ Please add test cases to cover any code you add. You can test your changes by running ``tox`` (You will also need ``mock`` and ``pytest`` ) from inside the git clone When creating a commit message please use ``git commit -s`` or otherwise add ``Signed-off-by: Your Name `` to your commit message. Patches can then be submitted by a pull request on GitHub. 
Keywords: ceph deploy Platform: UNKNOWN ceph-deploy-1.4.0/README.rst0000644000076500000240000002716012300743144016157 0ustar alfredostaff00000000000000======================================================== ceph-deploy -- Deploy Ceph with minimal infrastructure ======================================================== ``ceph-deploy`` is a way to deploy Ceph relying on just SSH access to the servers, ``sudo``, and some Python. It runs fully on your workstation, requiring no servers, databases, or anything like that. If you set up and tear down Ceph clusters a lot, and want minimal extra bureaucracy, this is for you. .. _what this tool is not: What this tool is not --------------------- It is not a generic deployment system, it is only for Ceph, and is designed for users who want to quickly get Ceph running with sensible initial settings without the overhead of installing Chef, Puppet or Juju. It does not handle client configuration beyond pushing the Ceph config file and users who want fine-control over security settings, partitions or directory locations should use a tool such as Chef or Puppet. Installation ============ Depending on what type of usage you are going to have with ``ceph-deploy`` you might want to look into the different ways to install it. For automation, you might want to ``bootstrap`` directly. Regular users of ``ceph-deploy`` would probably install from the OS packages or from the Python Package Index. Python Package Index -------------------- If you are familiar with Python install tools (like ``pip`` and ``easy_install``) you can easily install ``ceph-deploy`` like:: pip install ceph-deploy or:: easy_install ceph-deploy It should grab all the dependencies for you and install into the current user's environment. We highly recommend using ``virtualenv`` and installing dependencies in a contained way. DEB --- The DEB repo can be found at http://ceph.com/packages/ceph-extras/debian/ But they can also be found for ``ceph`` releases in the ``ceph`` repos like:: ceph.com/debian-{release} ceph.com/debian-testing RPM --- The RPM repos can be found at http://ceph.com/packages/ceph-extras/rpm/ Make sure you add the proper one for your distribution. But they can also be found for ``ceph`` releases in the ``ceph`` repos like:: ceph.com/rpm-{release} ceph.com/rpm-testing bootstrapping ------------- To get the source tree ready for use, run this once:: ./bootstrap You can symlink the ``ceph-deploy`` script in this somewhere convenient (like ``~/bin``), or add the current directory to ``PATH``, or just always type the full path to ``ceph-deploy``. SSH and Remote Connections ========================== ``ceph-deploy`` will attempt to connect via SSH to hosts when the hostnames do not match the current host's hostname. For example, if you are connecting to host ``node1`` it will attempt an SSH connection as long as the current host's hostname is *not* ``node1``. ceph-deploy at a minimum requires that the machine from which the script is being run can ssh as root without password into each Ceph node. To enable this generate a new ssh keypair for the root user with no passphrase and place the public key (``id_rsa.pub`` or ``id_dsa.pub``) in:: /root/.ssh/authorized_keys and ensure that the following lines are in the sshd config:: PermitRootLogin yes PermitEmptyPasswords yes The machine running ceph-deploy does not need to have the Ceph packages installed unless it needs to admin the cluster directly using the ``ceph`` command line tool. 
usernames --------- When not specified, the connection will be made with the same username as the one executing ``ceph-deploy``. This is useful if the same username is shared across all the nodes, but can be cumbersome if that is not the case. A way to avoid this is to define the correct usernames to connect with in the SSH config, but you can also use the ``--username`` flag:: ceph-deploy --username ceph install node1 ``ceph-deploy`` would then use ``ceph@node1`` to connect to that host. This would be the same expectation for any action that warrants a connection to a remote host. Managing an existing cluster ============================ You can use ceph-deploy to provision nodes for an existing cluster. To grab a copy of the cluster configuration file (normally ``ceph.conf``):: ceph-deploy config pull HOST You will usually also want to gather the encryption keys used for that cluster:: ceph-deploy gatherkeys MONHOST At this point you can skip the steps below that create a new cluster (you already have one) and optionally skip installation and/or monitor creation, depending on what you are trying to accomplish. Creating a new cluster ====================== Creating a new configuration ---------------------------- To create a new configuration file and secret key, decide what hosts will run ``ceph-mon``, and run:: ceph-deploy new MON [MON..] listing the hostnames of the monitors. Each ``MON`` can be * a simple hostname. It must be DNS resolvable without the fully qualified domain name. * a fully qualified domain name. The hostname is assumed to be the leading component up to the first ``.``. * a ``HOST:FQDN`` pair, of both the hostname and a fully qualified domain name or IP address. For example, ``foo``, ``foo.example.com``, ``foo:something.example.com``, and ``foo:1.2.3.4`` are all valid. Note, however, that the hostname should match that configured on the host ``foo``. The above will create a ``ceph.conf`` and ``ceph.mon.keyring`` in your current directory. Edit initial cluster configuration ---------------------------------- You want to review the generated ``ceph.conf`` file and make sure that the ``mon_host`` setting contains the IP addresses you would like the monitors to bind to. These are the IPs that clients will initially contact to authenticate to the cluster, and they need to be reachable both by external client-facing hosts and internal cluster daemons. Installing packages =================== To install the Ceph software on the servers, run:: ceph-deploy install HOST [HOST..] This installs the current default *stable* release. You can choose a different release track with command line options, for example to use a release candidate:: ceph-deploy install --testing HOST Or to test a development branch:: ceph-deploy install --dev=wip-mds-now-works-no-kidding HOST [HOST..] Proxy or Firewall Installs -------------------------- If attempting to install behind a firewall or through a proxy, you can use the ``--no-adjust-repos`` flag, which tells ceph-deploy to skip any changes to the distro's repositories and go straight to package installation. That will allow an environment without internet access to point to *its own repositories*. This means that those repositories will need to be properly set up (and mirrored with all the necessary dependencies) before attempting an install. 
Another alternative is to set the `wget` env variables to point to the right hosts, for example:: http_proxy=http://host:port ftp_proxy=http://host:port https_proxy=http://host:port Deploying monitors ================== To actually deploy ``ceph-mon`` to the hosts you chose, run:: ceph-deploy mon create HOST [HOST..] Without explicit hosts listed, hosts in ``mon_initial_members`` in the config file are deployed. That is, the hosts you passed to ``ceph-deploy new`` are the default value here. Gather keys =========== To gather authentication keys (for administering the cluster and bootstrapping new nodes) to the local directory, run:: ceph-deploy gatherkeys HOST [HOST...] where ``HOST`` is one of the monitor hosts. Once these keys are in the local directory, you can provision new OSDs etc. Deploying OSDs ============== To prepare a node for running OSDs, run:: ceph-deploy osd create HOST:DISK[:JOURNAL] [HOST:DISK[:JOURNAL] ...] After that, the hosts will be running OSDs for the given data disks. If you specify a raw disk (e.g., ``/dev/sdb``), partitions will be created and GPT labels will be used to mark and automatically activate OSD volumes. If an existing partition is specified, the partition table will not be modified. If you want to destroy the existing partition table on DISK first, you can include the ``--zap-disk`` option. If there is already a prepared disk or directory that is ready to become an OSD, you can also do:: ceph-deploy osd activate HOST:DIR[:JOURNAL] [...] This is useful when you are managing the mounting of volumes yourself. Admin hosts =========== To prepare a host with a ``ceph.conf`` and ``ceph.client.admin.keyring`` keyring so that it can administer the cluster, run:: ceph-deploy admin HOST [HOST ...] Forget keys =========== The ``new`` and ``gatherkeys`` commands put some Ceph authentication keys in keyrings in the local directory. If you are worried about them being there for security reasons, run:: ceph-deploy forgetkeys and they will be removed. If you need them again later to deploy additional nodes, simply re-run:: ceph-deploy gatherkeys HOST [HOST...] and they will be retrieved from an existing monitor node. Multiple clusters ================= All of the above commands take a ``--cluster=NAME`` option, allowing you to manage multiple clusters conveniently from one workstation. For example:: ceph-deploy --cluster=us-west new vi us-west.conf ceph-deploy --cluster=us-west mon FAQ === Before anything --------------- Make sure you have the latest version of ``ceph-deploy``. It is actively developed and releases are coming weekly (on average). The most recent versions of ``ceph-deploy`` will have a ``--version`` flag you can use; otherwise, check with your package manager and update if there is anything new. Why is feature X not implemented? --------------------------------- Usually, features are added when/if it is sensible for someone who wants to get started with Ceph and said feature would make sense in that context. If you believe this is the case and you've read "`what this tool is not`_" and still think feature ``X`` should exist in ceph-deploy, open a feature request in the ceph tracker: http://tracker.ceph.com/projects/devops/issues A command gave me an error, what is going on? --------------------------------------------- Most of the commands for ``ceph-deploy`` are meant to be run remotely on a host that you have configured when creating the initial config. 
If a given command is not working as expected, try running the failed command on the remote host and verify the behavior there. If the behavior on the remote host is the same, then it is probably not a problem with ``ceph-deploy`` per se. Make sure you capture both the ``ceph-deploy`` output and the output of the command on the remote host. Issues with monitors -------------------- If your monitors are not starting, make sure that the ``{hostname}`` you used when you ran ``ceph-deploy mon create {hostname}`` matches the actual ``hostname -s`` on the remote host. Newer versions of ``ceph-deploy`` should warn you if the results are different; a mismatch may prevent the monitors from reaching quorum. Developing ceph-deploy ====================== Now that you have cut your teeth on Ceph, you might find that you want to contribute to ceph-deploy. Resources --------- Bug tracking: http://tracker.ceph.com/projects/devops/issues Mailing list and IRC info is the same as ceph http://ceph.com/resources/mailing-list-irc/ Submitting Patches ------------------ Please add test cases to cover any code you add. You can test your changes by running ``tox`` (you will also need ``mock`` and ``pytest``) from inside the git clone. When creating a commit message, please use ``git commit -s`` or otherwise add ``Signed-off-by: Your Name `` to your commit message. Patches can then be submitted via a pull request on GitHub.ceph-deploy-1.4.0/scripts/0000755000076500000240000000000012312561302016146 5ustar alfredostaff00000000000000ceph-deploy-1.4.0/scripts/ceph-deploy0000755000076500000240000000120312245141667020316 0ustar alfredostaff00000000000000#!/usr/bin/env python import os import platform import sys """ ceph-deploy - admin tool for ceph """ if os.path.exists('/usr/share/pyshared/ceph_deploy'): sys.path.insert(0,'/usr/share/pyshared/ceph_deploy') elif os.path.exists('/usr/share/ceph-deploy'): sys.path.insert(0,'/usr/share/ceph-deploy') elif os.path.exists('/usr/share/pyshared/ceph-deploy'): sys.path.insert(0,'/usr/share/pyshared/ceph-deploy') elif os.path.exists('/usr/lib/python2.6/site-packages/ceph_deploy'): sys.path.insert(0,'/usr/lib/python2.6/site-packages/ceph_deploy') from ceph_deploy.cli import main if __name__ == '__main__': sys.exit(main()) ceph-deploy-1.4.0/setup.cfg0000644000076500000240000000014612312561302016301 0ustar alfredostaff00000000000000[pytest] norecursedirs = .* _* virtualenv [egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 ceph-deploy-1.4.0/setup.py0000644000076500000240000000352112303130116016165 0ustar alfredostaff00000000000000from setuptools import setup, find_packages import os import sys import ceph_deploy from vendor import vendorize def read(fname): path = os.path.join(os.path.dirname(__file__), fname) f = open(path) return f.read() install_requires = [] pyversion = sys.version_info[:2] if pyversion < (2, 7) or (3, 0) <= pyversion <= (3, 1): install_requires.append('argparse') # # Add libraries that are not part of install_requires # vendorize([ ('remoto', '0.0.15'), ]) setup( name='ceph-deploy', version=ceph_deploy.__version__, packages=find_packages(), author='Inktank', author_email='ceph-devel@vger.kernel.org', description='Deploy Ceph with minimal infrastructure', long_description=read('README.rst'), license='MIT', keywords='ceph deploy', url="https://github.com/ceph/ceph-deploy", install_requires=[ 'setuptools', ] + install_requires, tests_require=[ 'pytest >=2.1.3', 'mock >=1.0b1', ], entry_points={ 'console_scripts': [ 'ceph-deploy = 
ceph_deploy.cli:main', ], 'ceph_deploy.cli': [ 'new = ceph_deploy.new:make', 'install = ceph_deploy.install:make', 'uninstall = ceph_deploy.install:make_uninstall', 'purge = ceph_deploy.install:make_purge', 'purgedata = ceph_deploy.install:make_purge_data', 'mon = ceph_deploy.mon:make', 'gatherkeys = ceph_deploy.gatherkeys:make', 'osd = ceph_deploy.osd:make', 'disk = ceph_deploy.osd:make_disk', 'mds = ceph_deploy.mds:make', 'forgetkeys = ceph_deploy.forgetkeys:make', 'config = ceph_deploy.config:make', 'admin = ceph_deploy.admin:make', 'pkg = ceph_deploy.pkg:make' ], }, ) ceph-deploy-1.4.0/vendor.py0000644000076500000240000000412312245141667016343 0ustar alfredostaff00000000000000import subprocess import os from os import path import traceback error_msg = """ This library depends on sources that are fetched at packaging time, and that fetch failed. This means that it will *not* work as expected. Errors encountered: """ def run(cmd): print '[vendoring] Running command: %s' % ' '.join(cmd) try: result = subprocess.Popen( cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE ) except Exception as error: print_error([], traceback.format_exc().split('\n')) raise SystemExit(1) if result.wait(): print_error(result.stdout.readlines(), result.stderr.readlines()) def print_error(stdout, stderr): print '*'*80 print error_msg for line in stdout: print line for line in stderr: print line print '*'*80 def vendor_library(name, version): this_dir = path.dirname(path.abspath(__file__)) vendor_dest = path.join(this_dir, 'ceph_deploy/lib/%s' % name) vendor_src = path.join(this_dir, name) vendor_module = path.join(vendor_src, name) current_dir = os.getcwd() if path.exists(vendor_src): run(['rm', '-rf', vendor_src]) if path.exists(vendor_dest): module = __import__('ceph_deploy.lib.remoto', globals(), locals(), ['__version__']) if module.__version__ != version: run(['rm', '-rf', vendor_dest]) if not path.exists(vendor_dest): run(['git', 'clone', 'git://ceph.com/%s' % name]) os.chdir(vendor_src) run(['git', 'checkout', version]) run(['mv', vendor_module, vendor_dest]) os.chdir(current_dir) def vendorize(vendor_requirements): """ This is the main entry point for vendorizing requirements. It expects a list of tuples that should contain the name of the library and the version. For example, a library ``foo`` with version ``0.0.1`` would look like:: vendor_requirements = [ ('foo', '0.0.1'), ] """ for library in vendor_requirements: name, version = library vendor_library(name, version)
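# Usage sketch: setup.py in this release drives the same entry point, pinning
# the vendored library to an exact tag so that repeated builds fetch identical
# sources:
#
#     from vendor import vendorize
#     vendorize([
#         ('remoto', '0.0.15'),  # version taken from setup.py in this release
#     ])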