ceph-deploy-2.0.1/
ceph-deploy-2.0.1/ceph_deploy/
ceph-deploy-2.0.1/ceph_deploy/__init__.py

__version__ = '2.0.1'

ceph-deploy-2.0.1/ceph_deploy/admin.py
import logging

from ceph_deploy import exc
from ceph_deploy import conf
from ceph_deploy.cliutil import priority
from ceph_deploy import hosts


LOG = logging.getLogger(__name__)


def admin(args):
    conf_data = conf.ceph.load_raw(args)
    try:
        with open('%s.client.admin.keyring' % args.cluster, 'rb') as f:
            keyring = f.read()
    except IOError:
        raise RuntimeError('%s.client.admin.keyring not found' % args.cluster)

    errors = 0
    for hostname in args.client:
        LOG.debug('Pushing admin keys and conf to %s', hostname)
        try:
            distro = hosts.get(hostname, username=args.username)

            distro.conn.remote_module.write_conf(
                args.cluster,
                conf_data,
                args.overwrite_conf,
            )

            distro.conn.remote_module.write_file(
                '/etc/ceph/%s.client.admin.keyring' % args.cluster,
                keyring,
                0o600,
            )

            distro.conn.exit()

        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to configure %d admin hosts' % errors)


@priority(70)
def make(parser):
    """
    Push configuration and client.admin key to a remote host.
    """
    parser.add_argument(
        'client',
        metavar='HOST',
        nargs='+',
        help='host to configure for Ceph administration',
    )
    parser.set_defaults(
        func=admin,
    )

ceph-deploy-2.0.1/ceph_deploy/calamari.py
import errno
import logging
import os

from ceph_deploy import hosts, exc
from ceph_deploy.lib import remoto


LOG = logging.getLogger(__name__)


def distro_is_supported(distro_name):
    """
    An enforcer of supported distros that can differ from what ceph-deploy
    supports.
    """
    supported = ['centos', 'redhat', 'ubuntu', 'debian']
    if distro_name in supported:
        return True
    return False


def connect(args):
    for hostname in args.hosts:
        distro = hosts.get(hostname, username=args.username)
        if not distro_is_supported(distro.normalized_name):
            raise exc.UnsupportedPlatform(
                distro.name, distro.codename, distro.release
            )

        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )
        LOG.info('assuming that a repository with Calamari packages is already configured.')
        LOG.info('Refer to the docs for examples (http://ceph.com/ceph-deploy/docs/conf.html)')

        rlogger = logging.getLogger(hostname)

        # Emplace minion config prior to installation so that it is present
        # when the minion first starts.
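        # For illustration only: with a master of 'calamari.example.com'
        # (a placeholder, not a ceph-deploy default), the write_file() call
        # below would leave this file on the remote host:
        #
        #   /etc/salt/minion.d/calamari.conf:
        #       master: calamari.example.com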
minion_config_dir = os.path.join('/etc/salt/', 'minion.d') minion_config_file = os.path.join(minion_config_dir, 'calamari.conf') rlogger.debug('creating config dir: %s' % minion_config_dir) distro.conn.remote_module.makedir(minion_config_dir, [errno.EEXIST]) rlogger.debug( 'creating the calamari salt config: %s' % minion_config_file ) distro.conn.remote_module.write_file( minion_config_file, ('master: %s\n' % args.master).encode('utf-8') ) distro.packager.install('salt-minion') distro.packager.install('diamond') # redhat/centos need to get the service started if distro.normalized_name in ['redhat', 'centos']: remoto.process.run( distro.conn, ['chkconfig', 'salt-minion', 'on'] ) remoto.process.run( distro.conn, ['service', 'salt-minion', 'start'] ) distro.conn.exit() def calamari(args): if args.subcommand == 'connect': connect(args) def make(parser): """ Install and configure Calamari nodes. Assumes that a repository with Calamari packages is already configured. Refer to the docs for examples (http://ceph.com/ceph-deploy/docs/conf.html) """ calamari_parser = parser.add_subparsers(dest='subcommand') calamari_parser.required = True calamari_connect = calamari_parser.add_parser( 'connect', help='Configure host(s) to connect to Calamari master' ) calamari_connect.add_argument( '--master', nargs='?', metavar='MASTER SERVER', help="The domain for the Calamari master server" ) calamari_connect.add_argument( 'hosts', nargs='+', ) parser.set_defaults( func=calamari, ) ceph-deploy-2.0.1/ceph_deploy/cli.py0000644000076500000240000001233713312237710020103 0ustar alfredostaff00000000000000import pkg_resources import argparse import logging import textwrap import os import sys import ceph_deploy from ceph_deploy import exc from ceph_deploy.util import log from ceph_deploy.util.decorators import catches LOG = logging.getLogger(__name__) __header__ = textwrap.dedent(""" -^- / \\ |O o| ceph-deploy v%s ).-.( '/|||\` | '|` | '|` Full documentation can be found at: http://ceph.com/ceph-deploy/docs """ % ceph_deploy.__version__) def log_flags(args, logger=None): logger = logger or LOG logger.info('ceph-deploy options:') for k, v in args.__dict__.items(): if k.startswith('_'): continue logger.info(' %-30s: %s' % (k, v)) def get_parser(): epilog_text = "See 'ceph-deploy --help' for help on a specific command" parser = argparse.ArgumentParser( prog='ceph-deploy', formatter_class=argparse.RawDescriptionHelpFormatter, description='Easy Ceph deployment\n\n%s' % __header__, epilog=epilog_text ) verbosity = parser.add_mutually_exclusive_group(required=False) verbosity.add_argument( '-v', '--verbose', action='store_true', dest='verbose', default=False, help='be more verbose', ) verbosity.add_argument( '-q', '--quiet', action='store_true', dest='quiet', help='be less verbose', ) parser.add_argument( '--version', action='version', version='%s' % ceph_deploy.__version__, help='the current installed version of ceph-deploy', ) parser.add_argument( '--username', help='the username to connect to the remote host', ) parser.add_argument( '--overwrite-conf', action='store_true', help='overwrite an existing conf file on remote host (if present)', ) parser.add_argument( '--ceph-conf', dest='ceph_conf', help='use (or reuse) a given ceph.conf file', ) sub = parser.add_subparsers( title='commands', metavar='COMMAND', help='description', ) sub.required = True entry_points = [ (ep.name, ep.load()) for ep in pkg_resources.iter_entry_points('ceph_deploy.cli') ] entry_points.sort( key=lambda name_fn: getattr(name_fn[1], 'priority', 100), ) 
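    # The sort key is the `priority` attribute attached by the @priority
    # decorator (see cliutil.py); lower numbers surface earlier in the help
    # output. For example, among the commands in this tree, gatherkeys
    # (priority 40) sorts ahead of admin and config (70), which in turn sort
    # ahead of forgetkeys (100) and any command without an explicit priority.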
for (name, fn) in entry_points: p = sub.add_parser( name, description=fn.__doc__, help=fn.__doc__, ) if not os.environ.get('CEPH_DEPLOY_TEST'): p.set_defaults(cd_conf=ceph_deploy.conf.cephdeploy.load()) # flag if the default release is being used p.set_defaults(default_release=False) fn(p) p.required = True parser.set_defaults( cluster='ceph', ) return parser @catches((KeyboardInterrupt, RuntimeError, exc.DeployError,), handle_all=True) def _main(args=None, namespace=None): # Set console logging first with some defaults, to prevent having exceptions # before hitting logging configuration. The defaults can/will get overridden # later. # Console Logger sh = logging.StreamHandler() sh.setFormatter(log.color_format()) sh.setLevel(logging.WARNING) # because we're in a module already, __name__ is not the ancestor of # the rest of the package; use the root as the logger for everyone root_logger = logging.getLogger() # allow all levels at root_logger, handlers control individual levels root_logger.setLevel(logging.DEBUG) root_logger.addHandler(sh) parser = get_parser() if len(sys.argv) < 2: parser.print_help() sys.exit() else: args = parser.parse_args(args=args, namespace=namespace) console_loglevel = logging.DEBUG # start at DEBUG for now if args.quiet: console_loglevel = logging.WARNING if args.verbose: console_loglevel = logging.DEBUG # Console Logger sh.setLevel(console_loglevel) # File Logger fh = logging.FileHandler('ceph-deploy-{cluster}.log'.format(cluster=args.cluster)) fh.setLevel(logging.DEBUG) fh.setFormatter(logging.Formatter(log.FILE_FORMAT)) root_logger.addHandler(fh) # Reads from the config file and sets values for the global # flags and the given sub-command # the one flag that will never work regardless of the config settings is # logging because we cannot set it before hand since the logging config is # not ready yet. This is the earliest we can do. args = ceph_deploy.conf.cephdeploy.set_overrides(args) LOG.info("Invoked (%s): %s" % ( ceph_deploy.__version__, ' '.join(sys.argv)) ) log_flags(args) return args.func(args) def main(args=None, namespace=None): try: _main(args=args, namespace=namespace) finally: # This block is crucial to avoid having issues with # Python spitting non-sense thread exceptions. We have already # handled what we could, so close stderr and stdout. if not os.environ.get('CEPH_DEPLOY_TEST'): try: sys.stdout.close() except: pass try: sys.stderr.close() except: pass ceph-deploy-2.0.1/ceph_deploy/cliutil.py0000644000076500000240000000027612236715242021005 0ustar alfredostaff00000000000000def priority(num): """ Decorator to add a `priority` attribute to the function. """ def add_priority(fn): fn.priority = num return fn return add_priority ceph-deploy-2.0.1/ceph_deploy/conf/0000755000076500000240000000000013312242252017676 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/conf/__init__.py0000644000076500000240000000007412754333353022024 0ustar alfredostaff00000000000000from . import ceph # noqa from . 
import cephdeploy # noqa ceph-deploy-2.0.1/ceph_deploy/conf/ceph.py0000644000076500000240000000621012754333353021202 0ustar alfredostaff00000000000000try: import configparser except ImportError: import ConfigParser as configparser import contextlib import sys from ceph_deploy import exc class _TrimIndentFile(object): def __init__(self, fp): self.fp = fp def readline(self): line = self.fp.readline() return line.lstrip(' \t') def __iter__(self): return iter(self.readline, '') class CephConf(configparser.RawConfigParser): def __init__(self, *args, **kwargs): if sys.version_info >= (3, 2): kwargs.setdefault('strict', False) # super() cannot be used with an old-style class configparser.RawConfigParser.__init__(self, *args, **kwargs) def optionxform(self, s): s = s.replace('_', ' ') s = '_'.join(s.split()) return s def safe_get(self, section, key): """ Attempt to get a configuration value from a certain section in a ``cfg`` object but returning None if not found. Avoids the need to be doing try/except {ConfigParser Exceptions} every time. """ try: #Use full parent function so we can replace it in the class # if desired return configparser.RawConfigParser.get(self, section, key) except (configparser.NoSectionError, configparser.NoOptionError): return None def parse(fp): cfg = CephConf() ifp = _TrimIndentFile(fp) cfg.readfp(ifp) return cfg def load(args): """ :param args: Will be used to infer the proper configuration name, or if args.ceph_conf is passed in, that will take precedence """ path = args.ceph_conf or '{cluster}.conf'.format(cluster=args.cluster) try: f = open(path) except IOError as e: raise exc.ConfigError( "%s; has `ceph-deploy new` been run in this directory?" % e ) else: with contextlib.closing(f): return parse(f) def load_raw(args): """ Read the actual file *as is* without parsing/modifiying it so that it can be written maintaining its same properties. :param args: Will be used to infer the proper configuration name :paran path: alternatively, use a path for any configuration file loading """ path = args.ceph_conf or '{cluster}.conf'.format(cluster=args.cluster) try: with open(path) as ceph_conf: return ceph_conf.read() except (IOError, OSError) as e: raise exc.ConfigError( "%s; has `ceph-deploy new` been run in this directory?" 
% e ) def write_conf(cluster, conf, overwrite): """ write cluster configuration to /etc/ceph/{cluster}.conf """ import os path = '/etc/ceph/{cluster}.conf'.format(cluster=cluster) tmp = '{path}.{pid}.tmp'.format(path=path, pid=os.getpid()) if os.path.exists(path): with open(path) as f: old = f.read() if old != conf and not overwrite: raise RuntimeError('config file %s exists with different content; use --overwrite-conf to overwrite' % path) with open(tmp, 'w') as f: f.write(conf) f.flush() os.fsync(f) os.rename(tmp, path) ceph-deploy-2.0.1/ceph_deploy/conf/cephdeploy.py0000644000076500000240000001475413207534372022431 0ustar alfredostaff00000000000000try: import configparser except ImportError: import ConfigParser as configparser import logging import os from os import path import re from ceph_deploy.util.paths import gpg logger = logging.getLogger('ceph_deploy.conf') cd_conf_template = """ # # ceph-deploy configuration file # [ceph-deploy-global] # Overrides for some of ceph-deploy's global flags, like verbosity or cluster # name [ceph-deploy-install] # Overrides for some of ceph-deploy's install flags, like version of ceph to # install # # Repositories section # # yum repos: # [myrepo] # baseurl = http://gitbuilder.ceph.com/ceph-rpm-centos7-x86_64-basic/ref/hammer # gpgurl = https://download.ceph.com/keys/autobuild.asc # default = True # extra-repos = cephrepo # will install the cephrepo file too # # [cephrepo] # name=ceph repo noarch packages # baseurl=http://download.ceph.com/rpm-hammer/el6/noarch # enabled=1 # gpgcheck=1 # type=rpm-md # gpgkey=https://download.ceph.com/keys/release.asc # apt repos: # [myrepo] # baseurl = http://gitbuilder.ceph.com/ceph-deb-trusty-x86_64-basic/ref/hammer # gpgurl = https://download.ceph.com/keys/autobuild.asc # default = True # extra-repos = cephrepo # will install the cephrepo file too # # [cephrepo] # baseurl=http://download.ceph.com/debian-hammer # gpgkey=https://download.ceph.com/keys/release.asc """.format(gpgurl=gpg.url('release')) def location(): """ Find and return the location of the ceph-deploy configuration file. If this file does not exist, create one in a default location. """ return _locate_or_create() def load(): parser = Conf() parser.read(location()) return parser def _locate_or_create(): home_config = path.expanduser('~/.cephdeploy.conf') # With order of importance locations = [ path.join(os.getcwd(), 'cephdeploy.conf'), home_config, ] for location in locations: if path.exists(location): logger.debug('found configuration file at: %s' % location) return location logger.info('could not find configuration file, will create one in $HOME') create_stub(home_config) return home_config def create_stub(_path=None): _path = _path or path.expanduser('~/.cephdeploy.conf') logger.debug('creating new configuration file: %s' % _path) with open(_path, 'w') as cd_conf: cd_conf.write(cd_conf_template) def set_overrides(args, _conf=None): """ Read the configuration file and look for ceph-deploy sections to set flags/defaults from the values found. This will alter the ``args`` object that is created by argparse. 
""" # Get the subcommand name to avoid overwritting values from other # subcommands that are not going to be used subcommand = args.func.__name__ command_section = 'ceph-deploy-%s' % subcommand conf = _conf or load() for section_name in conf.sections(): if section_name in ['ceph-deploy-global', command_section]: override_subcommand( section_name, conf.items(section_name), args ) return args def override_subcommand(section_name, section_items, args): """ Given a specific section in the configuration file that maps to a subcommand (except for the global section) read all the keys that are actual argument flags and slap the values for that one subcommand. Return the altered ``args`` object at the end. """ # XXX We are not coercing here any int-like values, so if ArgParse # does that in the CLI we are totally non-compliant with that expectation # but we will try and infer a few boolean values # acceptable boolean states for flags _boolean_states = {'yes': True, 'true': True, 'on': True, 'no': False, 'false': False, 'off': False} for k, v, in section_items: # get the lower case value of `v`, fallback to the booleanized # (original) value of `v` try: normalized_value = v.lower() except AttributeError: # probably not a string object that has .lower normalized_value = v value = _boolean_states.get(normalized_value, v) setattr(args, k, value) return args class Conf(configparser.SafeConfigParser): """ Subclasses from SafeConfigParser to give a few helpers for the ceph-deploy configuration. Specifically, it addresses the need to work with custom sections that signal the usage of custom repositories. """ reserved_sections = ['ceph-deploy-global', 'ceph-deploy-install'] def get_safe(self, section, key, default=None): """ Attempt to get a configuration value from a certain section in a ``cfg`` object but returning None if not found. Avoids the need to be doing try/except {ConfigParser Exceptions} every time. """ try: return self.get(section, key) except (configparser.NoSectionError, configparser.NoOptionError): return default def get_repos(self): """ Return all the repo sections from the config, excluding the ceph-deploy reserved sections. """ return [ section for section in self.sections() if section not in self.reserved_sections ] @property def has_repos(self): """ boolean to reflect having (or not) any repository sections """ for section in self.sections(): if section not in self.reserved_sections: return True return False def get_list(self, section, key): """ Assumes that the value for a given key is going to be a list separated by commas. It gets rid of trailing comments. If just one item is present it returns a list with a single item, if no key is found an empty list is returned. """ value = self.get_safe(section, key, []) if value == []: return value # strip comments value = re.split(r'\s+#', value)[0] # split on commas value = value.split(',') # strip spaces return [x.strip() for x in value] def get_default_repo(self): """ Go through all the repositories defined in the config file and search for a truthy value for the ``default`` key. If there isn't any return None. 
""" for repo in self.get_repos(): if self.get_safe(repo, 'default') and self.getboolean(repo, 'default'): return repo return False ceph-deploy-2.0.1/ceph_deploy/config.py0000644000076500000240000000617612754333353020616 0ustar alfredostaff00000000000000import logging import os.path from ceph_deploy import exc from ceph_deploy import conf from ceph_deploy.cliutil import priority from ceph_deploy import hosts LOG = logging.getLogger(__name__) def config_push(args): conf_data = conf.ceph.load_raw(args) errors = 0 for hostname in args.client: LOG.debug('Pushing config to %s', hostname) try: distro = hosts.get(hostname, username=args.username) distro.conn.remote_module.write_conf( args.cluster, conf_data, args.overwrite_conf, ) distro.conn.exit() except RuntimeError as e: LOG.error(e) errors += 1 if errors: raise exc.GenericError('Failed to config %d hosts' % errors) def config_pull(args): topath = '{cluster}.conf'.format(cluster=args.cluster) frompath = '/etc/ceph/{cluster}.conf'.format(cluster=args.cluster) errors = 0 for hostname in args.client: try: LOG.debug('Checking %s for %s', hostname, frompath) distro = hosts.get(hostname, username=args.username) conf_file_contents = distro.conn.remote_module.get_file(frompath) if conf_file_contents is not None: LOG.debug('Got %s from %s', frompath, hostname) if os.path.exists(topath): with open(topath, 'rb') as f: existing = f.read() if existing != conf_file_contents and not args.overwrite_conf: LOG.error('local config file %s exists with different content; use --overwrite-conf to overwrite' % topath) raise with open(topath, 'wb') as f: f.write(conf_file_contents) return distro.conn.exit() LOG.debug('Empty or missing %s on %s', frompath, hostname) except: LOG.error('Unable to pull %s from %s', frompath, hostname) finally: errors += 1 raise exc.GenericError('Failed to fetch config from %d hosts' % errors) def config(args): if args.subcommand == 'push': config_push(args) elif args.subcommand == 'pull': config_pull(args) else: LOG.error('subcommand %s not implemented', args.subcommand) @priority(70) def make(parser): """ Copy ceph.conf to/from remote host(s) """ config_parser = parser.add_subparsers(dest='subcommand') config_parser.required = True config_push = config_parser.add_parser( 'push', help='push Ceph config file to one or more remote hosts' ) config_push.add_argument( 'client', metavar='HOST', nargs='+', help='host(s) to push the config file to', ) config_pull = config_parser.add_parser( 'pull', help='pull Ceph config file from one or more remote hosts' ) config_pull.add_argument( 'client', metavar='HOST', nargs='+', help='host(s) to pull the config file from', ) parser.set_defaults( func=config, ) ceph-deploy-2.0.1/ceph_deploy/connection.py0000644000076500000240000000240012654161364021472 0ustar alfredostaff00000000000000import socket from ceph_deploy.lib import remoto def get_connection(hostname, username, logger, threads=5, use_sudo=None, detect_sudo=True): """ A very simple helper, meant to return a connection that will know about the need to use sudo. """ if username: hostname = "%s@%s" % (username, hostname) try: conn = remoto.Connection( hostname, logger=logger, threads=threads, detect_sudo=detect_sudo, ) # Set a timeout value in seconds to disconnect and move on # if no data is sent back. 
conn.global_timeout = 300 logger.debug("connected to host: %s " % hostname) return conn except Exception as error: msg = "connecting to host: %s " % hostname errors = "resulted in errors: %s %s" % (error.__class__.__name__, error) raise RuntimeError(msg + errors) def get_local_connection(logger, use_sudo=False): """ Helper for local connections that are sometimes needed to operate on local hosts """ return get_connection( socket.gethostname(), # cannot rely on 'localhost' here None, logger=logger, threads=1, use_sudo=use_sudo, detect_sudo=False ) ceph-deploy-2.0.1/ceph_deploy/exc.py0000644000076500000240000000517712620214647020124 0ustar alfredostaff00000000000000class DeployError(Exception): """ Unknown deploy error """ def __str__(self): doc = self.__doc__.strip() return ': '.join([doc] + [str(a) for a in self.args]) class UnableToResolveError(DeployError): """ Unable to resolve host """ class ClusterExistsError(DeployError): """ Cluster config exists already """ class ConfigError(DeployError): """ Cannot load config """ class NeedHostError(DeployError): """ No hosts specified to deploy to. """ class NeedMonError(DeployError): """ Cannot find nodes with ceph-mon. """ class NeedDiskError(DeployError): """ Must supply disk/path argument """ class UnsupportedPlatform(DeployError): """ Platform is not supported """ def __init__(self, distro, codename, release): self.distro = distro self.codename = codename self.release = release def __str__(self): return '{doc}: {distro} {codename} {release}'.format( doc=self.__doc__.strip(), distro=self.distro, codename=self.codename, release=self.release, ) class ExecutableNotFound(DeployError): """ Could not locate executable """ def __init__(self, executable, host): self.executable = executable self.host = host def __str__(self): return "{doc} '{executable}' make sure it is installed and available on {host}".format( doc=self.__doc__.strip(), executable=self.executable, host=self.host, ) class MissingPackageError(DeployError): """ A required package or command is missing """ def __init__(self, message): self.message = message def __str__(self): return self.message class GenericError(DeployError): def __init__(self, message): self.message = message def __str__(self): return self.message class ClusterNameError(DeployError): """ Problem encountered with custom cluster name """ def __init__(self, message): self.message = message def __str__(self): return self.message class KeyNotFoundError(DeployError): """ Could not find keyring file """ def __init__(self, keyring, hosts): self.keyring = keyring self.hosts = hosts def __str__(self): return '{doc}: {keys}'.format( doc=self.__doc__.strip(), keys=', '.join( [self.keyring.format(hostname=host) + " on host {hostname}".format(hostname=host) for host in self.hosts] ) ) ceph-deploy-2.0.1/ceph_deploy/forgetkeys.py0000644000076500000240000000133512754333353021523 0ustar alfredostaff00000000000000import logging import errno from .cliutil import priority LOG = logging.getLogger(__name__) def forgetkeys(args): import os for f in [ 'mon', 'client.admin', 'bootstrap-osd', 'bootstrap-mds', 'bootstrap-rgw', ]: try: os.unlink('{cluster}.{what}.keyring'.format( cluster=args.cluster, what=f, )) except OSError as e: if e.errno == errno.ENOENT: pass else: raise @priority(100) def make(parser): """ Remove authentication keys from the local directory. 
""" parser.set_defaults( func=forgetkeys, ) ceph-deploy-2.0.1/ceph_deploy/gatherkeys.py0000644000076500000240000002253613243310456021506 0ustar alfredostaff00000000000000import errno import os.path import logging import json import tempfile import shutil import time from ceph_deploy import hosts from ceph_deploy.cliutil import priority from ceph_deploy.lib import remoto import ceph_deploy.util.paths.mon LOG = logging.getLogger(__name__) def _keyring_equivalent(keyring_one, keyring_two): """ Check two keyrings are identical """ def keyring_extract_key(file_path): """ Cephx keyring files may or may not have white space before some lines. They may have some values in quotes, so a safe way to compare is to extract the key. """ with open(file_path) as f: for line in f: content = line.strip() if len(content) == 0: continue split_line = content.split('=') if split_line[0].strip() == 'key': return "=".join(split_line[1:]).strip() raise RuntimeError("File '%s' is not a keyring" % file_path) key_one = keyring_extract_key(keyring_one) key_two = keyring_extract_key(keyring_two) return key_one == key_two def keytype_path_to(args, keytype): """ Get the local filename for a keyring type """ if keytype == "admin": return '{cluster}.client.admin.keyring'.format( cluster=args.cluster) if keytype == "mon": return '{cluster}.mon.keyring'.format( cluster=args.cluster) return '{cluster}.bootstrap-{what}.keyring'.format( cluster=args.cluster, what=keytype) def keytype_identity(keytype): """ Get the keyring identity from keyring type. This is used in authentication with keyrings and generating keyrings. """ ident_dict = { 'admin' : 'client.admin', 'mds' : 'client.bootstrap-mds', 'mgr' : 'client.bootstrap-mgr', 'osd' : 'client.bootstrap-osd', 'rgw' : 'client.bootstrap-rgw', 'mon' : 'mon.' } return ident_dict.get(keytype, None) def keytype_capabilities(keytype): """ Get the capabilities of a keyring from keyring type. """ cap_dict = { 'admin' : [ 'osd', 'allow *', 'mds', 'allow *', 'mon', 'allow *', 'mgr', 'allow *' ], 'mds' : [ 'mon', 'allow profile bootstrap-mds' ], 'mgr' : [ 'mon', 'allow profile bootstrap-mgr' ], 'osd' : [ 'mon', 'allow profile bootstrap-osd' ], 'rgw': [ 'mon', 'allow profile bootstrap-rgw' ] } return cap_dict.get(keytype, None) def gatherkeys_missing(args, distro, rlogger, keypath, keytype, dest_dir): """ Get or create the keyring from the mon using the mon keyring by keytype and copy to dest_dir """ args_prefix = [ '/usr/bin/ceph', '--connect-timeout=25', '--cluster={cluster}'.format( cluster=args.cluster), '--name', 'mon.', '--keyring={keypath}'.format( keypath=keypath), ] identity = keytype_identity(keytype) if identity is None: raise RuntimeError('Could not find identity for keytype:%s' % keytype) capabilites = keytype_capabilities(keytype) if capabilites is None: raise RuntimeError('Could not find capabilites for keytype:%s' % keytype) # First try getting the key if it already exists, to handle the case where # it exists but doesn't match the caps we would pass into get-or-create. 
    # This is the same behavior as in newer ceph-create-keys
    out, err, code = remoto.process.check(
        distro.conn,
        args_prefix + ['auth', 'get', identity]
    )
    if code == errno.ENOENT:
        out, err, code = remoto.process.check(
            distro.conn,
            args_prefix + ['auth', 'get-or-create', identity] + capabilites
        )
    if code != 0:
        rlogger.error('"ceph auth get-or-create" for keytype %s returned %s', keytype, code)
        for line in err:
            rlogger.debug(line)
        return False
    keyring_name_local = keytype_path_to(args, keytype)
    keyring_path_local = os.path.join(dest_dir, keyring_name_local)
    with open(keyring_path_local, 'wb') as f:
        for line in out:
            f.write(line + b'\n')
    return True


def gatherkeys_with_mon(args, host, dest_dir):
    """
    Connect to mon and gather keys if mon is in quorum.
    """
    distro = hosts.get(host, username=args.username)
    remote_hostname = distro.conn.remote_module.shortname()
    dir_keytype_mon = ceph_deploy.util.paths.mon.path(args.cluster, remote_hostname)
    path_keytype_mon = "%s/keyring" % (dir_keytype_mon)
    mon_key = distro.conn.remote_module.get_file(path_keytype_mon)
    if mon_key is None:
        LOG.warning("No mon key found in host: %s", host)
        return False
    mon_name_local = keytype_path_to(args, "mon")
    mon_path_local = os.path.join(dest_dir, mon_name_local)
    with open(mon_path_local, 'wb') as f:
        f.write(mon_key)
    rlogger = logging.getLogger(host)
    path_asok = ceph_deploy.util.paths.mon.asok(args.cluster, remote_hostname)
    out, err, code = remoto.process.check(
        distro.conn,
        [
            "/usr/bin/ceph",
            "--connect-timeout=25",
            "--cluster={cluster}".format(cluster=args.cluster),
            "--admin-daemon={asok}".format(asok=path_asok),
            "mon_status"
        ]
    )
    if code != 0:
        rlogger.error('"ceph mon_status %s" returned %s', host, code)
        for line in err:
            rlogger.debug(line)
        return False
    try:
        mon_status = json.loads(b''.join(out).decode('utf-8'))
    except ValueError:
        rlogger.error('"ceph mon_status %s" output was not json', host)
        for line in out:
            rlogger.error(line)
        return False
    mon_number = None
    mon_map = mon_status.get('monmap')
    if mon_map is None:
        rlogger.error("could not find mon map for mons on '%s'", host)
        return False
    mon_quorum = mon_status.get('quorum')
    if mon_quorum is None:
        rlogger.error("could not find quorum for mons on '%s'", host)
        return False
    mon_map_mons = mon_map.get('mons')
    if mon_map_mons is None:
        rlogger.error("could not find mons in monmap on '%s'", host)
        return False
    for mon in mon_map_mons:
        if mon.get('name') == remote_hostname:
            mon_number = mon.get('rank')
            break
    if mon_number is None:
        rlogger.error("could not find '%s' in monmap", remote_hostname)
        return False
    if mon_number not in mon_quorum:
        rlogger.error("Not yet quorum for '%s'", host)
        return False
    for keytype in ["admin", "mds", "mgr", "osd", "rgw"]:
        if not gatherkeys_missing(args, distro, rlogger, path_keytype_mon, keytype, dest_dir):
            # We will return failure if we fail to gather any key
            rlogger.error("Failed to return '%s' key from host %s", keytype, host)
            return False
    return True


def gatherkeys(args):
    """
    Gather keys from any mon and store in current working directory.

    Backs up keys from previous installs and stores new keys.
""" oldmask = os.umask(0o77) try: try: tmpd = tempfile.mkdtemp() LOG.info("Storing keys in temp directory %s", tmpd) sucess = False for host in args.mon: sucess = gatherkeys_with_mon(args, host, tmpd) if sucess: break if not sucess: LOG.error("Failed to connect to host:%s" ,', '.join(args.mon)) raise RuntimeError('Failed to connect any mon') had_error = False date_string = time.strftime("%Y%m%d%H%M%S") for keytype in ["admin", "mds", "mgr", "mon", "osd", "rgw"]: filename = keytype_path_to(args, keytype) tmp_path = os.path.join(tmpd, filename) if not os.path.exists(tmp_path): LOG.error("No key retrived for '%s'" , keytype) had_error = True continue if not os.path.exists(filename): LOG.info("Storing %s" % (filename)) shutil.move(tmp_path, filename) continue if _keyring_equivalent(tmp_path, filename): LOG.info("keyring '%s' already exists" , filename) continue backup_keyring = "%s-%s" % (filename, date_string) LOG.info("Replacing '%s' and backing up old key as '%s'", filename, backup_keyring) shutil.copy(filename, backup_keyring) shutil.move(tmp_path, filename) if had_error: raise RuntimeError('Failed to get all key types') finally: LOG.info("Destroy temp directory %s" %(tmpd)) shutil.rmtree(tmpd) finally: os.umask(oldmask) @priority(40) def make(parser): """ Gather authentication keys for provisioning new nodes. """ parser.add_argument( 'mon', metavar='HOST', nargs='+', help='monitor host to pull keys from', ) parser.set_defaults( func=gatherkeys, ) ceph-deploy-2.0.1/ceph_deploy/hosts/0000755000076500000240000000000013312242252020111 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/hosts/__init__.py0000644000076500000240000001227013277045417022242 0ustar alfredostaff00000000000000""" We deal (mostly) with remote hosts. To avoid special casing each different commands (e.g. using `yum` as opposed to `apt`) we can make a one time call to that remote host and set all the special cases for running commands depending on the type of distribution/version we are dealing with. """ import logging from ceph_deploy import exc from ceph_deploy.util import versions from ceph_deploy.hosts import debian, centos, fedora, suse, remotes, rhel, arch from ceph_deploy.connection import get_connection logger = logging.getLogger() def get(hostname, username=None, fallback=None, detect_sudo=True, use_rhceph=False, callbacks=None): """ Retrieve the module that matches the distribution of a ``hostname``. This function will connect to that host and retrieve the distribution information, then return the appropriate module and slap a few attributes to that module defining the information it found from the hostname. For example, if host ``node1.example.com`` is an Ubuntu server, the ``debian`` module would be returned and the following would be set:: module.name = 'ubuntu' module.release = '12.04' module.codename = 'precise' :param hostname: A hostname that is reachable/resolvable over the network :param fallback: Optional fallback to use if no supported distro is found :param use_rhceph: Whether or not to install RH Ceph on a RHEL machine or the community distro. Changes what host module is returned for RHEL. :params callbacks: A list of callables that accept one argument (the actual module that contains the connection) that will be called, in order at the end of the instantiation of the module. 
""" conn = get_connection( hostname, username=username, logger=logging.getLogger(hostname), detect_sudo=detect_sudo ) try: conn.import_module(remotes) except IOError as error: if 'already closed' in getattr(error, 'message', ''): raise RuntimeError('remote connection got closed, ensure ``requiretty`` is disabled for %s' % hostname) distro_name, release, codename = conn.remote_module.platform_information() if not codename or not _get_distro(distro_name): raise exc.UnsupportedPlatform( distro=distro_name, codename=codename, release=release) machine_type = conn.remote_module.machine_type() module = _get_distro(distro_name, use_rhceph=use_rhceph) module.name = distro_name module.normalized_name = _normalized_distro_name(distro_name) module.normalized_release = _normalized_release(release) module.distro = module.normalized_name module.is_el = module.normalized_name in ['redhat', 'centos', 'fedora', 'scientific', 'oracle', 'virtuozzo'] module.is_rpm = module.normalized_name in ['redhat', 'centos', 'fedora', 'scientific', 'suse', 'oracle', 'virtuozzo'] module.is_deb = module.normalized_name in ['debian', 'ubuntu'] module.is_pkgtarxz = module.normalized_name in ['arch'] module.release = release module.codename = codename module.conn = conn module.machine_type = machine_type module.init = module.choose_init(module) module.packager = module.get_packager(module) # execute each callback if any if callbacks: for c in callbacks: c(module) return module def _get_distro(distro, fallback=None, use_rhceph=False): if not distro: return distro = _normalized_distro_name(distro) distributions = { 'debian': debian, 'ubuntu': debian, 'centos': centos, 'scientific': centos, 'oracle': centos, 'redhat': centos, 'fedora': fedora, 'suse': suse, 'virtuozzo': centos, 'arch': arch } if distro == 'redhat' and use_rhceph: return rhel else: return distributions.get(distro) or _get_distro(fallback) def _normalized_distro_name(distro): distro = distro.lower() if distro.startswith(('redhat', 'red hat')): return 'redhat' elif distro.startswith(('scientific', 'scientific linux')): return 'scientific' elif distro.startswith('oracle'): return 'oracle' elif distro.startswith(('suse', 'opensuse', 'sles')): return 'suse' elif distro.startswith('centos'): return 'centos' elif distro.startswith('linuxmint'): return 'ubuntu' elif distro.startswith('virtuozzo'): return 'virtuozzo' elif distro.startswith('arch'): return 'arch' return distro def _normalized_release(release): """ A normalizer function to make sense of distro release versions. Returns an object with: major, minor, patch, and garbage These attributes can be accessed as ints with prefixed "int" attribute names, for example: normalized_version.int_major """ # TODO: at some point deprecate this function so that we just # use this class directly (and update every test that calls it return versions.NormalizedVersion(release) ceph-deploy-2.0.1/ceph_deploy/hosts/arch/0000755000076500000240000000000013312242252021026 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/hosts/arch/__init__.py0000644000076500000240000000104413277045417023154 0ustar alfredostaff00000000000000from . 
import mon # noqa from ceph_deploy.hosts.centos.install import repo_install # noqa from .install import install, mirror_install # noqa from .uninstall import uninstall # noqa from ceph_deploy.util import pkg_managers # Allow to set some information about this distro # distro = None release = None codename = None def choose_init(module): """ Select a init system Returns the name of a init system (upstart, sysvinit ...). """ return 'systemd' def get_packager(module): return pkg_managers.Pacman(module) ceph-deploy-2.0.1/ceph_deploy/hosts/arch/install.py0000644000076500000240000000216313277045417023066 0ustar alfredostaff00000000000000from ceph_deploy.hosts.centos.install import repo_install, mirror_install # noqa from ceph_deploy.hosts.common import map_components from ceph_deploy.util.system import enable_service, start_service NON_SPLIT_PACKAGES = [ 'ceph-osd', 'ceph-radosgw', 'ceph-mds', 'ceph-mon', 'ceph-mgr', 'ceph-common', 'ceph-test' ] SYSTEMD_UNITS = [ 'ceph.target', 'ceph-radosgw.target', 'ceph-rbd-mirror.target', 'ceph-fuse.target', 'ceph-mds.target', 'ceph-mon.target', 'ceph-mgr.target', 'ceph-osd.target', ] SYSTEMD_UNITS_SKIP_START = [ 'ceph-mgr.target', 'ceph-mon.target', ] SYSTEMD_UNITS_SKIP_ENABLE = [ ] def install(distro, version_kind, version, adjust_repos, **kw): packages = map_components( NON_SPLIT_PACKAGES, kw.pop('components', []) ) distro.packager.install( packages ) # Start and enable services for unit in SYSTEMD_UNITS: if unit not in SYSTEMD_UNITS_SKIP_START: start_service(distro.conn, unit) if unit not in SYSTEMD_UNITS_SKIP_ENABLE: enable_service(distro.conn, unit) ceph-deploy-2.0.1/ceph_deploy/hosts/arch/mon/0000755000076500000240000000000013312242252021617 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/hosts/arch/mon/__init__.py0000644000076500000240000000017613277045417023752 0ustar alfredostaff00000000000000from ceph_deploy.hosts.common import mon_add as add # noqa from ceph_deploy.hosts.common import mon_create as create # noqa ceph-deploy-2.0.1/ceph_deploy/hosts/arch/uninstall.py0000644000076500000240000000211113277045417023422 0ustar alfredostaff00000000000000import logging from ceph_deploy.util.system import disable_service, stop_service from ceph_deploy.lib import remoto SYSTEMD_UNITS = [ 'ceph-mds.target', 'ceph-mon.target', 'ceph-osd.target', 'ceph-radosgw.target', 'ceph-fuse.target', 'ceph-mgr.target', 'ceph-rbd-mirror.target', 'ceph.target', ] def uninstall(distro, purge=False): packages = [ 'ceph', ] hostname = distro.conn.hostname LOG = logging.getLogger(hostname) # I need to stop and disable services prior package removal LOG.info('stopping and disabling services on {}'.format(hostname)) for unit in SYSTEMD_UNITS: stop_service(distro.conn, unit) disable_service(distro.conn, unit) # remoto.process.run( # distro.conn, # [ # 'systemctl', # 'daemon-reload', # ] # ) LOG.info('uninstalling packages on {}'.format(hostname)) distro.packager.remove(packages) remoto.process.run( distro.conn, [ 'systemctl', 'reset-failed', ] ) ceph-deploy-2.0.1/ceph_deploy/hosts/centos/0000755000076500000240000000000013312242252021404 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/hosts/centos/__init__.py0000644000076500000240000000146313243310456023526 0ustar alfredostaff00000000000000from . 
import mon  # noqa
from .install import install, mirror_install, repo_install, repository_url_part, rpm_dist  # noqa
from .uninstall import uninstall  # noqa
from ceph_deploy.util import pkg_managers
from ceph_deploy.util.system import is_systemd

# Allow to set some information about this distro
#

distro = None
release = None
codename = None


def choose_init(module):
    """
    Select an init system

    Returns the name of an init system (upstart, sysvinit ...).
    """
    if module.normalized_release.int_major < 7:
        return 'sysvinit'
    if is_systemd(module.conn):
        return 'systemd'
    if not module.conn.remote_module.path_exists("/usr/lib/systemd/system/ceph.target"):
        return 'sysvinit'
    return 'systemd'


def get_packager(module):
    return pkg_managers.Yum(module)

ceph-deploy-2.0.1/ceph_deploy/hosts/centos/install.py
import logging

from ceph_deploy.util import templates
from ceph_deploy.lib import remoto
from ceph_deploy.hosts.common import map_components
from ceph_deploy.util.paths import gpg
from ceph_deploy.util import net

LOG = logging.getLogger(__name__)

NON_SPLIT_PACKAGES = ['ceph-osd', 'ceph-mon', 'ceph-mds']


def rpm_dist(distro):
    if distro.normalized_name in ['redhat', 'centos', 'scientific', 'oracle', 'virtuozzo'] and distro.normalized_release.int_major >= 6:
        return 'el' + distro.normalized_release.major
    return 'el6'


def repository_url_part(distro):
    """
    Historically everything CentOS, RHEL, and Scientific has been mapped to
    `el6` urls, but as we are adding repositories for `rhel`, the URLs should
    map correctly to, say, `rhel6` or `rhel7`.

    This function looks into the `distro` object and determines the right url
    part for the given distro, falling back to `el6` when all else fails.

    Specifically to work around the issue of CentOS vs RHEL::

        >>> import platform
        >>> platform.linux_distribution()
        ('Red Hat Enterprise Linux Server', '7.0', 'Maipo')

    """
    if distro.normalized_release.int_major >= 6:
        if distro.normalized_name == 'redhat':
            return 'rhel' + distro.normalized_release.major
        if distro.normalized_name in ['centos', 'scientific', 'oracle', 'virtuozzo']:
            return 'el' + distro.normalized_release.major
    return 'el6'


def install(distro, version_kind, version, adjust_repos, **kw):
    packages = map_components(
        NON_SPLIT_PACKAGES,
        kw.pop('components', [])
    )
    gpgcheck = kw.pop('gpgcheck', 1)
    logger = distro.conn.logger
    machine = distro.machine_type
    repo_part = repository_url_part(distro)
    dist = rpm_dist(distro)

    distro.packager.clean()

    # Get EPEL installed before we continue:
    if adjust_repos:
        distro.packager.install('epel-release')
        distro.packager.install('yum-plugin-priorities')
        distro.conn.remote_module.enable_yum_priority_obsoletes()
        logger.warning('check_obsoletes has been enabled for Yum priorities plugin')
    if version_kind in ['stable', 'testing']:
        key = 'release'
    else:
        key = 'autobuild'

    if adjust_repos:
        if version_kind in ['stable', 'testing']:
            distro.packager.add_repo_gpg_key(gpg.url(key))

            if version_kind == 'stable':
                url = 'https://download.ceph.com/rpm-{version}/{repo}/'.format(
                    version=version,
                    repo=repo_part,
                )
            elif version_kind == 'testing':
                url = 'https://download.ceph.com/rpm-testing/{repo}/'.format(repo=repo_part)

            # remove any old ceph-release package from previous release
            remoto.process.run(
                distro.conn,
                [
                    'yum',
                    'remove',
                    '-y',
                    'ceph-release'
                ],
            )
            remoto.process.run(
                distro.conn,
                [
                    'yum',
                    'install',
                    '-y',
                    '{url}noarch/ceph-release-1-0.{dist}.noarch.rpm'.format(url=url, dist=dist),
                ],
            )
        elif version_kind in ['dev', 'dev_commit']:
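            # For illustration, with `--dev master` on a CentOS 7 x86_64 box
            # the shaman_url built below expands to something like:
            #   https://shaman.ceph.com/api/repos/ceph/master/latest/centos/7/repo/?arch=x86_64
            # ('master', '7', and 'x86_64' are example values taken from the
            # parsed args and the detected distro, not constants)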
logger.info('skipping install of ceph-release package') logger.info('repo file will be created manually') shaman_url = 'https://shaman.ceph.com/api/repos/ceph/{version}/{sha1}/{distro}/{distro_version}/repo/?arch={arch}'.format( distro=distro.normalized_name, distro_version=distro.normalized_release.major, version=kw['args'].dev, sha1=kw['args'].dev_commit or 'latest', arch=machine ) LOG.debug('fetching repo information from: %s' % shaman_url) content = net.get_chacra_repo(shaman_url) mirror_install( distro, '', # empty repo_url None, # no need to use gpg here, repos are unsigned adjust_repos=True, extra_installs=False, gpgcheck=gpgcheck, repo_content=content ) else: raise Exception('unrecognized version_kind %s' % version_kind) # set the right priority logger.warning('ensuring that /etc/yum.repos.d/ceph.repo contains a high priority') distro.conn.remote_module.set_repo_priority(['Ceph', 'Ceph-noarch', 'ceph-source']) logger.warning('altered ceph.repo priorities to contain: priority=1') if packages: distro.packager.install(packages) def mirror_install(distro, repo_url, gpg_url, adjust_repos, extra_installs=True, **kw): packages = map_components( NON_SPLIT_PACKAGES, kw.pop('components', []) ) repo_url = repo_url.strip('/') # Remove trailing slashes gpgcheck = kw.pop('gpgcheck', 1) distro.packager.clean() if adjust_repos: if gpg_url: distro.packager.add_repo_gpg_key(gpg_url) ceph_repo_content = templates.ceph_repo.format( repo_url=repo_url, gpg_url=gpg_url, gpgcheck=gpgcheck, ) content = kw.get('repo_content', ceph_repo_content) distro.conn.remote_module.write_yum_repo(content) # set the right priority if distro.packager.name == 'yum': distro.packager.install('yum-plugin-priorities') distro.conn.remote_module.set_repo_priority(['Ceph', 'Ceph-noarch', 'ceph-source']) distro.conn.logger.warning('altered ceph.repo priorities to contain: priority=1') if extra_installs and packages: distro.packager.install(packages) def repo_install(distro, reponame, baseurl, gpgkey, **kw): packages = map_components( NON_SPLIT_PACKAGES, kw.pop('components', []) ) logger = distro.conn.logger # Get some defaults name = kw.pop('name', '%s repo' % reponame) enabled = kw.pop('enabled', 1) gpgcheck = kw.pop('gpgcheck', 1) install_ceph = kw.pop('install_ceph', False) proxy = kw.pop('proxy', '') # will get ignored if empty _type = 'repo-md' baseurl = baseurl.strip('/') # Remove trailing slashes distro.packager.clean() if gpgkey: distro.packager.add_repo_gpg_key(gpgkey) repo_content = templates.custom_repo( reponame=reponame, name=name, baseurl=baseurl, enabled=enabled, gpgcheck=gpgcheck, _type=_type, gpgkey=gpgkey, proxy=proxy, **kw ) distro.conn.remote_module.write_yum_repo( repo_content, "%s.repo" % reponame ) repo_path = '/etc/yum.repos.d/{reponame}.repo'.format(reponame=reponame) # set the right priority if kw.get('priority'): if distro.packager.name == 'yum': distro.packager.install('yum-plugin-priorities') distro.conn.remote_module.set_repo_priority([reponame], repo_path) logger.warning('altered {reponame}.repo priorities to contain: priority=1'.format( reponame=reponame) ) # Some custom repos do not need to install ceph if install_ceph and packages: distro.packager.install(packages) ceph-deploy-2.0.1/ceph_deploy/hosts/centos/mon/0000755000076500000240000000000013312242252022175 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/hosts/centos/mon/__init__.py0000644000076500000240000000017613243310455024316 0ustar alfredostaff00000000000000from ceph_deploy.hosts.common import mon_add as add # noqa from 
ceph_deploy.hosts.common import mon_create as create # noqa ceph-deploy-2.0.1/ceph_deploy/hosts/centos/uninstall.py0000644000076500000240000000033012620214647023773 0ustar alfredostaff00000000000000def uninstall(distro, purge=False): packages = [ 'ceph', 'ceph-release', 'ceph-common', 'ceph-radosgw', ] distro.packager.remove(packages) distro.packager.clean() ceph-deploy-2.0.1/ceph_deploy/hosts/common.py0000644000076500000240000001706713243310455021772 0ustar alfredostaff00000000000000from ceph_deploy.util import paths from ceph_deploy import conf from ceph_deploy.lib import remoto from ceph_deploy.util import constants from ceph_deploy.util import system def ceph_version(conn): """ Log the remote ceph-version by calling `ceph --version` """ return remoto.process.run(conn, ['ceph', '--version']) def mon_create(distro, args, monitor_keyring): hostname = distro.conn.remote_module.shortname() logger = distro.conn.logger logger.debug('remote hostname: %s' % hostname) path = paths.mon.path(args.cluster, hostname) uid = distro.conn.remote_module.path_getuid(constants.base_path) gid = distro.conn.remote_module.path_getgid(constants.base_path) done_path = paths.mon.done(args.cluster, hostname) init_path = paths.mon.init(args.cluster, hostname, distro.init) conf_data = conf.ceph.load_raw(args) # write the configuration file distro.conn.remote_module.write_conf( args.cluster, conf_data, args.overwrite_conf, ) # if the mon path does not exist, create it distro.conn.remote_module.create_mon_path(path, uid, gid) logger.debug('checking for done path: %s' % done_path) if not distro.conn.remote_module.path_exists(done_path): logger.debug('done path does not exist: %s' % done_path) if not distro.conn.remote_module.path_exists(paths.mon.constants.tmp_path): logger.info('creating tmp path: %s' % paths.mon.constants.tmp_path) distro.conn.remote_module.makedir(paths.mon.constants.tmp_path) keyring = paths.mon.keyring(args.cluster, hostname) logger.info('creating keyring file: %s' % keyring) distro.conn.remote_module.write_monitor_keyring( keyring, monitor_keyring, uid, gid, ) user_args = [] if uid != 0: user_args = user_args + [ '--setuser', str(uid) ] if gid != 0: user_args = user_args + [ '--setgroup', str(gid) ] remoto.process.run( distro.conn, [ 'ceph-mon', '--cluster', args.cluster, '--mkfs', '-i', hostname, '--keyring', keyring, ] + user_args ) logger.info('unlinking keyring file %s' % keyring) distro.conn.remote_module.unlink(keyring) # create the done file distro.conn.remote_module.create_done_path(done_path, uid, gid) # create init path distro.conn.remote_module.create_init_path(init_path, uid, gid) # start mon service start_mon_service(distro, args.cluster, hostname) def mon_add(distro, args, monitor_keyring): hostname = distro.conn.remote_module.shortname() logger = distro.conn.logger path = paths.mon.path(args.cluster, hostname) uid = distro.conn.remote_module.path_getuid(constants.base_path) gid = distro.conn.remote_module.path_getgid(constants.base_path) monmap_path = paths.mon.monmap(args.cluster, hostname) done_path = paths.mon.done(args.cluster, hostname) init_path = paths.mon.init(args.cluster, hostname, distro.init) conf_data = conf.ceph.load_raw(args) # write the configuration file distro.conn.remote_module.write_conf( args.cluster, conf_data, args.overwrite_conf, ) # if the mon path does not exist, create it distro.conn.remote_module.create_mon_path(path, uid, gid) logger.debug('checking for done path: %s' % done_path) if not distro.conn.remote_module.path_exists(done_path): 
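        # The 'done' file is a sentinel marking a fully prepared monitor data
        # directory; assuming the stock /var/lib/ceph layout built by the
        # paths module, it would live at, for example:
        #   /var/lib/ceph/mon/ceph-node1/done   ('ceph'/'node1' illustrative)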
logger.debug('done path does not exist: %s' % done_path) if not distro.conn.remote_module.path_exists(paths.mon.constants.tmp_path): logger.info('creating tmp path: %s' % paths.mon.constants.tmp_path) distro.conn.remote_module.makedir(paths.mon.constants.tmp_path) keyring = paths.mon.keyring(args.cluster, hostname) logger.info('creating keyring file: %s' % keyring) distro.conn.remote_module.write_monitor_keyring( keyring, monitor_keyring, uid, gid, ) # get the monmap remoto.process.run( distro.conn, [ 'ceph', '--cluster', args.cluster, 'mon', 'getmap', '-o', monmap_path, ], ) # now use it to prepare the monitor's data dir user_args = [] if uid != 0: user_args = user_args + [ '--setuser', str(uid) ] if gid != 0: user_args = user_args + [ '--setgroup', str(gid) ] remoto.process.run( distro.conn, [ 'ceph-mon', '--cluster', args.cluster, '--mkfs', '-i', hostname, '--monmap', monmap_path, '--keyring', keyring, ] + user_args ) logger.info('unlinking keyring file %s' % keyring) distro.conn.remote_module.unlink(keyring) # create the done file distro.conn.remote_module.create_done_path(done_path, uid, gid) # create init path distro.conn.remote_module.create_init_path(init_path, uid, gid) # start mon service start_mon_service(distro, args.cluster, hostname) def map_components(notsplit_packages, components): """ Returns a list of packages to install based on component names This is done by checking if a component is in notsplit_packages, if it is, we know we need to install 'ceph' instead of the raw component name. Essentially, this component hasn't been 'split' from the master 'ceph' package yet. """ packages = set() for c in components: if c in notsplit_packages: packages.add('ceph') else: packages.add(c) return list(packages) def start_mon_service(distro, cluster, hostname): """ start mon service depending on distro init """ if distro.init == 'sysvinit': service = distro.conn.remote_module.which_service() remoto.process.run( distro.conn, [ service, 'ceph', '-c', '/etc/ceph/{cluster}.conf'.format(cluster=cluster), 'start', 'mon.{hostname}'.format(hostname=hostname) ], timeout=7, ) system.enable_service(distro.conn) elif distro.init == 'upstart': remoto.process.run( distro.conn, [ 'initctl', 'emit', 'ceph-mon', 'cluster={cluster}'.format(cluster=cluster), 'id={hostname}'.format(hostname=hostname), ], timeout=7, ) elif distro.init == 'systemd': # enable ceph target for this host (in case it isn't already enabled) remoto.process.run( distro.conn, [ 'systemctl', 'enable', 'ceph.target' ], timeout=7, ) # enable and start this mon instance remoto.process.run( distro.conn, [ 'systemctl', 'enable', 'ceph-mon@{hostname}'.format(hostname=hostname), ], timeout=7, ) remoto.process.run( distro.conn, [ 'systemctl', 'start', 'ceph-mon@{hostname}'.format(hostname=hostname), ], timeout=7, ) ceph-deploy-2.0.1/ceph_deploy/hosts/debian/0000755000076500000240000000000013312242252021333 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/hosts/debian/__init__.py0000644000076500000240000000162712757133302023461 0ustar alfredostaff00000000000000from . import mon # noqa from .install import install, mirror_install, repo_install # noqa from .uninstall import uninstall # noqa from ceph_deploy.util import pkg_managers from ceph_deploy.util.system import is_systemd, is_upstart # Allow to set some information about this distro # distro = None release = None codename = None def choose_init(module): """ Select a init system Returns the name of a init system (upstart, sysvinit ...). 
""" # Upstart checks first because when installing ceph, the # `/lib/systemd/system/ceph.target` file may be created, fooling this # detection mechanism. if is_upstart(module.conn): return 'upstart' if is_systemd(module.conn) or module.conn.remote_module.path_exists( "/lib/systemd/system/ceph.target"): return 'systemd' return 'sysvinit' def get_packager(module): return pkg_managers.Apt(module) ceph-deploy-2.0.1/ceph_deploy/hosts/debian/install.py0000644000076500000240000001045013277045417023371 0ustar alfredostaff00000000000000try: from urllib.parse import urlparse except ImportError: from urlparse import urlparse import logging from ceph_deploy.util.paths import gpg from ceph_deploy.util import net LOG = logging.getLogger(__name__) def install(distro, version_kind, version, adjust_repos, **kw): packages = kw.pop('components', []) codename = distro.codename machine = distro.machine_type extra_install_flags = [] if version_kind in ['stable', 'testing']: key = 'release' else: key = 'autobuild' distro.packager.clean() distro.packager.install(['ca-certificates', 'apt-transport-https']) if adjust_repos: # Wheezy does not like the download.ceph.com SSL cert protocol = 'https' if codename == 'wheezy': protocol = 'http' if version_kind in ['dev', 'dev_commit']: shaman_url = 'https://shaman.ceph.com/api/repos/ceph/{version}/{sha1}/{distro}/{distro_version}/repo/?arch={arch}'.format( distro=distro.normalized_name, distro_version=distro.codename, version=kw['args'].dev, sha1=kw['args'].dev_commit or 'latest', arch=machine ) LOG.debug('fetching repo information from: %s' % shaman_url) chacra_url = net.get_request(shaman_url).geturl() content = net.get_chacra_repo(shaman_url) # set the repo priority for the right domain fqdn = urlparse(chacra_url).hostname distro.conn.remote_module.set_apt_priority(fqdn) distro.conn.remote_module.write_sources_list_content(content) extra_install_flags = ['-o', 'Dpkg::Options::=--force-confnew', '--allow-unauthenticated'] else: distro.packager.add_repo_gpg_key(gpg.url(key, protocol=protocol)) if version_kind == 'stable': url = '{protocol}://download.ceph.com/debian-{version}/'.format( protocol=protocol, version=version, ) elif version_kind == 'testing': url = '{protocol}://download.ceph.com/debian-testing/'.format( protocol=protocol, ) else: raise RuntimeError('Unknown version kind: %r' % version_kind) # set the repo priority for the right domain fqdn = urlparse(url).hostname distro.conn.remote_module.set_apt_priority(fqdn) distro.conn.remote_module.write_sources_list(url, codename) extra_install_flags = ['-o', 'Dpkg::Options::=--force-confnew'] distro.packager.clean() # TODO this does not downgrade -- should it? 
if packages: distro.packager.install( packages, extra_install_flags=extra_install_flags ) def mirror_install(distro, repo_url, gpg_url, adjust_repos, **kw): packages = kw.pop('components', []) version_kind = kw['args'].version_kind repo_url = repo_url.strip('/') # Remove trailing slashes if adjust_repos: distro.packager.add_repo_gpg_key(gpg_url) # set the repo priority for the right domain fqdn = urlparse(repo_url).hostname distro.conn.remote_module.set_apt_priority(fqdn) distro.conn.remote_module.write_sources_list(repo_url, distro.codename) extra_install_flags = ['--allow-unauthenticated'] if version_kind in ('dev', 'dev_commit') else [] if packages: distro.packager.clean() distro.packager.install( packages, extra_install_flags=extra_install_flags) def repo_install(distro, repo_name, baseurl, gpgkey, **kw): packages = kw.pop('components', []) # Get some defaults safe_filename = '%s.list' % repo_name.replace(' ', '-') install_ceph = kw.pop('install_ceph', False) baseurl = baseurl.strip('/') # Remove trailing slashes distro.packager.add_repo_gpg_key(gpgkey) distro.conn.remote_module.write_sources_list( baseurl, distro.codename, safe_filename ) # set the repo priority for the right domain fqdn = urlparse(baseurl).hostname distro.conn.remote_module.set_apt_priority(fqdn) # repo is not operable until an update distro.packager.clean() if install_ceph and packages: distro.packager.install(packages) ceph-deploy-2.0.1/ceph_deploy/hosts/debian/mon/0000755000076500000240000000000013312242252022124 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/hosts/debian/mon/__init__.py0000644000076500000240000000017613243310455024245 0ustar alfredostaff00000000000000from ceph_deploy.hosts.common import mon_add as add # noqa from ceph_deploy.hosts.common import mon_create as create # noqa ceph-deploy-2.0.1/ceph_deploy/hosts/debian/uninstall.py0000644000076500000240000000054012620214647023725 0ustar alfredostaff00000000000000def uninstall(distro, purge=False): packages = [ 'ceph', 'ceph-mds', 'ceph-common', 'ceph-fs-common', 'radosgw', ] extra_remove_flags = [] if purge: extra_remove_flags.append('--purge') distro.packager.remove( packages, extra_remove_flags=extra_remove_flags ) ceph-deploy-2.0.1/ceph_deploy/hosts/fedora/0000755000076500000240000000000013312242252021351 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/hosts/fedora/__init__.py0000644000076500000240000000137312754333353023502 0ustar alfredostaff00000000000000from . import mon # noqa from ceph_deploy.hosts.centos.install import repo_install # noqa from .install import install, mirror_install # noqa from .uninstall import uninstall # noqa from ceph_deploy.util import pkg_managers # Allow to set some information about this distro # distro = None release = None codename = None def choose_init(module): """ Select an init system Returns the name of an init system (upstart, sysvinit ...).
""" if not module.conn.remote_module.path_exists("/usr/lib/systemd/system/ceph.target"): return 'sysvinit' return 'systemd' def get_packager(module): if module.normalized_release.int_major >= 22: return pkg_managers.DNF(module) else: return pkg_managers.Yum(module) ceph-deploy-2.0.1/ceph_deploy/hosts/fedora/install.py0000644000076500000240000000612012754333353023404 0ustar alfredostaff00000000000000from ceph_deploy.lib import remoto from ceph_deploy.hosts.centos.install import repo_install, mirror_install # noqa from ceph_deploy.util.paths import gpg from ceph_deploy.hosts.common import map_components NON_SPLIT_PACKAGES = ['ceph-osd', 'ceph-mon', 'ceph-mds'] def install(distro, version_kind, version, adjust_repos, **kw): packages = map_components( NON_SPLIT_PACKAGES, kw.pop('components', []) ) gpgcheck = kw.pop('gpgcheck', 1) logger = distro.conn.logger release = distro.release machine = distro.machine_type if version_kind in ['stable', 'testing']: key = 'release' else: key = 'autobuild' if adjust_repos: if distro.packager.name == 'yum': distro.packager.install('yum-plugin-priorities') # haven't been able to determine necessity of check_obsoletes with DNF distro.conn.remote_module.enable_yum_priority_obsoletes() logger.warning('check_obsoletes has been enabled for Yum priorities plugin') if version_kind in ['stable', 'testing']: distro.packager.add_repo_gpg_key(gpg.url(key)) if version_kind == 'stable': url = 'https://download.ceph.com/rpm-{version}/fc{release}/'.format( version=version, release=release, ) elif version_kind == 'testing': url = 'https://download.ceph.com/rpm-testing/fc{release}'.format( release=release, ) remoto.process.run( distro.conn, [ 'rpm', '-Uvh', '--replacepkgs', '--force', '--quiet', '{url}noarch/ceph-release-1-0.fc{release}.noarch.rpm'.format( url=url, release=release, ), ] ) # set the right priority logger.warning('ensuring that /etc/yum.repos.d/ceph.repo contains a high priority') distro.conn.remote_module.set_repo_priority(['Ceph', 'Ceph-noarch', 'ceph-source']) logger.warning('altered ceph.repo priorities to contain: priority=1') elif version_kind in ['dev', 'dev_commit']: logger.info('skipping install of ceph-release package') logger.info('repo file will be created manually') mirror_install( distro, 'http://gitbuilder.ceph.com/ceph-rpm-fc{release}-{machine}-basic/{sub}/{version}/'.format( release=release.split(".", 1)[0], machine=machine, sub='ref' if version_kind == 'dev' else 'sha1', version=version), gpg.url(key), adjust_repos=True, extra_installs=False, gpgcheck=gpgcheck, ) else: raise Exception('unrecognized version_kind %s' % version_kind) distro.packager.install( packages ) ceph-deploy-2.0.1/ceph_deploy/hosts/fedora/mon/0000755000076500000240000000000013312242252022142 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/hosts/fedora/mon/__init__.py0000644000076500000240000000017613243310455024263 0ustar alfredostaff00000000000000from ceph_deploy.hosts.common import mon_add as add # noqa from ceph_deploy.hosts.common import mon_create as create # noqa ceph-deploy-2.0.1/ceph_deploy/hosts/fedora/uninstall.py0000644000076500000240000000024412620214647023744 0ustar alfredostaff00000000000000def uninstall(distro, purge=False): packages = [ 'ceph', 'ceph-common', 'ceph-radosgw', ] distro.packager.remove(packages) ceph-deploy-2.0.1/ceph_deploy/hosts/remotes.py0000644000076500000240000003114213277045417022160 0ustar alfredostaff00000000000000try: import configparser except ImportError: import ConfigParser as configparser import errno import socket 
import os import shutil import tempfile import platform import re def platform_information(_linux_distribution=None): """ detect platform information from remote host """ linux_distribution = _linux_distribution or platform.linux_distribution distro, release, codename = linux_distribution() if not distro: distro, release, codename = parse_os_release() if not codename and 'debian' in distro.lower(): # this could be an empty string in Debian debian_codenames = { '10': 'buster', '9': 'stretch', '8': 'jessie', '7': 'wheezy', '6': 'squeeze', } major_version = release.split('.')[0] codename = debian_codenames.get(major_version, '') # In order to support newer jessie/sid or wheezy/sid strings we test this # if sid is buried in the minor, we should use sid anyway. if not codename and '/' in release: major, minor = release.split('/') if minor == 'sid': codename = minor else: codename = major if not codename and 'oracle' in distro.lower(): # this could be an empty string in Oracle linux codename = 'oracle' if not codename and 'virtuozzo linux' in distro.lower(): # this could be an empty string in Virtuozzo linux codename = 'virtuozzo' if not codename and 'arch' in distro.lower(): # this could be an empty string in Arch linux codename = 'arch' return ( str(distro).rstrip(), str(release).rstrip(), str(codename).rstrip() ) def parse_os_release(release_path='/etc/os-release'): """ Extract (distro, release, codename) from /etc/os-release if present """ release_info = {} if os.path.isfile(release_path): for line in open(release_path, 'r').readlines(): line = line.strip() if line.startswith('#'): continue parts = line.split('=') if len(parts) != 2: continue release_info[parts[0].strip()] = parts[1].strip("\"'\n\t ") # In theory, we want ID/NAME, VERSION_ID and VERSION_CODENAME (with a # possible fallback to VERSION on the latter), based on information at: # https://www.freedesktop.org/software/systemd/man/os-release.html # However, after reviewing several distros /etc/os-release, getting # the codename is a bit of a mess. It's usually in parentheses in # VERSION, with some exceptions. 
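To make the codename fallbacks in platform_information above concrete, here is how two sample Debian release strings are resolved (release values are illustrative)::

    debian_codenames = {'10': 'buster', '9': 'stretch', '8': 'jessie'}

    # plain numeric release: map the major version to a codename
    release = '9.4'
    print(debian_codenames.get(release.split('.')[0], ''))  # -> 'stretch'

    # 'jessie/sid'-style strings: prefer 'sid' when it is the minor part
    release = 'jessie/sid'
    major, minor = release.split('/')
    print(minor if minor == 'sid' else major)  # -> 'sid'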
distro = release_info.get('ID', '') release = release_info.get('VERSION_ID', '') codename = release_info.get('UBUNTU_CODENAME', release_info.get('VERSION', '')) match = re.match(r'^[^(]+ \(([^)]+)\)', codename) if match: codename = match.group(1).lower() if not codename and release_info.get('NAME', '') == 'openSUSE Tumbleweed': codename = 'tumbleweed' return (distro, release, codename) def machine_type(): """ detect machine type """ return platform.machine() def write_sources_list(url, codename, filename='ceph.list', mode=0o644): """add deb repo to /etc/apt/sources.list.d/""" repo_path = os.path.join('/etc/apt/sources.list.d', filename) content = 'deb {url} {codename} main\n'.format( url=url, codename=codename, ) write_file(repo_path, content.encode('utf-8'), mode) def write_sources_list_content(content, filename='ceph.list', mode=0o644): """add deb repo to /etc/apt/sources.list.d/ from content""" repo_path = os.path.join('/etc/apt/sources.list.d', filename) if not isinstance(content, str): content = content.decode('utf-8') write_file(repo_path, content.encode('utf-8'), mode) def write_yum_repo(content, filename='ceph.repo'): """add yum repo file in /etc/yum.repos.d/""" repo_path = os.path.join('/etc/yum.repos.d', filename) if not isinstance(content, str): content = content.decode('utf-8') write_file(repo_path, content.encode('utf-8')) def set_apt_priority(fqdn, path='/etc/apt/preferences.d/ceph.pref'): template = "Package: *\nPin: origin {fqdn}\nPin-Priority: 999\n" content = template.format(fqdn=fqdn) with open(path, 'w') as fout: fout.write(content) def set_repo_priority(sections, path='/etc/yum.repos.d/ceph.repo', priority='1'): Config = configparser.ConfigParser() Config.read(path) Config.sections() for section in sections: try: Config.set(section, 'priority', priority) except configparser.NoSectionError: # Emperor versions of Ceph used all lowercase sections # so lets just try again for the section that failed, maybe # we are able to find it if it is lower Config.set(section.lower(), 'priority', priority) with open(path, 'w') as fout: Config.write(fout) # And now, because ConfigParser is super duper, we need to remove the # assignments so this looks like it was before def remove_whitespace_from_assignments(): separator = "=" lines = open(path).readlines() fp = open(path, "w") for line in lines: line = line.strip() if not line.startswith("#") and separator in line: assignment = line.split(separator, 1) assignment = tuple(map(str.strip, assignment)) fp.write("%s%s%s\n" % (assignment[0], separator, assignment[1])) else: fp.write(line + "\n") remove_whitespace_from_assignments() def write_conf(cluster, conf, overwrite): """ write cluster configuration to /etc/ceph/{cluster}.conf """ path = '/etc/ceph/{cluster}.conf'.format(cluster=cluster) tmp_file = tempfile.NamedTemporaryFile('w', dir='/etc/ceph', delete=False) err_msg = 'config file %s exists with different content; use --overwrite-conf to overwrite' % path if os.path.exists(path): with open(path, 'r') as f: old = f.read() if old != conf and not overwrite: raise RuntimeError(err_msg) tmp_file.write(conf) tmp_file.close() shutil.move(tmp_file.name, path) os.chmod(path, 0o644) return if os.path.exists('/etc/ceph'): with open(path, 'w') as f: f.write(conf) os.chmod(path, 0o644) else: err_msg = '/etc/ceph/ does not exist - could not write config' raise RuntimeError(err_msg) def write_keyring(path, key, uid=-1, gid=-1): """ create a keyring file """ # Note that we *require* to avoid deletion of the temp file # otherwise we risk not being 
able to copy the contents from # one file system to the other, hence the `delete=False` tmp_file = tempfile.NamedTemporaryFile('wb', delete=False) tmp_file.write(key) tmp_file.close() keyring_dir = os.path.dirname(path) if not path_exists(keyring_dir): makedir(keyring_dir, uid, gid) shutil.move(tmp_file.name, path) def create_mon_path(path, uid=-1, gid=-1): """create the mon path if it does not exist""" if not os.path.exists(path): os.makedirs(path) os.chown(path, uid, gid); def create_done_path(done_path, uid=-1, gid=-1): """create a done file to avoid re-doing the mon deployment""" with open(done_path, 'wb'): pass os.chown(done_path, uid, gid); def create_init_path(init_path, uid=-1, gid=-1): """create the init path if it does not exist""" if not os.path.exists(init_path): with open(init_path, 'wb'): pass os.chown(init_path, uid, gid); def append_to_file(file_path, contents): """append contents to file""" with open(file_path, 'a') as f: f.write(contents) def path_getuid(path): return os.stat(path).st_uid def path_getgid(path): return os.stat(path).st_gid def readline(path): with open(path) as _file: return _file.readline().strip('\n') def path_exists(path): return os.path.exists(path) def get_realpath(path): return os.path.realpath(path) def listdir(path): return os.listdir(path) def makedir(path, ignored=None, uid=-1, gid=-1): ignored = ignored or [] try: os.makedirs(path) except OSError as error: if error.errno in ignored: pass else: # re-raise the original exception raise else: os.chown(path, uid, gid); def unlink(_file): os.unlink(_file) def write_monitor_keyring(keyring, monitor_keyring, uid=-1, gid=-1): """create the monitor keyring file""" write_file(keyring, monitor_keyring, 0o600, None, uid, gid) def write_file(path, content, mode=0o644, directory=None, uid=-1, gid=-1): if directory: if path.startswith("/"): path = path[1:] path = os.path.join(directory, path) if os.path.exists(path): # Delete file in case we are changing its mode os.unlink(path) with os.fdopen(os.open(path, os.O_WRONLY | os.O_CREAT, mode), 'wb') as f: f.write(content) os.chown(path, uid, gid) def touch_file(path): with open(path, 'wb') as f: # noqa pass def get_file(path): """ fetch remote file """ try: with open(path, 'rb') as f: return f.read() except IOError: pass def object_grep(term, file_object): for line in file_object.readlines(): if term in line: return True return False def grep(term, file_path): # A small grep-like function that will search for a word in a file and # return True if it does and False if it does not. # Implemented initially to have a similar behavior as the init system # detection in Ceph's init scripts:: # # detect systemd # # SYSTEMD=0 # grep -qs systemd /proc/1/comm && SYSTEMD=1 # .. note:: Because we intent to be operating in silent mode, we explicitly # return ``False`` if the file does not exist. if not os.path.isfile(file_path): return False with open(file_path) as _file: return object_grep(term, _file) def shortname(): """get remote short hostname""" return socket.gethostname().split('.', 1)[0] def which_service(): """ locating the `service` executable... """ # XXX This should get deprecated at some point. For now # it just bypasses and uses the new helper. 
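The write-to-a-temp-file-then-rename idiom used by write_conf and write_keyring above deserves a note: keeping the temporary file on the same filesystem makes the final move an atomic rename, so readers never observe a half-written file. A minimal standalone sketch of the pattern (not the exact helpers above)::

    import os
    import shutil
    import tempfile

    def atomic_write(path, data, mode=0o644):
        # Keep the temp file next to the destination so the final move is
        # an atomic rename; this is why write_conf passes dir='/etc/ceph'
        # (and why both helpers pass delete=False).
        tmp = tempfile.NamedTemporaryFile('w', dir=os.path.dirname(path) or '.',
                                          delete=False)
        try:
            tmp.write(data)
            tmp.close()
            shutil.move(tmp.name, path)
            os.chmod(path, mode)
        finally:
            # only reached with the temp file still present on failure
            if os.path.exists(tmp.name):
                os.unlink(tmp.name)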
return which('service') def which(executable): """find the location of an executable""" locations = ( '/usr/local/bin', '/bin', '/usr/bin', '/usr/local/sbin', '/usr/sbin', '/sbin', ) for location in locations: executable_path = os.path.join(location, executable) if os.path.exists(executable_path) and os.path.isfile(executable_path): return executable_path def make_mon_removed_dir(path, file_name): """ move old monitor data """ try: os.makedirs('/var/lib/ceph/mon-removed') except OSError as e: if e.errno != errno.EEXIST: raise shutil.move(path, os.path.join('/var/lib/ceph/mon-removed/', file_name)) def safe_mkdir(path, uid=-1, gid=-1): """ create path if it doesn't exist """ try: os.mkdir(path) except OSError as e: if e.errno == errno.EEXIST: pass else: raise else: os.chown(path, uid, gid) def safe_makedirs(path, uid=-1, gid=-1): """ create path recursively if it doesn't exist """ try: os.makedirs(path) except OSError as e: if e.errno == errno.EEXIST: pass else: raise else: os.chown(path, uid, gid) def zeroing(dev): """ zeroing last few blocks of device """ # this kills the crab # # sgdisk will wipe out the main copy of the GPT partition # table (sorry), but it doesn't remove the backup copies, and # subsequent commands will continue to complain and fail when # they see those. zeroing the last few blocks of the device # appears to do the trick. lba_size = 4096 size = 33 * lba_size with open(dev, 'wb') as f: f.seek(-size, os.SEEK_END) f.write(size*b'\0') return True def enable_yum_priority_obsoletes(path="/etc/yum/pluginconf.d/priorities.conf"): """Configure Yum priorities to include obsoletes""" config = configparser.ConfigParser() config.read(path) config.set('main', 'check_obsoletes', '1') with open(path, 'w') as fout: config.write(fout) # remoto magic, needed to execute these functions remotely if __name__ == '__channelexec__': for item in channel: # noqa channel.send(eval(item)) # noqa ceph-deploy-2.0.1/ceph_deploy/hosts/rhel/0000755000076500000240000000000013312242252021043 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/hosts/rhel/__init__.py0000644000076500000240000000142313243310455023160 0ustar alfredostaff00000000000000from . import mon # noqa from .install import install, mirror_install, repo_install # noqa from .uninstall import uninstall # noqa from ceph_deploy.util import pkg_managers from ceph_deploy.util.system import is_systemd # Allow to set some information about this distro # distro = None release = None codename = None def choose_init(module): """ Select an init system Returns the name of an init system (upstart, sysvinit ...).
""" if module.normalized_release.int_major < 7: return 'sysvinit' if not module.conn.remote_module.path_exists("/usr/lib/systemd/system/ceph.target"): return 'sysvinit' if is_systemd(module.conn): return 'systemd' return 'systemd' def get_packager(module): return pkg_managers.Yum(module) ceph-deploy-2.0.1/ceph_deploy/hosts/rhel/install.py0000644000076500000240000000372012754333353023101 0ustar alfredostaff00000000000000from ceph_deploy.util import templates def install(distro, version_kind, version, adjust_repos, **kw): packages = kw.get('components', []) distro.packager.clean() distro.packager.install(packages) def mirror_install(distro, repo_url, gpg_url, adjust_repos, extra_installs=True, **kw): packages = kw.get('components', []) repo_url = repo_url.strip('/') # Remove trailing slashes gpgcheck = kw.pop('gpgcheck', 1) distro.packager.clean() if adjust_repos: distro.packager.add_repo_gpg_key(gpg_url) ceph_repo_content = templates.ceph_repo.format( repo_url=repo_url, gpg_url=gpg_url, gpgcheck=gpgcheck, ) distro.conn.remote_module.write_yum_repo(ceph_repo_content) if extra_installs and packages: distro.packager.install(packages) def repo_install(distro, reponame, baseurl, gpgkey, **kw): # do we have specific components to install? # removed them from `kw` so that we don't mess with other defaults packages = kw.pop('components', []) # Get some defaults name = kw.pop('name', '%s repo' % reponame) enabled = kw.pop('enabled', 1) gpgcheck = kw.pop('gpgcheck', 1) install_ceph = kw.pop('install_ceph', False) proxy = kw.pop('proxy', '') # will get ignored if empty _type = 'repo-md' baseurl = baseurl.strip('/') # Remove trailing slashes distro.packager.clean() if gpgkey: distro.packager.add_repo_gpg_key(gpgkey) repo_content = templates.custom_repo( reponame=reponame, name=name, baseurl=baseurl, enabled=enabled, gpgcheck=gpgcheck, _type=_type, gpgkey=gpgkey, proxy=proxy, **kw ) distro.conn.remote_module.write_yum_repo( repo_content, "%s.repo" % reponame ) # Some custom repos do not need to install ceph if install_ceph and packages: distro.packager.install(packages) ceph-deploy-2.0.1/ceph_deploy/hosts/rhel/mon/0000755000076500000240000000000013312242252021634 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/hosts/rhel/mon/__init__.py0000644000076500000240000000017613243310455023755 0ustar alfredostaff00000000000000from ceph_deploy.hosts.common import mon_add as add # noqa from ceph_deploy.hosts.common import mon_create as create # noqa ceph-deploy-2.0.1/ceph_deploy/hosts/rhel/uninstall.py0000644000076500000240000000034712620214647023442 0ustar alfredostaff00000000000000def uninstall(distro, purge=False): packages = [ 'ceph', 'ceph-common', 'ceph-mon', 'ceph-osd', 'ceph-radosgw' ] distro.packager.remove(packages) distro.packager.clean() ceph-deploy-2.0.1/ceph_deploy/hosts/suse/0000755000076500000240000000000013312242252021070 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/hosts/suse/__init__.py0000644000076500000240000000134012754333353023213 0ustar alfredostaff00000000000000from . import mon # noqa from .install import install, mirror_install, repo_install # noqa from .uninstall import uninstall # noqa import logging from ceph_deploy.util import pkg_managers # Allow to set some information about this distro # log = logging.getLogger(__name__) distro = None release = None codename = None def choose_init(module): """ Select a init system Returns the name of a init system (upstart, sysvinit ...). 
""" init_mapping = { '11' : 'sysvinit', # SLE_11 '12' : 'systemd', # SLE_12 '13.1' : 'systemd', # openSUSE_13.1 } return init_mapping.get(release, 'systemd') def get_packager(module): return pkg_managers.Zypper(module) ceph-deploy-2.0.1/ceph_deploy/hosts/suse/install.py0000644000076500000240000000500512754333353023124 0ustar alfredostaff00000000000000import logging from ceph_deploy.util import templates from ceph_deploy.lib import remoto from ceph_deploy.hosts.common import map_components LOG = logging.getLogger(__name__) NON_SPLIT_PACKAGES = ['ceph-osd', 'ceph-mon', 'ceph-mds'] def install(distro, version_kind, version, adjust_repos, **kw): packages = map_components( NON_SPLIT_PACKAGES, kw.get('components', []) ) distro.packager.clean() if packages: distro.packager.install(packages) def mirror_install(distro, repo_url, gpg_url, adjust_repos, **kw): packages = map_components( NON_SPLIT_PACKAGES, kw.get('components', []) ) repo_url = repo_url.strip('/') # Remove trailing slashes gpg_url_path = gpg_url.split('file://')[-1] # Remove file if present gpgcheck = kw.pop('gpgcheck', 1) if adjust_repos: remoto.process.run( distro.conn, [ 'rpm', '--import', gpg_url_path, ] ) ceph_repo_content = templates.zypper_repo.format( repo_url=repo_url, gpg_url=gpg_url, gpgcheck=gpgcheck, ) distro.conn.remote_module.write_file( '/etc/zypp/repos.d/ceph.repo', ceph_repo_content.encode('utf-8')) distro.packager.clean() if packages: distro.packager.install(packages) def repo_install(distro, reponame, baseurl, gpgkey, **kw): packages = map_components( NON_SPLIT_PACKAGES, kw.pop('components', []) ) # Get some defaults name = kw.get('name', '%s repo' % reponame) enabled = kw.get('enabled', 1) gpgcheck = kw.get('gpgcheck', 1) install_ceph = kw.pop('install_ceph', False) proxy = kw.get('proxy') _type = 'repo-md' baseurl = baseurl.strip('/') # Remove trailing slashes if gpgkey: remoto.process.run( distro.conn, [ 'rpm', '--import', gpgkey, ] ) repo_content = templates.custom_repo( reponame=reponame, name = name, baseurl = baseurl, enabled = enabled, gpgcheck = gpgcheck, _type = _type, gpgkey = gpgkey, proxy = proxy, ) distro.conn.remote_module.write_file( '/etc/zypp/repos.d/%s' % (reponame), repo_content.encode('utf-8') ) # Some custom repos do not need to install ceph if install_ceph and packages: distro.packager.install(packages) ceph-deploy-2.0.1/ceph_deploy/hosts/suse/mon/0000755000076500000240000000000013312242252021661 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/hosts/suse/mon/__init__.py0000644000076500000240000000017613243310455024002 0ustar alfredostaff00000000000000from ceph_deploy.hosts.common import mon_add as add # noqa from ceph_deploy.hosts.common import mon_create as create # noqa ceph-deploy-2.0.1/ceph_deploy/hosts/suse/uninstall.py0000644000076500000240000000034112620214647023461 0ustar alfredostaff00000000000000def uninstall(distro, purge=False): packages = [ 'ceph', 'ceph-common', 'libcephfs1', 'librados2', 'librbd1', 'ceph-radosgw', ] distro.packager.remove(packages) ceph-deploy-2.0.1/ceph_deploy/hosts/util.py0000644000076500000240000000207512620214647021454 0ustar alfredostaff00000000000000""" A utility module that can host utilities that will be used by more than one type of distro and not common to all of them """ from ceph_deploy.util import pkg_managers def install_yum_priorities(distro, _yum=None): """ EPEL started packaging Ceph so we need to make sure that the ceph.repo we install has a higher priority than the EPEL repo so that when installing Ceph it will come from the 
repo file we create. The name of the package changed back and forth (!) since CentOS 4: From the CentOS wiki:: Note: This plugin has carried at least two differing names over time. It is named yum-priorities on CentOS-5 but was named yum-plugin-priorities on CentOS-4. CentOS-6 has reverted to yum-plugin-priorities. :params _yum: Used for testing, so we can inject a fake yum """ yum = _yum or pkg_managers.yum package_name = 'yum-plugin-priorities' if distro.normalized_name == 'centos': if distro.release[0] != '6': package_name = 'yum-priorities' yum(distro.conn, package_name) ceph-deploy-2.0.1/ceph_deploy/install.py0000644000076500000240000004751213312241405021001 0ustar alfredostaff00000000000000import argparse import logging import os from ceph_deploy import hosts from ceph_deploy.cliutil import priority from ceph_deploy.lib import remoto from ceph_deploy.util.constants import default_components from ceph_deploy.util.paths import gpg LOG = logging.getLogger(__name__) def sanitize_args(args): """ args may need a bunch of logic to set proper defaults that argparse is not well suited for. """ if args.release is None: args.release = 'mimic' args.default_release = True # XXX This whole dance is because --stable is getting deprecated if args.stable is not None: LOG.warning('the --stable flag is deprecated, use --release instead') args.release = args.stable # XXX Tango ends here. return args def detect_components(args, distro): """ Since the package split, now there are various different Ceph components to install like: * ceph * ceph-mon * ceph-mgr * ceph-osd * ceph-mds This helper function should parse the args that may contain specifics about these flags and return the default if none are passed in (which is, install everything) """ # the flag that prevents all logic here is the `--repo` flag which is used # when no packages should be installed, just the repo files, so check for # that here and return an empty list (which is equivalent to say 'no # packages should be installed') if args.repo: return [] flags = { 'install_osd': 'ceph-osd', 'install_rgw': 'ceph-radosgw', 'install_mds': 'ceph-mds', 'install_mon': 'ceph-mon', 'install_mgr': 'ceph-mgr', 'install_common': 'ceph-common', 'install_tests': 'ceph-test', } if distro.is_rpm: defaults = default_components.rpm elif distro.is_pkgtarxz: # archlinux doesn't have components! 
flags = { 'install_osd': 'ceph', 'install_rgw': 'ceph', 'install_mds': 'ceph', 'install_mon': 'ceph', 'install_mgr': 'ceph', 'install_common': 'ceph', 'install_tests': 'ceph', } defaults = default_components.pkgtarxz else: defaults = default_components.deb # different naming convention for deb than rpm for radosgw flags['install_rgw'] = 'radosgw' if args.install_all: return defaults else: components = [] for k, v in flags.items(): if getattr(args, k, False): components.append(v) # if we have some components selected from flags then return that, # otherwise return defaults because no flags and no `--repo` means we # should get all of them by default return components or defaults def install(args): args = sanitize_args(args) if args.repo: return install_repo(args) gpgcheck = 0 if args.nogpgcheck else 1 if args.version_kind == 'stable': version = args.release else: version = getattr(args, args.version_kind) version_str = args.version_kind if version: version_str += ' version {version}'.format(version=version) LOG.debug( 'Installing %s on cluster %s hosts %s', version_str, args.cluster, ' '.join(args.host), ) for hostname in args.host: LOG.debug('Detecting platform for host %s ...', hostname) distro = hosts.get( hostname, username=args.username, # XXX this should get removed once ceph packages are split for # upstream. If default_release is True, it means that the user is # trying to install on a RHEL machine and should expect to get RHEL # packages. Otherwise, it will need to specify either a specific # version, or repo, or a development branch. Other distro users # should not see any differences. use_rhceph=args.default_release, ) LOG.info( 'Distro info: %s %s %s', distro.name, distro.release, distro.codename ) components = detect_components(args, distro) if distro.init == 'sysvinit' and args.cluster != 'ceph': LOG.error('refusing to install on host: %s, with custom cluster name: %s' % ( hostname, args.cluster, ) ) LOG.error('custom cluster names are not supported on sysvinit hosts') continue rlogger = logging.getLogger(hostname) rlogger.info('installing Ceph on %s' % hostname) cd_conf = getattr(args, 'cd_conf', None) # custom repo arguments repo_url = os.environ.get('CEPH_DEPLOY_REPO_URL') or args.repo_url gpg_url = os.environ.get('CEPH_DEPLOY_GPG_URL') or args.gpg_url gpg_fallback = gpg.url('release') if gpg_url is None and repo_url: LOG.warning('--gpg-url was not used, will fallback') LOG.warning('using GPG fallback: %s', gpg_fallback) gpg_url = gpg_fallback if args.local_mirror: if args.username: hostname = "%s@%s" % (args.username, hostname) remoto.rsync(hostname, args.local_mirror, '/opt/ceph-deploy/repo', distro.conn.logger, sudo=True) repo_url = 'file:///opt/ceph-deploy/repo' gpg_url = 'file:///opt/ceph-deploy/repo/release.asc' if repo_url: # triggers using a custom repository # the user used a custom repo url, this should override anything # we can detect from the configuration, so warn about it if cd_conf: if cd_conf.get_default_repo(): rlogger.warning('a default repo was found but it was \ overridden on the CLI') if args.release in cd_conf.get_repos(): rlogger.warning('a custom repo was found but it was \ overridden on the CLI') rlogger.info('using custom repository location: %s', repo_url) distro.mirror_install( distro, repo_url, gpg_url, args.adjust_repos, components=components, gpgcheck=gpgcheck, args=args ) # Detect and install custom repos here if needed elif should_use_custom_repo(args, cd_conf, repo_url): LOG.info('detected valid custom repositories from config file') 
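For context, here is a sketch of the cephdeploy.conf layout that the custom-repo path below consumes. The baseurl and gpgkey keys are the ones repo_install pops as required, and extra-repos is read with get_list; the default flag and all values shown are assumptions for illustration only::

    [myrepo]
    baseurl = https://example.org/rpm-mimic/el7
    gpgkey = https://example.org/release.asc
    default = true
    extra-repos = extras

    [extras]
    baseurl = https://example.org/extras/el7
    gpgkey = https://example.org/release.asc

A missing baseurl or gpgkey in any section surfaces as the RuntimeError raised in custom_repo below.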
custom_repo(distro, args, cd_conf, rlogger) else: # otherwise a normal installation distro.install( distro, args.version_kind, version, args.adjust_repos, components=components, gpgcheck = gpgcheck, args=args ) # Check the ceph version we just installed hosts.common.ceph_version(distro.conn) distro.conn.exit() def should_use_custom_repo(args, cd_conf, repo_url): """ A boolean to determine the logic needed to proceed with a custom repo installation instead of cramming everything next to the logic operator. """ if repo_url: # repo_url signals a CLI override, return False immediately return False if cd_conf: if cd_conf.has_repos: has_valid_release = args.release in cd_conf.get_repos() has_default_repo = cd_conf.get_default_repo() if has_valid_release or has_default_repo: return True return False def custom_repo(distro, args, cd_conf, rlogger, install_ceph=None): """ A custom repo install helper that will go through config checks to retrieve repos (and any extra repos defined) and install those ``cd_conf`` is the object built from argparse that holds the flags and information needed to determine what metadata from the configuration is to be used. """ default_repo = cd_conf.get_default_repo() components = detect_components(args, distro) if args.release in cd_conf.get_repos(): LOG.info('will use repository from conf: %s' % args.release) default_repo = args.release elif default_repo: LOG.info('will use default repository: %s' % default_repo) # At this point we know there is a cd_conf and that it has custom # repos; make sure we were able to detect an actual repo if not default_repo: LOG.warning('a ceph-deploy config was found with repos \ but could not default to one') else: options = dict(cd_conf.items(default_repo)) options['install_ceph'] = False if install_ceph is False else True extra_repos = cd_conf.get_list(default_repo, 'extra-repos') rlogger.info('adding custom repository file') try: distro.repo_install( distro, default_repo, options.pop('baseurl'), options.pop('gpgkey'), components=components, **options ) except KeyError as err: raise RuntimeError('missing required key: %s in config section: %s' % (err, default_repo)) for xrepo in extra_repos: rlogger.info('adding extra repo file: %s.repo' % xrepo) options = dict(cd_conf.items(xrepo)) try: distro.repo_install( distro, xrepo, options.pop('baseurl'), options.pop('gpgkey'), components=components, **options ) except KeyError as err: raise RuntimeError('missing required key: %s in config section: %s' % (err, xrepo)) def install_repo(args): """ For a user that wants to install the repository only (and avoid installing Ceph and its dependencies). """ cd_conf = getattr(args, 'cd_conf', None) for hostname in args.host: LOG.debug('Detecting platform for host %s ...', hostname) distro = hosts.get( hostname, username=args.username, # XXX this should get removed once Ceph packages are split for # upstream. If default_release is True, it means that the user is # trying to install on a RHEL machine and should expect to get RHEL # packages. Otherwise, it will need to specify either a specific # version, or repo, or a development branch. Other distro users should # not see any differences.
use_rhceph=args.default_release, ) rlogger = logging.getLogger(hostname) LOG.info( 'Distro info: %s %s %s', distro.name, distro.release, distro.codename ) custom_repo(distro, args, cd_conf, rlogger, install_ceph=False) def remove(args, purge): LOG.info('note that some dependencies *will not* be removed because they can cause issues with qemu-kvm') LOG.info('like: librbd1 and librados2') remove_action = 'Uninstalling' if purge: remove_action = 'Purging' LOG.debug( '%s on cluster %s hosts %s', remove_action, args.cluster, ' '.join(args.host), ) for hostname in args.host: LOG.debug('Detecting platform for host %s ...', hostname) distro = hosts.get( hostname, username=args.username, use_rhceph=True) LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename) rlogger = logging.getLogger(hostname) rlogger.info('%s Ceph on %s' % (remove_action, hostname)) distro.uninstall(distro, purge=purge) distro.conn.exit() def uninstall(args): remove(args, False) def purge(args): remove(args, True) def purgedata(args): LOG.debug( 'Purging data from cluster %s hosts %s', args.cluster, ' '.join(args.host), ) installed_hosts = [] for hostname in args.host: distro = hosts.get(hostname, username=args.username) ceph_is_installed = distro.conn.remote_module.which('ceph') if ceph_is_installed: installed_hosts.append(hostname) distro.conn.exit() if installed_hosts: LOG.error("Ceph is still installed on: %s", installed_hosts) raise RuntimeError("refusing to purge data while Ceph is still installed") for hostname in args.host: distro = hosts.get(hostname, username=args.username) LOG.info( 'Distro info: %s %s %s', distro.name, distro.release, distro.codename ) rlogger = logging.getLogger(hostname) rlogger.info('purging data on %s' % hostname) # Try to remove the contents of /var/lib/ceph first, don't worry # about errors here, we deal with them later on remoto.process.check( distro.conn, [ 'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph', ] ) # If we failed in the previous call, then we probably have OSDs # still mounted, so we unmount them here if distro.conn.remote_module.path_exists('/var/lib/ceph'): rlogger.warning( 'OSDs may still be mounted, trying to unmount them' ) remoto.process.run( distro.conn, [ 'find', '/var/lib/ceph', '-mindepth', '1', '-maxdepth', '2', '-type', 'd', '-exec', 'umount', '{}', ';', ] ) # And now we try again to remove the contents, since OSDs should be # unmounted, but this time we do check for errors remoto.process.run( distro.conn, [ 'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph', ] ) remoto.process.run( distro.conn, [ 'rm', '-rf', '--one-file-system', '--', '/etc/ceph/', ] ) distro.conn.exit() class StoreVersion(argparse.Action): """ Like ``"store"`` but also remember which one of the exclusive options was set. There are three kinds of versions: stable, testing and dev. This sets ``version_kind`` to be the right one of the above. This kludge essentially lets us differentiate explicitly set values from defaults. """ def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, values) if self.dest == 'release': self.dest = 'stable' namespace.version_kind = self.dest @priority(20) def make(parser): """ Install Ceph packages on remote hosts. 
""" version = parser.add_mutually_exclusive_group() # XXX deprecated in favor of release version.add_argument( '--stable', nargs='?', action=StoreVersion, metavar='CODENAME', help='[DEPRECATED] install a release known as CODENAME\ (done by default) (default: %(default)s)', ) version.add_argument( '--release', nargs='?', action=StoreVersion, metavar='CODENAME', help='install a release known as CODENAME\ (done by default) (default: %(default)s)', ) version.add_argument( '--testing', nargs=0, action=StoreVersion, help='install the latest development release', ) version.add_argument( '--dev', nargs='?', action=StoreVersion, const='master', metavar='BRANCH_OR_TAG', help='install a bleeding edge build from Git branch\ or tag (default: %(default)s)', ) parser.add_argument( '--dev-commit', nargs='?', action=StoreVersion, metavar='COMMIT', help='install a bleeding edge build from Git commit (defaults to master branch)', ) version.set_defaults( stable=None, # XXX deprecated in favor of release release=None, # Set the default release in sanitize_args() dev='master', version_kind='stable', ) parser.add_argument( '--mon', dest='install_mon', action='store_true', help='install the mon component only', ) parser.add_argument( '--mgr', dest='install_mgr', action='store_true', help='install the mgr component only', ) parser.add_argument( '--mds', dest='install_mds', action='store_true', help='install the mds component only', ) parser.add_argument( '--rgw', dest='install_rgw', action='store_true', help='install the rgw component only', ) parser.add_argument( '--osd', dest='install_osd', action='store_true', help='install the osd component only', ) parser.add_argument( '--tests', dest='install_tests', action='store_true', help='install the testing components', ) parser.add_argument( '--cli', '--common', dest='install_common', action='store_true', help='install the common component only', ) parser.add_argument( '--all', dest='install_all', action='store_true', help='install all Ceph components (mon, osd, mds, rgw) except tests. This is the default', ) repo = parser.add_mutually_exclusive_group() repo.add_argument( '--adjust-repos', dest='adjust_repos', action='store_true', help='install packages modifying source repos', ) repo.add_argument( '--no-adjust-repos', dest='adjust_repos', action='store_false', help='install packages without modifying source repos', ) repo.add_argument( '--repo', action='store_true', help='install repo files only (skips package installation)', ) repo.set_defaults( adjust_repos=True, ) parser.add_argument( 'host', metavar='HOST', nargs='+', help='hosts to install on', ) parser.add_argument( '--local-mirror', nargs='?', const='PATH', default=None, help='Fetch packages and push them to hosts for a local repo mirror', ) parser.add_argument( '--repo-url', nargs='?', dest='repo_url', help='specify a repo URL that mirrors/contains Ceph packages', ) parser.add_argument( '--gpg-url', nargs='?', dest='gpg_url', help='specify a GPG key URL to be used with custom repos\ (defaults to ceph.com)' ) parser.add_argument( '--nogpgcheck', action='store_true', help='install packages without gpgcheck', ) parser.set_defaults( func=install, ) @priority(80) def make_uninstall(parser): """ Remove Ceph packages from remote hosts. """ parser.add_argument( 'host', metavar='HOST', nargs='+', help='hosts to uninstall Ceph from', ) parser.set_defaults( func=uninstall, ) @priority(80) def make_purge(parser): """ Remove Ceph packages from remote hosts and purge all data. 
""" parser.add_argument( 'host', metavar='HOST', nargs='+', help='hosts to purge Ceph from', ) parser.set_defaults( func=purge, ) @priority(80) def make_purge_data(parser): """ Purge (delete, destroy, discard, shred) any Ceph data from /var/lib/ceph """ parser.add_argument( 'host', metavar='HOST', nargs='+', help='hosts to purge Ceph data from', ) parser.set_defaults( func=purgedata, ) ceph-deploy-2.0.1/ceph_deploy/lib/0000755000076500000240000000000013312242252017517 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/lib/__init__.py0000644000076500000240000000146112754333353021646 0ustar alfredostaff00000000000000""" This module is meant for vendorizing Python libraries. Most libraries will need to have some ``sys.path`` alterations done unless they are doing relative imports. Do **not** add anything to this module that does not represent a vendorized library. Vendored libraries should go into the ``vendor`` directory and imported from there. This is so we allow libraries that are installed normally to be imported if the vendored module is not available. The import dance here is done so that all other imports throught ceph-deploy are kept the same regardless of where the module comes from. The expected way to import remoto would look like this:: from ceph_deploy.lib import remoto """ try: # vendored from .vendor import remoto except ImportError: # normally installed import remoto # noqa ceph-deploy-2.0.1/ceph_deploy/lib/vendor/0000755000076500000240000000000013312242252021014 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/lib/vendor/__init__.py0000644000076500000240000000000012620214647023123 0ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/0000755000076500000240000000000013312242252022321 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/__init__.py0000644000076500000240000000021012754342135024435 0ustar alfredostaff00000000000000from .connection import Connection from .file_sync import rsync from . import process from . import connection __version__ = '0.0.29' ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/connection.py0000644000076500000240000001230512754342135025045 0ustar alfredostaff00000000000000import socket import sys from .lib import execnet # # Connection Object # class Connection(object): def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=True, detect_sudo=False, interpreter=None): self.sudo = sudo self.hostname = hostname self.logger = logger or FakeRemoteLogger() self.remote_module = None self.channel = None self.global_timeout = None # wait for ever self.interpreter = interpreter or 'python%s' % sys.version_info[0] if eager: try: if detect_sudo: self.sudo = self._detect_sudo() self.gateway = self._make_gateway(hostname) except OSError: self.logger.error( "Can't communicate with remote host, possibly because " "%s is not installed there" % self.interpreter ) raise def _make_gateway(self, hostname): gateway = execnet.makegateway( self._make_connection_string(hostname) ) gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False) return gateway def _detect_sudo(self, _execnet=None): """ ``sudo`` detection has to create a different connection to the remote host so that we can reliably ensure that ``getuser()`` will return the right information. 
After getting the user info it closes the connection and returns a boolean """ exc = _execnet or execnet gw = exc.makegateway( self._make_connection_string(self.hostname, use_sudo=False) ) channel = gw.remote_exec( 'import getpass; channel.send(getpass.getuser())' ) result = channel.receive() gw.exit() if result == 'root': return False self.logger.debug('connection detected need for sudo') return True def _make_connection_string(self, hostname, _needs_ssh=None, use_sudo=None): _needs_ssh = _needs_ssh or needs_ssh interpreter = self.interpreter if use_sudo is not None: if use_sudo: interpreter = 'sudo ' + interpreter elif self.sudo: interpreter = 'sudo ' + interpreter if _needs_ssh(hostname): return 'ssh=%s//python=%s' % (hostname, interpreter) return 'popen//python=%s' % interpreter def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.exit() return False def execute(self, function, **kw): return self.gateway.remote_exec(function, **kw) def exit(self): self.gateway.exit() def import_module(self, module): self.remote_module = ModuleExecute(self.gateway, module, self.logger) return self.remote_module class ModuleExecute(object): def __init__(self, gateway, module, logger=None): self.channel = gateway.remote_exec(module) self.module = module self.logger = logger def __getattr__(self, name): if not hasattr(self.module, name): msg = "module %s does not have attribute %s" % (str(self.module), name) raise AttributeError(msg) docstring = self._get_func_doc(getattr(self.module, name)) def wrapper(*args): arguments = self._convert_args(args) if docstring: self.logger.debug(docstring) self.channel.send("%s(%s)" % (name, arguments)) try: return self.channel.receive() except Exception as error: # Error will come as a string of a traceback, remove everything # up to the actual exception since we do get garbage otherwise # that points to non-existent lines in the compiled code for tb_line in reversed(str(error).split('\n')): if tb_line: exc_line = tb_line break raise RuntimeError(exc_line) return wrapper def _get_func_doc(self, func): try: return getattr(func, 'func_doc').strip() except AttributeError: return '' def _convert_args(self, args): if args: if len(args) > 1: arguments = str(args).rstrip(')').lstrip('(') else: arguments = str(args).rstrip(',)').lstrip('(') else: arguments = '' return arguments # # FIXME this is getting ridiculous # class FakeRemoteLogger: def error(self, *a, **kw): pass def debug(self, *a, **kw): pass def info(self, *a, **kw): pass def warning(self, *a, **kw): pass def needs_ssh(hostname, _socket=None): """ Obtains remote hostname of the socket and cuts off the domain part of its FQDN. 
""" _socket = _socket or socket fqdn = _socket.getfqdn() if hostname == fqdn: return False local_hostname = _socket.gethostname() local_short_hostname = local_hostname.split('.')[0] if local_hostname == hostname or local_short_hostname == hostname: return False return True ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/exc.py0000644000076500000240000000025612754342135023467 0ustar alfredostaff00000000000000from .lib import execnet HostNotFound = execnet.HostNotFound RemoteError = execnet.RemoteError TimeoutError = execnet.TimeoutError DataFormatError = execnet.DataFormatError ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/file_sync.py0000644000076500000240000000252512754342135024664 0ustar alfredostaff00000000000000from .lib import execnet from .connection import Connection, FakeRemoteLogger class _RSync(execnet.RSync): """ Inherits from ``execnet.RSync`` so that we can log nicely with the user logger instance (if any) back with the ``_report_send_file`` method """ def __init__(self, sourcedir, callback=None, verbose=True, logger=None): self.logger = logger super(_RSync, self).__init__(sourcedir, callback, verbose) def _report_send_file(self, gateway, modified_rel_path): if self._verbose: self.logger.info("syncing file: %s" % modified_rel_path) def rsync(hosts, source, destination, logger=None, sudo=False): """ Grabs the hosts (or single host), creates the connection object for each and set the rsync execnet engine to push the files. It assumes that all of the destinations for the different hosts is the same. This deviates from what execnet does because it has the flexibility to push to different locations. """ logger = logger or FakeRemoteLogger() sync = _RSync(source, logger=logger) # setup_targets if not isinstance(hosts, list): hosts = [hosts] for host in hosts: conn = Connection( host, logger, sudo, ) sync.add_target(conn.gateway, destination) return sync.send() ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/lib/0000755000076500000240000000000013312242252023067 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/lib/__init__.py0000644000076500000240000000175612754342135025223 0ustar alfredostaff00000000000000""" This module is meant for vendorizing Python libraries. Most libraries will need to have some ``sys.path`` alterations done unless they are doing relative imports. Do **not** add anything to this module that does not represent a vendorized library. Vendored libraries should go into the ``vendor`` directory and imported from there. This is so we allow libraries that are installed normally to be imported if the vendored module is not available. The import dance here is done so that all other imports throught ceph-deploy are kept the same regardless of where the module comes from. 
The expected way to import execnet would look like this:: from remoto.lib import execnet """ import sys import os this_dir = os.path.abspath(os.path.dirname(__file__)) vendor_dir = os.path.join(this_dir, 'vendor') try: # vendored if vendor_dir not in sys.path: sys.path.insert(0, vendor_dir) import execnet except ImportError as err: # normally installed import execnet # noqa ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/lib/vendor/0000755000076500000240000000000013312242252024364 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/lib/vendor/__init__.py0000644000076500000240000000000012754342135026475 0ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/0000755000076500000240000000000013312242252026017 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/__init__.py0000644000076500000240000000206112754342136030142 0ustar alfredostaff00000000000000""" execnet: pure python lib for connecting to local and remote Python Interpreters. (c) 2012, Holger Krekel and others """ __version__ = '1.2.0' from . import apipkg apipkg.initpkg(__name__, { 'PopenGateway': '.deprecated:PopenGateway', 'SocketGateway': '.deprecated:SocketGateway', 'SshGateway': '.deprecated:SshGateway', 'makegateway': '.multi:makegateway', 'set_execmodel': '.multi:set_execmodel', 'HostNotFound': '.gateway_bootstrap:HostNotFound', 'RemoteError': '.gateway_base:RemoteError', 'TimeoutError': '.gateway_base:TimeoutError', 'XSpec': '.xspec:XSpec', 'Group': '.multi:Group', 'MultiChannel': '.multi:MultiChannel', 'RSync': '.rsync:RSync', 'default_group': '.multi:default_group', 'dumps': '.gateway_base:dumps', 'loads': '.gateway_base:loads', 'load': '.gateway_base:load', 'dump': '.gateway_base:dump', 'DataFormatError': '.gateway_base:DataFormatError', }) ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/apipkg.py0000644000076500000240000001264012754342136027662 0ustar alfredostaff00000000000000""" apipkg: control the exported namespace of a python package. see http://pypi.python.org/pypi/apipkg (c) holger krekel, 2009 - MIT license """ import os import sys from types import ModuleType __version__ = '1.2' def initpkg(pkgname, exportdefs, attr=dict()): """ initialize given package from the export definitions. 
""" oldmod = sys.modules.get(pkgname) d = {} f = getattr(oldmod, '__file__', None) if f: f = os.path.abspath(f) d['__file__'] = f if hasattr(oldmod, '__version__'): d['__version__'] = oldmod.__version__ if hasattr(oldmod, '__loader__'): d['__loader__'] = oldmod.__loader__ if hasattr(oldmod, '__path__'): d['__path__'] = [os.path.abspath(p) for p in oldmod.__path__] if '__doc__' not in exportdefs and getattr(oldmod, '__doc__', None): d['__doc__'] = oldmod.__doc__ d.update(attr) if hasattr(oldmod, "__dict__"): oldmod.__dict__.update(d) mod = ApiModule(pkgname, exportdefs, implprefix=pkgname, attr=d) sys.modules[pkgname] = mod def importobj(modpath, attrname): module = __import__(modpath, None, None, ['__doc__']) if not attrname: return module retval = module names = attrname.split(".") for x in names: retval = getattr(retval, x) return retval class ApiModule(ModuleType): def __docget(self): try: return self.__doc except AttributeError: if '__doc__' in self.__map__: return self.__makeattr('__doc__') def __docset(self, value): self.__doc = value __doc__ = property(__docget, __docset) def __init__(self, name, importspec, implprefix=None, attr=None): self.__name__ = name self.__all__ = [x for x in importspec if x != '__onfirstaccess__'] self.__map__ = {} self.__implprefix__ = implprefix or name if attr: for name, val in attr.items(): #print "setting", self.__name__, name, val setattr(self, name, val) for name, importspec in importspec.items(): if isinstance(importspec, dict): subname = '%s.%s'%(self.__name__, name) apimod = ApiModule(subname, importspec, implprefix) sys.modules[subname] = apimod setattr(self, name, apimod) else: parts = importspec.split(':') modpath = parts.pop(0) attrname = parts and parts[0] or "" if modpath[0] == '.': modpath = implprefix + modpath if not attrname: subname = '%s.%s'%(self.__name__, name) apimod = AliasModule(subname, modpath) sys.modules[subname] = apimod if '.' 
not in name: setattr(self, name, apimod) else: self.__map__[name] = (modpath, attrname) def __repr__(self): l = [] if hasattr(self, '__version__'): l.append("version=" + repr(self.__version__)) if hasattr(self, '__file__'): l.append('from ' + repr(self.__file__)) if l: return '' % (self.__name__, " ".join(l)) return '' % (self.__name__,) def __makeattr(self, name): """lazily compute value for name or raise AttributeError if unknown.""" #print "makeattr", self.__name__, name target = None if '__onfirstaccess__' in self.__map__: target = self.__map__.pop('__onfirstaccess__') importobj(*target)() try: modpath, attrname = self.__map__[name] except KeyError: if target is not None and name != '__onfirstaccess__': # retry, onfirstaccess might have set attrs return getattr(self, name) raise AttributeError(name) else: result = importobj(modpath, attrname) setattr(self, name, result) try: del self.__map__[name] except KeyError: pass # in a recursive-import situation a double-del can happen return result __getattr__ = __makeattr def __dict__(self): # force all the content of the module to be loaded when __dict__ is read dictdescr = ModuleType.__dict__['__dict__'] dict = dictdescr.__get__(self) if dict is not None: hasattr(self, 'some') for name in self.__all__: try: self.__makeattr(name) except AttributeError: pass return dict __dict__ = property(__dict__) def AliasModule(modname, modpath, attrname=None): mod = [] def getmod(): if not mod: x = importobj(modpath, None) if attrname is not None: x = getattr(x, attrname) mod.append(x) return mod[0] class AliasModule(ModuleType): def __repr__(self): x = modpath if attrname: x += "." + attrname return '' % (modname, x) def __getattribute__(self, name): return getattr(getmod(), name) def __setattr__(self, name, value): setattr(getmod(), name, value) def __delattr__(self, name): delattr(getmod(), name) return AliasModule(modname) ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/deprecated.py0000644000076500000240000000302212754342136030501 0ustar alfredostaff00000000000000""" some deprecated calls (c) 2008-2009, Holger Krekel and others """ import execnet def PopenGateway(python=None): """ instantiate a gateway to a subprocess started with the given 'python' executable. """ APIWARN("1.0.0b4", "use makegateway('popen')") spec = execnet.XSpec("popen") spec.python = python return execnet.default_group.makegateway(spec) def SocketGateway(host, port): """ This Gateway provides interaction with a remote process by connecting to a specified socket. On the remote side you need to manually start a small script (py/execnet/script/socketserver.py) that accepts SocketGateway connections or use the experimental new_remote() method on existing gateways. """ APIWARN("1.0.0b4", "use makegateway('socket=host:port')") spec = execnet.XSpec("socket=%s:%s" %(host, port)) return execnet.default_group.makegateway(spec) def SshGateway(sshaddress, remotepython=None, ssh_config=None): """ instantiate a remote ssh process with the given 'sshaddress' and remotepython version. you may specify an ssh_config file. 
""" APIWARN("1.0.0b4", "use makegateway('ssh=host')") spec = execnet.XSpec("ssh=%s" % sshaddress) spec.python = remotepython spec.ssh_config = ssh_config return execnet.default_group.makegateway(spec) def APIWARN(version, msg, stacklevel=3): import warnings Warn = DeprecationWarning("(since version %s) %s" %(version, msg)) warnings.warn(Warn, stacklevel=stacklevel) ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/gateway.py0000644000076500000240000001564212754342136030055 0ustar alfredostaff00000000000000""" gateway code for initiating popen, socket and ssh connections. (c) 2004-2013, Holger Krekel and others """ import sys, os, inspect, types, linecache import textwrap import execnet from execnet.gateway_base import Message from execnet import gateway_base importdir = os.path.dirname(os.path.dirname(execnet.__file__)) class Gateway(gateway_base.BaseGateway): """ Gateway to a local or remote Python Intepreter. """ def __init__(self, io, spec): super(Gateway, self).__init__(io=io, id=spec.id, _startcount=1) self.spec = spec self._initreceive() @property def remoteaddress(self): return self._io.remoteaddress def __repr__(self): """ return string representing gateway type and status. """ try: r = (self.hasreceiver() and 'receive-live' or 'not-receiving') i = len(self._channelfactory.channels()) except AttributeError: r = "uninitialized" i = "no" return "<%s id=%r %s, %s model, %s active channels>" %( self.__class__.__name__, self.id, r, self.execmodel.backend, i) def exit(self): """ trigger gateway exit. Defer waiting for finishing of receiver-thread and subprocess activity to when group.terminate() is called. """ self._trace("gateway.exit() called") if self not in self._group: self._trace("gateway already unregistered with group") return self._group._unregister(self) try: self._trace("--> sending GATEWAY_TERMINATE") self._send(Message.GATEWAY_TERMINATE) self._trace("--> io.close_write") self._io.close_write() except (ValueError, EOFError, IOError): v = sys.exc_info()[1] self._trace("io-error: could not send termination sequence") self._trace(" exception: %r" % v) def reconfigure(self, py2str_as_py3str=True, py3str_as_py2str=False): """ set the string coercion for this gateway the default is to try to convert py2 str as py3 str, but not to try and convert py3 str to py2 str """ self._strconfig = (py2str_as_py3str, py3str_as_py2str) data = gateway_base.dumps_internal(self._strconfig) self._send(Message.RECONFIGURE, data=data) def _rinfo(self, update=False): """ return some sys/env information from remote. """ if update or not hasattr(self, '_cache_rinfo'): ch = self.remote_exec(rinfo_source) self._cache_rinfo = RInfo(ch.receive()) return self._cache_rinfo def hasreceiver(self): """ return True if gateway is able to receive data. """ return self._receivepool.active_count() > 0 def remote_status(self): """ return information object about remote execution status. """ channel = self.newchannel() self._send(Message.STATUS, channel.id) statusdict = channel.receive() # the other side didn't actually instantiate a channel # so we just delete the internal id/channel mapping self._channelfactory._local_close(channel.id) return RemoteStatus(statusdict) def remote_exec(self, source, **kwargs): """ return channel object and connect it to a remote execution thread where the given ``source`` executes. * ``source`` is a string: execute source string remotely with a ``channel`` put into the global namespace. 
* ``source`` is a pure function: serialize source and call function with ``**kwargs``, adding a ``channel`` object to the keyword arguments. * ``source`` is a pure module: execute source of module with a ``channel`` in its global namespace. In all cases the binding ``__name__='__channelexec__'`` will be available in the global namespace of the remotely executing code. """ call_name = None if isinstance(source, types.ModuleType): linecache.updatecache(inspect.getsourcefile(source)) source = inspect.getsource(source) elif isinstance(source, types.FunctionType): call_name = source.__name__ source = _source_of_function(source) else: source = textwrap.dedent(str(source)) if call_name is None and kwargs: raise TypeError("can't pass kwargs to non-function remote_exec") channel = self.newchannel() self._send(Message.CHANNEL_EXEC, channel.id, gateway_base.dumps_internal((source, call_name, kwargs))) return channel def remote_init_threads(self, num=None): """ DEPRECATED. Is currently a NO-OPERATION already.""" print ("WARNING: remote_init_threads() is a no-operation in execnet-1.2") class RInfo: def __init__(self, kwargs): self.__dict__.update(kwargs) def __repr__(self): info = ", ".join(["%s=%s" % item for item in self.__dict__.items()]) return "<RInfo %r>" % info RemoteStatus = RInfo def rinfo_source(channel): import sys, os channel.send(dict( executable = sys.executable, version_info = sys.version_info[:5], platform = sys.platform, cwd = os.getcwd(), pid = os.getpid(), )) def _find_non_builtin_globals(source, codeobj): try: import ast except ImportError: return None try: import __builtin__ except ImportError: import builtins as __builtin__ vars = dict.fromkeys(codeobj.co_varnames) all = [] for node in ast.walk(ast.parse(source)): if (isinstance(node, ast.Name) and node.id not in vars and node.id not in __builtin__.__dict__): all.append(node.id) return all def _source_of_function(function): if function.__name__ == '<lambda>': raise ValueError("can't evaluate lambda functions") #XXX: we don't check before remote instantiation # if arguments are used properly args, varargs, keywords, defaults = inspect.getargspec(function) if args[0] != 'channel': raise ValueError('expected first function argument to be `channel`') if sys.version_info < (3,0): closure = function.func_closure codeobj = function.func_code else: closure = function.__closure__ codeobj = function.__code__ if closure is not None: raise ValueError("functions with closures can't be passed") try: source = inspect.getsource(function) except IOError: raise ValueError("can't find source file for %s" % function) source = textwrap.dedent(source) # just for inner functions used_globals = _find_non_builtin_globals(source, codeobj) if used_globals: raise ValueError( "the use of non-builtin globals isn't supported", used_globals, ) return source ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/gateway_base.py0000644000076500000240000014346612754342136031045 0ustar alfredostaff00000000000000""" base execnet gateway code sent to the other side for bootstrapping.
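# --- Illustrative sketch; not part of the vendored source. ---
# remote_exec() as documented above, fed with the two most common
# source kinds: a string and a pure function. Assumes a working
# execnet install and a local 'popen' gateway, run from a real source
# file (inspect.getsource must be able to find the function).
import execnet

gw = execnet.makegateway("popen")
ch = gw.remote_exec("channel.send(6 * 7)")   # source is a string
assert ch.receive() == 42

def add_one(channel, x):     # pure function: first argument must be 'channel'
    channel.send(x + 1)

ch = gw.remote_exec(add_one, x=41)           # kwargs only allowed for functions
assert ch.receive() == 42
gw.exit()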
NOTE: aims to be compatible to Python 2.5-3.X, Jython and IronPython (C) 2004-2013 Holger Krekel, Armin Rigo, Benjamin Peterson, Ronny Pfannschmidt and others """ from __future__ import with_statement import sys, os, weakref import traceback, struct # NOTE that we want to avoid try/except style importing # to avoid setting sys.exc_info() during import # ISPY3 = sys.version_info >= (3, 0) if ISPY3: from io import BytesIO exec("def do_exec(co, loc): exec(co, loc)\n" "def reraise(cls, val, tb): raise val\n") unicode = str _long_type = int from _thread import interrupt_main else: from StringIO import StringIO as BytesIO exec("def do_exec(co, loc): exec co in loc\n" "def reraise(cls, val, tb): raise cls, val, tb\n") bytes = str _long_type = long try: from thread import interrupt_main except ImportError: interrupt_main = None #f = open("/tmp/execnet-%s" % os.getpid(), "w") #def log_extra(*msg): # f.write(" ".join([str(x) for x in msg]) + "\n") class EmptySemaphore: acquire = release = lambda self: None def get_execmodel(backend): if hasattr(backend, "backend"): return backend if backend == "thread": importdef = { 'get_ident': ['thread::get_ident', '_thread::get_ident'], '_start_new_thread': ['thread::start_new_thread', '_thread::start_new_thread'], 'threading': ["threading",], 'queue': ["queue", "Queue"], 'sleep': ['time::sleep'], 'subprocess': ['subprocess'], 'socket': ['socket'], '_fdopen': ['os::fdopen'], '_lock': ['threading'], '_event': ['threading'], } def exec_start(self, func, args=()): self._start_new_thread(func, args) elif backend == "eventlet": importdef = { 'get_ident': ['eventlet.green.thread::get_ident'], '_spawn_n': ['eventlet::spawn_n'], 'threading': ['eventlet.green.threading'], 'queue': ["eventlet.queue"], 'sleep': ['eventlet::sleep'], 'subprocess': ['eventlet.green.subprocess'], 'socket': ['eventlet.green.socket'], '_fdopen': ['eventlet.green.os::fdopen'], '_lock': ['eventlet.green.threading'], '_event': ['eventlet.green.threading'], } def exec_start(self, func, args=()): self._spawn_n(func, *args) elif backend == "gevent": importdef = { 'get_ident': ['gevent.thread::get_ident'], '_spawn_n': ['gevent::spawn'], 'threading': ['threading'], 'queue': ["gevent.queue"], 'sleep': ['gevent::sleep'], 'subprocess': ['gevent.subprocess'], 'socket': ['gevent.socket'], # XXX '_fdopen': ['gevent.fileobject::FileObjectThread'], '_lock': ['gevent.lock'], '_event': ['gevent.event'], } def exec_start(self, func, args=()): self._spawn_n(func, *args) else: raise ValueError("unknown execmodel %r" %(backend,)) class ExecModel: def __init__(self, name): self._importdef = importdef self.backend = name self._count = 0 def __repr__(self): return "" % self.backend def __getattr__(self, name): locs = self._importdef.get(name) if locs is None: raise AttributeError(name) for loc in locs: parts = loc.split("::") loc = parts.pop(0) try: mod = __import__(loc, None, None, "__doc__") except ImportError: pass else: if parts: mod = getattr(mod, parts[0]) setattr(self, name, mod) return mod raise AttributeError(name) start = exec_start def fdopen(self, fd, mode, bufsize=1): return self._fdopen(fd, mode, bufsize) def WorkerPool(self, hasprimary=False): return WorkerPool(self, hasprimary=hasprimary) def Semaphore(self, size=None): if size is None: return EmptySemaphore() return self._lock.Semaphore(size) def Lock(self): return self._lock.RLock() def RLock(self): return self._lock.RLock() def Event(self): event = self._event.Event() if sys.version_info < (2,7): # patch wait function to return event state instead of 
None real_wait = event.wait def wait(timeout=None): real_wait(timeout=timeout) return event.isSet() event.wait = wait return event def PopenPiped(self, args): PIPE = self.subprocess.PIPE return self.subprocess.Popen(args, stdout=PIPE, stdin=PIPE) return ExecModel(backend) class Reply(object): """ reply instances provide access to the result of a function execution that got dispatched through WorkerPool.spawn() """ def __init__(self, task, threadmodel): self.task = task self._result_ready = threadmodel.Event() self.running = True def get(self, timeout=None): """ get the result object from an asynchronous function execution. if the function execution raised an exception, then calling get() will reraise that exception including its traceback. """ self.waitfinish(timeout) try: return self._result except AttributeError: reraise(*(self._excinfo[:3])) # noqa def waitfinish(self, timeout=None): if not self._result_ready.wait(timeout): raise IOError("timeout waiting for %r" %(self.task, )) def run(self): func, args, kwargs = self.task try: try: self._result = func(*args, **kwargs) except: # sys may be already None when shutting down the interpreter if sys is not None: self._excinfo = sys.exc_info() finally: self._result_ready.set() self.running = False class WorkerPool(object): """ A WorkerPool allows to spawn function executions to threads, returning a reply object on which you can ask for the result (and get exceptions reraised). This implementation allows the main thread to integrate itself into performing function execution through calling integrate_as_primary_thread() which will return when the pool received a trigger_shutdown(). """ def __init__(self, execmodel, hasprimary=False): """ by default allow unlimited number of spawns. """ self.execmodel = execmodel self._running_lock = self.execmodel.Lock() self._running = set() self._shuttingdown = False self._waitall_events = [] if hasprimary: if self.execmodel.backend != "thread": raise ValueError("hasprimary=True requires thread model") self._primary_thread_task_ready = self.execmodel.Event() else: self._primary_thread_task_ready = None def integrate_as_primary_thread(self): """ integrate the thread with which we are called as a primary thread for executing functions triggered with spawn(). 
""" assert self.execmodel.backend == "thread", self.execmodel primary_thread_task_ready = self._primary_thread_task_ready # interacts with code at REF1 while 1: primary_thread_task_ready.wait() reply = self._primary_thread_task if reply is None: # trigger_shutdown() woke us up break self._perform_spawn(reply) # we are concurrent with trigger_shutdown and spawn with self._running_lock: if self._shuttingdown: break primary_thread_task_ready.clear() def trigger_shutdown(self): with self._running_lock: self._shuttingdown = True if self._primary_thread_task_ready is not None: self._primary_thread_task = None self._primary_thread_task_ready.set() def active_count(self): return len(self._running) def _perform_spawn(self, reply): reply.run() with self._running_lock: self._running.remove(reply) if not self._running: while self._waitall_events: waitall_event = self._waitall_events.pop() waitall_event.set() def _try_send_to_primary_thread(self, reply): # REF1 in 'thread' model we give priority to running in main thread # note that we should be called with _running_lock hold primary_thread_task_ready = self._primary_thread_task_ready if primary_thread_task_ready is not None: if not primary_thread_task_ready.isSet(): self._primary_thread_task = reply # wake up primary thread primary_thread_task_ready.set() return True return False def spawn(self, func, *args, **kwargs): """ return Reply object for the asynchronous dispatch of the given func(*args, **kwargs). """ reply = Reply((func, args, kwargs), self.execmodel) with self._running_lock: if self._shuttingdown: raise ValueError("pool is shutting down") self._running.add(reply) if not self._try_send_to_primary_thread(reply): self.execmodel.start(self._perform_spawn, (reply,)) return reply def terminate(self, timeout=None): """ trigger shutdown and wait for completion of all executions. """ self.trigger_shutdown() return self.waitall(timeout=timeout) def waitall(self, timeout=None): """ wait until all active spawns have finished executing. 
""" with self._running_lock: if not self._running: return True # if a Reply still runs, we let run_and_release # signal us -- note that we are still holding the # _running_lock to avoid race conditions my_waitall_event = self.execmodel.Event() self._waitall_events.append(my_waitall_event) return my_waitall_event.wait(timeout=timeout) sysex = (KeyboardInterrupt, SystemExit) DEBUG = os.environ.get('EXECNET_DEBUG') pid = os.getpid() if DEBUG == '2': def trace(*msg): try: line = " ".join(map(str, msg)) sys.stderr.write("[%s] %s\n" % (pid, line)) sys.stderr.flush() except Exception: pass # nothing we can do, likely interpreter-shutdown elif DEBUG: import tempfile, os.path fn = os.path.join(tempfile.gettempdir(), 'execnet-debug-%d' % pid) #sys.stderr.write("execnet-debug at %r" %(fn,)) debugfile = open(fn, 'w') def trace(*msg): try: line = " ".join(map(str, msg)) debugfile.write(line + "\n") debugfile.flush() except Exception: try: v = sys.exc_info()[1] sys.stderr.write( "[%s] exception during tracing: %r\n" % (pid, v)) except Exception: pass # nothing we can do, likely interpreter-shutdown else: notrace = trace = lambda *msg: None class Popen2IO: error = (IOError, OSError, EOFError) def __init__(self, outfile, infile, execmodel): # we need raw byte streams self.outfile, self.infile = outfile, infile if sys.platform == "win32": import msvcrt try: msvcrt.setmode(infile.fileno(), os.O_BINARY) msvcrt.setmode(outfile.fileno(), os.O_BINARY) except (AttributeError, IOError): pass self._read = getattr(infile, "buffer", infile).read self._write = getattr(outfile, "buffer", outfile).write self.execmodel = execmodel def read(self, numbytes): """Read exactly 'numbytes' bytes from the pipe. """ # a file in non-blocking mode may return less bytes, so we loop buf = bytes() while numbytes > len(buf): data = self._read(numbytes-len(buf)) if not data: raise EOFError("expected %d bytes, got %d" %(numbytes, len(buf))) buf += data return buf def write(self, data): """write out all data bytes. """ assert isinstance(data, bytes) self._write(data) self.outfile.flush() def close_read(self): self.infile.close() def close_write(self): self.outfile.close() class Message: """ encapsulates Messages and their wire protocol. """ _types = [] def __init__(self, msgcode, channelid=0, data=''): self.msgcode = msgcode self.channelid = channelid self.data = data @staticmethod def from_io(io): try: header = io.read(9) # type 1, channel 4, payload 4 if not header: raise EOFError("empty read") except EOFError: e = sys.exc_info()[1] raise EOFError('couldnt load message header, ' + e.args[0]) msgtype, channel, payload = struct.unpack('!bii', header) return Message(msgtype, channel, io.read(payload)) def to_io(self, io): if struct.pack is not None: header = struct.pack('!bii', self.msgcode, self.channelid, len(self.data)) io.write(header+self.data) def received(self, gateway): self._types[self.msgcode](self, gateway) def __repr__(self): name = self._types[self.msgcode].__name__.upper() return "" %( name, self.channelid, len(self.data)) class GatewayReceivedTerminate(Exception): """ Receiverthread got termination message. 
""" def _setupmessages(): def status(message, gateway): # we use the channelid to send back information # but don't instantiate a channel object d = {'numchannels': len(gateway._channelfactory._channels), 'numexecuting': gateway._execpool.active_count(), 'execmodel': gateway.execmodel.backend, } gateway._send(Message.CHANNEL_DATA, message.channelid, dumps_internal(d)) gateway._send(Message.CHANNEL_CLOSE, message.channelid) def channel_exec(message, gateway): channel = gateway._channelfactory.new(message.channelid) gateway._local_schedulexec(channel=channel,sourcetask=message.data) def channel_data(message, gateway): gateway._channelfactory._local_receive(message.channelid, message.data) def channel_close(message, gateway): gateway._channelfactory._local_close(message.channelid) def channel_close_error(message, gateway): remote_error = RemoteError(loads_internal(message.data)) gateway._channelfactory._local_close(message.channelid, remote_error) def channel_last_message(message, gateway): gateway._channelfactory._local_close(message.channelid, sendonly=True) def gateway_terminate(message, gateway): raise GatewayReceivedTerminate(gateway) def reconfigure(message, gateway): if message.channelid == 0: target = gateway else: target = gateway._channelfactory.new(message.channelid) target._strconfig = loads_internal(message.data, gateway) types = [ status, reconfigure, gateway_terminate, channel_exec, channel_data, channel_close, channel_close_error, channel_last_message, ] for i, handler in enumerate(types): Message._types.append(handler) setattr(Message, handler.__name__.upper(), i) _setupmessages() def geterrortext(excinfo, format_exception=traceback.format_exception, sysex=sysex): try: l = format_exception(*excinfo) errortext = "".join(l) except sysex: raise except: errortext = '%s: %s' % (excinfo[0].__name__, excinfo[1]) return errortext class RemoteError(Exception): """ Exception containing a stringified error from the other side. """ def __init__(self, formatted): self.formatted = formatted Exception.__init__(self) def __str__(self): return self.formatted def __repr__(self): return "%s: %s" %(self.__class__.__name__, self.formatted) def warn(self): if self.formatted != INTERRUPT_TEXT: # XXX do this better sys.stderr.write("[%s] Warning: unhandled %r\n" % (os.getpid(), self,)) class TimeoutError(IOError): """ Exception indicating that a timeout was reached. """ NO_ENDMARKER_WANTED = object() class Channel(object): """Communication channel between two Python Interpreter execution points.""" RemoteError = RemoteError TimeoutError = TimeoutError _INTERNALWAKEUP = 1000 _executing = False def __init__(self, gateway, id): assert isinstance(id, int) self.gateway = gateway #XXX: defaults copied from Unserializer self._strconfig = getattr(gateway, '_strconfig', (True, False)) self.id = id self._items = self.gateway.execmodel.queue.Queue() self._closed = False self._receiveclosed = self.gateway.execmodel.Event() self._remoteerrors = [] def _trace(self, *msg): self.gateway._trace(self.id, *msg) def setcallback(self, callback, endmarker=NO_ENDMARKER_WANTED): """ set a callback function for receiving items. All already queued items will immediately trigger the callback. Afterwards the callback will execute in the receiver thread for each received data item and calls to ``receive()`` will raise an error. If an endmarker is specified the callback will eventually be called with the endmarker when the channel closes. 
""" _callbacks = self.gateway._channelfactory._callbacks with self.gateway._receivelock: if self._items is None: raise IOError("%r has callback already registered" %(self,)) items = self._items self._items = None while 1: try: olditem = items.get(block=False) except self.gateway.execmodel.queue.Empty: if not (self._closed or self._receiveclosed.isSet()): _callbacks[self.id] = ( callback, endmarker, self._strconfig, ) break else: if olditem is ENDMARKER: items.put(olditem) # for other receivers if endmarker is not NO_ENDMARKER_WANTED: callback(endmarker) break else: callback(olditem) def __repr__(self): flag = self.isclosed() and "closed" or "open" return "" % (self.id, flag) def __del__(self): if self.gateway is None: # can be None in tests return self._trace("channel.__del__") # no multithreading issues here, because we have the last ref to 'self' if self._closed: # state transition "closed" --> "deleted" for error in self._remoteerrors: error.warn() elif self._receiveclosed.isSet(): # state transition "sendonly" --> "deleted" # the remote channel is already in "deleted" state, nothing to do pass else: # state transition "opened" --> "deleted" # check if we are in the middle of interpreter shutdown # in which case the process will go away and we probably # don't need to try to send a closing or last message # (and often it won't work anymore to send things out) if Message is not None: if self._items is None: # has_callback msgcode = Message.CHANNEL_LAST_MESSAGE else: msgcode = Message.CHANNEL_CLOSE try: self.gateway._send(msgcode, self.id) except (IOError, ValueError): # ignore problems with sending pass def _getremoteerror(self): try: return self._remoteerrors.pop(0) except IndexError: try: return self.gateway._error except AttributeError: pass return None # # public API for channel objects # def isclosed(self): """ return True if the channel is closed. A closed channel may still hold items. """ return self._closed def makefile(self, mode='w', proxyclose=False): """ return a file-like object. mode can be 'w' or 'r' for writeable/readable files. if proxyclose is true file.close() will also close the channel. """ if mode == "w": return ChannelFileWrite(channel=self, proxyclose=proxyclose) elif mode == "r": return ChannelFileRead(channel=self, proxyclose=proxyclose) raise ValueError("mode %r not availabe" %(mode,)) def close(self, error=None): """ close down this channel with an optional error message. Note that closing of a channel tied to remote_exec happens automatically at the end of execution and cannot be done explicitely. """ if self._executing: raise IOError("cannot explicitly close channel within remote_exec") if self._closed: self.gateway._trace(self, "ignoring redundant call to close()") if not self._closed: # state transition "opened/sendonly" --> "closed" # threads warning: the channel might be closed under our feet, # but it's never damaging to send too many CHANNEL_CLOSE messages # however, if the other side triggered a close already, we # do not send back a closed message. 
if not self._receiveclosed.isSet(): put = self.gateway._send if error is not None: put(Message.CHANNEL_CLOSE_ERROR, self.id, dumps_internal(error)) else: put(Message.CHANNEL_CLOSE, self.id) self._trace("sent channel close message") if isinstance(error, RemoteError): self._remoteerrors.append(error) self._closed = True # --> "closed" self._receiveclosed.set() queue = self._items if queue is not None: queue.put(ENDMARKER) self.gateway._channelfactory._no_longer_opened(self.id) def waitclose(self, timeout=None): """ wait until this channel is closed (or the remote side otherwise signalled that no more data was being sent). The channel may still hold receiveable items, but not receive any more after waitclose() has returned. Exceptions from executing code on the other side are reraised as local channel.RemoteErrors. EOFError is raised if the reading-connection was prematurely closed, which often indicates a dying process. self.TimeoutError is raised after the specified number of seconds (default is None, i.e. wait indefinitely). """ self._receiveclosed.wait(timeout=timeout) # wait for non-"opened" state if not self._receiveclosed.isSet(): raise self.TimeoutError("Timeout after %r seconds" % timeout) error = self._getremoteerror() if error: raise error def send(self, item): """sends the given item to the other side of the channel, possibly blocking if the sender queue is full. The item must be a simple python type and will be copied to the other side by value. IOError is raised if the write pipe was prematurely closed. """ if self.isclosed(): raise IOError("cannot send to %r" %(self,)) self.gateway._send(Message.CHANNEL_DATA, self.id, dumps_internal(item)) def receive(self, timeout=None): """receive a data item that was sent from the other side. timeout: None [default] blocked waiting. A positive number indicates the number of seconds after which a channel.TimeoutError exception will be raised if no item was received. Note that exceptions from the remotely executing code will be reraised as channel.RemoteError exceptions containing a textual representation of the remote traceback. """ itemqueue = self._items if itemqueue is None: raise IOError("cannot receive(), channel has receiver callback") try: x = itemqueue.get(timeout=timeout) except self.gateway.execmodel.queue.Empty: raise self.TimeoutError("no item after %r seconds" %(timeout)) if x is ENDMARKER: itemqueue.put(x) # for other receivers raise self._getremoteerror() or EOFError() else: return x def __iter__(self): return self def next(self): try: return self.receive() except EOFError: raise StopIteration __next__ = next def reconfigure(self, py2str_as_py3str=True, py3str_as_py2str=False): """ set the string coercion for this channel the default is to try to convert py2 str as py3 str, but not to try and convert py3 str to py2 str """ self._strconfig = (py2str_as_py3str, py3str_as_py2str) data = dumps_internal(self._strconfig) self.gateway._send(Message.RECONFIGURE, self.id, data=data) ENDMARKER = object() INTERRUPT_TEXT = "keyboard-interrupted" class ChannelFactory(object): def __init__(self, gateway, startcount=1): self._channels = weakref.WeakValueDictionary() self._callbacks = {} self._writelock = gateway.execmodel.Lock() self.gateway = gateway self.count = startcount self.finished = False self._list = list # needed during interp-shutdown def new(self, id=None): """ create a new Channel with 'id' (or create new id if None). 
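# --- Illustrative sketch; not part of the vendored source. ---
# The public channel API defined above: send() copies simple builtin
# values by value, receive() blocks with an optional timeout and
# reraises remote failures as channel.RemoteError, and waitclose()
# waits for the remote side to finish. Assumes a local popen gateway.
import execnet

gw = execnet.makegateway("popen")
ch = gw.remote_exec("channel.send(channel.receive() * 2)")
ch.send(21)                           # builtin types only, no instances
assert ch.receive(timeout=10) == 42   # channel.TimeoutError if nothing arrives
ch.waitclose(timeout=10)              # reraises remote errors, if any
gw.exit()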
""" with self._writelock: if self.finished: raise IOError("connexion already closed: %s" % (self.gateway,)) if id is None: id = self.count self.count += 2 try: channel = self._channels[id] except KeyError: channel = self._channels[id] = Channel(self.gateway, id) return channel def channels(self): return self._list(self._channels.values()) # # internal methods, called from the receiver thread # def _no_longer_opened(self, id): try: del self._channels[id] except KeyError: pass try: callback, endmarker, strconfig = self._callbacks.pop(id) except KeyError: pass else: if endmarker is not NO_ENDMARKER_WANTED: callback(endmarker) def _local_close(self, id, remoteerror=None, sendonly=False): channel = self._channels.get(id) if channel is None: # channel already in "deleted" state if remoteerror: remoteerror.warn() self._no_longer_opened(id) else: # state transition to "closed" state if remoteerror: channel._remoteerrors.append(remoteerror) queue = channel._items if queue is not None: queue.put(ENDMARKER) self._no_longer_opened(id) if not sendonly: # otherwise #--> "sendonly" channel._closed = True # --> "closed" channel._receiveclosed.set() def _local_receive(self, id, data): # executes in receiver thread channel = self._channels.get(id) try: callback, endmarker, strconfig = self._callbacks[id] except KeyError: queue = channel and channel._items if queue is None: pass # drop data else: item = loads_internal(data, channel) queue.put(item) else: try: data = loads_internal(data, channel, strconfig) callback(data) # even if channel may be already closed except Exception: excinfo = sys.exc_info() self.gateway._trace("exception during callback: %s" % excinfo[1]) errortext = self.gateway._geterrortext(excinfo) self.gateway._send(Message.CHANNEL_CLOSE_ERROR, id, dumps_internal(errortext)) self._local_close(id, errortext) def _finished_receiving(self): with self._writelock: self.finished = True for id in self._list(self._channels): self._local_close(id, sendonly=True) for id in self._list(self._callbacks): self._no_longer_opened(id) class ChannelFile(object): def __init__(self, channel, proxyclose=True): self.channel = channel self._proxyclose = proxyclose def isatty(self): return False def close(self): if self._proxyclose: self.channel.close() def __repr__(self): state = self.channel.isclosed() and 'closed' or 'open' return '' %(self.channel.id, state) class ChannelFileWrite(ChannelFile): def write(self, out): self.channel.send(out) def flush(self): pass class ChannelFileRead(ChannelFile): def __init__(self, channel, proxyclose=True): super(ChannelFileRead, self).__init__(channel, proxyclose) self._buffer = None def read(self, n): try: if self._buffer is None: self._buffer = self.channel.receive() while len(self._buffer) < n: self._buffer += self.channel.receive() except EOFError: self.close() if self._buffer is None: ret = "" else: ret = self._buffer[:n] self._buffer = self._buffer[n:] return ret def readline(self): if self._buffer is not None: i = self._buffer.find("\n") if i != -1: return self.read(i+1) line = self.read(len(self._buffer)+1) else: line = self.read(1) while line and line[-1] != "\n": c = self.read(1) if not c: break line += c return line class BaseGateway(object): exc_info = sys.exc_info _sysex = sysex id = "" def __init__(self, io, id, _startcount=2): self.execmodel = io.execmodel self._io = io self.id = id self._strconfig = (Unserializer.py2str_as_py3str, Unserializer.py3str_as_py2str) self._channelfactory = ChannelFactory(self, _startcount) self._receivelock = self.execmodel.RLock() # 
globals may be NONE at process-termination self.__trace = trace self._geterrortext = geterrortext self._receivepool = self.execmodel.WorkerPool() def _trace(self, *msg): self.__trace(self.id, *msg) def _initreceive(self): self._receivepool.spawn(self._thread_receiver) def _thread_receiver(self): def log(*msg): self._trace("[receiver-thread]", *msg) log("RECEIVERTHREAD: starting to run") io = self._io try: while 1: msg = Message.from_io(io) log("received", msg) with self._receivelock: msg.received(self) del msg except (KeyboardInterrupt, GatewayReceivedTerminate): pass except EOFError: log("EOF without prior gateway termination message") self._error = self.exc_info()[1] except Exception: log(self._geterrortext(self.exc_info())) log('finishing receiving thread') # wake up and terminate any execution waiting to receive self._channelfactory._finished_receiving() log('terminating execution') self._terminate_execution() log('closing read') self._io.close_read() log('closing write') self._io.close_write() log('terminating our receive pseudo pool') self._receivepool.trigger_shutdown() def _terminate_execution(self): pass def _send(self, msgcode, channelid=0, data=bytes()): message = Message(msgcode, channelid, data) try: message.to_io(self._io) self._trace('sent', message) except (IOError, ValueError): e = sys.exc_info()[1] self._trace('failed to send', message, e) # ValueError might be because the IO is already closed raise IOError("cannot send (already closed?)") def _local_schedulexec(self, channel, sourcetask): channel.close("execution disallowed") # _____________________________________________________________________ # # High Level Interface # _____________________________________________________________________ # def newchannel(self): """ return a new independent channel. """ return self._channelfactory.new() def join(self, timeout=None): """ Wait for receiverthread to terminate. """ self._trace("waiting for receiver thread to finish") self._receivepool.waitall() class SlaveGateway(BaseGateway): def _local_schedulexec(self, channel, sourcetask): sourcetask = loads_internal(sourcetask) self._execpool.spawn(self.executetask, ((channel, sourcetask))) def _terminate_execution(self): # called from receiverthread self._trace("shutting down execution pool") self._execpool.trigger_shutdown() if not self._execpool.waitall(5.0): self._trace("execution ongoing after 5 secs, trying interrupt_main") # We try hard to terminate execution based on the assumption # that there is only one gateway object running per-process. 
if sys.platform != "win32": self._trace("sending ourselves a SIGINT") os.kill(os.getpid(), 2) # send ourselves a SIGINT elif interrupt_main is not None: self._trace("calling interrupt_main()") interrupt_main() if not self._execpool.waitall(10.0): self._trace("execution did not finish in another 10 secs, " "calling os._exit()") os._exit(1) def serve(self): trace = lambda msg: self._trace("[serve] " + msg) hasprimary = self.execmodel.backend == "thread" self._execpool = self.execmodel.WorkerPool(hasprimary=hasprimary) trace("spawning receiver thread") self._initreceive() try: if hasprimary: # this will return when we are in shutdown trace("integrating as primary thread") self._execpool.integrate_as_primary_thread() trace("joining receiver thread") self.join() except KeyboardInterrupt: # in the slave we can't really do anything sensible trace("swallowing keyboardinterrupt, serve finished") def executetask(self, item): try: channel, (source, call_name, kwargs) = item if not ISPY3 and kwargs: # some python2 versions do not accept unicode keyword params # note: Unserializer generally turns py2-str to py3-str objects newkwargs = {} for name, value in kwargs.items(): if isinstance(name, unicode): name = name.encode('ascii') newkwargs[name] = value kwargs = newkwargs loc = {'channel' : channel, '__name__': '__channelexec__'} self._trace("execution starts[%s]: %s" % (channel.id, repr(source)[:50])) channel._executing = True try: co = compile(source+'\n', '', 'exec') do_exec(co, loc) # noqa if call_name: self._trace('calling %s(**%60r)' % (call_name, kwargs)) function = loc[call_name] function(channel, **kwargs) finally: channel._executing = False self._trace("execution finished") except KeyboardInterrupt: channel.close(INTERRUPT_TEXT) raise except: excinfo = self.exc_info() if not isinstance(excinfo[1], EOFError): if not channel.gateway._channelfactory.finished: self._trace("got exception: %r" % (excinfo[1],)) errortext = self._geterrortext(excinfo) channel.close(errortext) return self._trace("ignoring EOFError because receiving finished") channel.close() # # Cross-Python pickling code, tested from test_serializer.py # class DataFormatError(Exception): pass class DumpError(DataFormatError): """Error while serializing an object.""" class LoadError(DataFormatError): """Error while unserializing an object.""" if ISPY3: def bchr(n): return bytes([n]) else: bchr = chr DUMPFORMAT_VERSION = bchr(1) FOUR_BYTE_INT_MAX = 2147483647 FLOAT_FORMAT = "!d" FLOAT_FORMAT_SIZE = struct.calcsize(FLOAT_FORMAT) class _Stop(Exception): pass class Unserializer(object): num2func = {} # is filled after this class definition py2str_as_py3str = True # True py3str_as_py2str = False # false means py2 will get unicode def __init__(self, stream, channel_or_gateway=None, strconfig=None): gateway = getattr(channel_or_gateway, 'gateway', channel_or_gateway) strconfig = getattr(channel_or_gateway, '_strconfig', strconfig) if strconfig: self.py2str_as_py3str, self.py3str_as_py2str = strconfig self.stream = stream self.channelfactory = getattr(gateway, '_channelfactory', gateway) def load(self, versioned=False): if versioned: ver = self.stream.read(1) if ver != DUMPFORMAT_VERSION: raise LoadError("wrong dumpformat version %r" % ver) self.stack = [] try: while True: opcode = self.stream.read(1) if not opcode: raise EOFError try: loader = self.num2func[opcode] except KeyError: raise LoadError("unkown opcode %r - " "wire protocol corruption?" 
% (opcode,)) loader(self) except _Stop: if len(self.stack) != 1: raise LoadError("internal unserialization error") return self.stack.pop(0) else: raise LoadError("didn't get STOP") def load_none(self): self.stack.append(None) def load_true(self): self.stack.append(True) def load_false(self): self.stack.append(False) def load_int(self): i = self._read_int4() self.stack.append(i) def load_longint(self): s = self._read_byte_string() self.stack.append(int(s)) if ISPY3: load_long = load_int load_longlong = load_longint else: def load_long(self): i = self._read_int4() self.stack.append(long(i)) def load_longlong(self): l = self._read_byte_string() self.stack.append(long(l)) def load_float(self): binary = self.stream.read(FLOAT_FORMAT_SIZE) self.stack.append(struct.unpack(FLOAT_FORMAT, binary)[0]) def _read_int4(self): return struct.unpack("!i", self.stream.read(4))[0] def _read_byte_string(self): length = self._read_int4() as_bytes = self.stream.read(length) return as_bytes def load_py3string(self): as_bytes = self._read_byte_string() if not ISPY3 and self.py3str_as_py2str: # XXX Should we try to decode into latin-1? self.stack.append(as_bytes) else: self.stack.append(as_bytes.decode("utf-8")) def load_py2string(self): as_bytes = self._read_byte_string() if ISPY3 and self.py2str_as_py3str: s = as_bytes.decode("latin-1") else: s = as_bytes self.stack.append(s) def load_bytes(self): s = self._read_byte_string() self.stack.append(s) def load_unicode(self): self.stack.append(self._read_byte_string().decode("utf-8")) def load_newlist(self): length = self._read_int4() self.stack.append([None] * length) def load_setitem(self): if len(self.stack) < 3: raise LoadError("not enough items for setitem") value = self.stack.pop() key = self.stack.pop() self.stack[-1][key] = value def load_newdict(self): self.stack.append({}) def _load_collection(self, type_): length = self._read_int4() if length: res = type_(self.stack[-length:]) del self.stack[-length:] self.stack.append(res) else: self.stack.append(type_()) def load_buildtuple(self): self._load_collection(tuple) def load_set(self): self._load_collection(set) def load_frozenset(self): self._load_collection(frozenset) def load_stop(self): raise _Stop def load_channel(self): id = self._read_int4() newchannel = self.channelfactory.new(id) self.stack.append(newchannel) # automatically build opcodes and byte-encoding class opcode: """ container for name -> num mappings. """ def _buildopcodes(): l = [] for name, func in Unserializer.__dict__.items(): if name.startswith("load_"): opname = name[5:].upper() l.append((opname, func)) l.sort() for i,(opname, func) in enumerate(l): assert i < 26, "xxx" i = bchr(64+i) Unserializer.num2func[i] = func setattr(opcode, opname, i) _buildopcodes() def dumps(obj): """ return a serialized bytestring of the given obj. The obj and all contained objects must be of a builtin python type (so nested dicts, sets, etc. are all ok but not user-level instances). """ return _Serializer().save(obj, versioned=True) def dump(byteio, obj): """ write a serialized bytestring of the given obj to the given stream. """ _Serializer(write=byteio.write).save(obj, versioned=True) def loads(bytestring, py2str_as_py3str=False, py3str_as_py2str=False): """ return the object as deserialized from the given bytestring. py2str_as_py3str: if true then string (str) objects previously dumped on Python2 will be loaded as Python3 strings which really are text objects. 
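# --- Illustrative sketch; not part of the vendored source. ---
# A round-trip through the serializer documented above: dumps()
# accepts nested builtin types only, and loads() raises
# execnet.DataFormatError on a corrupted or version-incompatible
# bytestring. Assumes execnet is importable.
import execnet

blob = execnet.dumps({"key": [1, 2.5, ("x", None)]})
assert execnet.loads(blob) == {"key": [1, 2.5, ("x", None)]}
try:
    execnet.loads(b"corrupted")        # wrong dumpformat version byte
except execnet.DataFormatError:
    pass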
py3str_as_py2str: if true then string (str) objects previously dumped on Python3 will be loaded as Python2 strings instead of unicode objects. if the bytestring was dumped with an incompatible protocol version or if the bytestring is corrupted, the ``execnet.DataFormatError`` will be raised. """ io = BytesIO(bytestring) return load(io, py2str_as_py3str=py2str_as_py3str, py3str_as_py2str=py3str_as_py2str) def load(io, py2str_as_py3str=False, py3str_as_py2str=False): """ derserialize an object form the specified stream. Behaviour and parameters are otherwise the same as with ``loads`` """ strconfig=(py2str_as_py3str, py3str_as_py2str) return Unserializer(io, strconfig=strconfig).load(versioned=True) def loads_internal(bytestring, channelfactory=None, strconfig=None): io = BytesIO(bytestring) return Unserializer(io, channelfactory, strconfig).load() def dumps_internal(obj): return _Serializer().save(obj) class _Serializer(object): _dispatch = {} def __init__(self, write=None): if write is None: self._streamlist = [] write = self._streamlist.append self._write = write def save(self, obj, versioned=False): # calling here is not re-entrant but multiple instances # may write to the same stream because of the common platform # atomic-write guaruantee (concurrent writes each happen atomicly) if versioned: self._write(DUMPFORMAT_VERSION) self._save(obj) self._write(opcode.STOP) try: streamlist = self._streamlist except AttributeError: return None return type(streamlist[0])().join(streamlist) def _save(self, obj): tp = type(obj) try: dispatch = self._dispatch[tp] except KeyError: methodname = 'save_' + tp.__name__ meth = getattr(self.__class__, methodname, None) if meth is None: raise DumpError("can't serialize %s" % (tp,)) dispatch = self._dispatch[tp] = meth dispatch(self, obj) def save_NoneType(self, non): self._write(opcode.NONE) def save_bool(self, boolean): if boolean: self._write(opcode.TRUE) else: self._write(opcode.FALSE) def save_bytes(self, bytes_): self._write(opcode.BYTES) self._write_byte_sequence(bytes_) if ISPY3: def save_str(self, s): self._write(opcode.PY3STRING) self._write_unicode_string(s) else: def save_str(self, s): self._write(opcode.PY2STRING) self._write_byte_sequence(s) def save_unicode(self, s): self._write(opcode.UNICODE) self._write_unicode_string(s) def _write_unicode_string(self, s): try: as_bytes = s.encode("utf-8") except UnicodeEncodeError: raise DumpError("strings must be utf-8 encodable") self._write_byte_sequence(as_bytes) def _write_byte_sequence(self, bytes_): self._write_int4(len(bytes_), "string is too long") self._write(bytes_) def _save_integral(self, i, short_op, long_op): if i <= FOUR_BYTE_INT_MAX: self._write(short_op) self._write_int4(i) else: self._write(long_op) self._write_byte_sequence(str(i).rstrip("L").encode("ascii")) def save_int(self, i): self._save_integral(i, opcode.INT, opcode.LONGINT) def save_long(self, l): self._save_integral(l, opcode.LONG, opcode.LONGLONG) def save_float(self, flt): self._write(opcode.FLOAT) self._write(struct.pack(FLOAT_FORMAT, flt)) def _write_int4(self, i, error="int must be less than %i" % (FOUR_BYTE_INT_MAX,)): if i > FOUR_BYTE_INT_MAX: raise DumpError(error) self._write(struct.pack("!i", i)) def save_list(self, L): self._write(opcode.NEWLIST) self._write_int4(len(L), "list is too long") for i, item in enumerate(L): self._write_setitem(i, item) def _write_setitem(self, key, value): self._save(key) self._save(value) self._write(opcode.SETITEM) def save_dict(self, d): self._write(opcode.NEWDICT) for key, value in 
d.items(): self._write_setitem(key, value) def save_tuple(self, tup): for item in tup: self._save(item) self._write(opcode.BUILDTUPLE) self._write_int4(len(tup), "tuple is too long") def _write_set(self, s, op): for item in s: self._save(item) self._write(op) self._write_int4(len(s), "set is too long") def save_set(self, s): self._write_set(s, opcode.SET) def save_frozenset(self, s): self._write_set(s, opcode.FROZENSET) def save_Channel(self, channel): self._write(opcode.CHANNEL) self._write_int4(channel.id) def init_popen_io(execmodel): if not hasattr(os, 'dup'): # jython io = Popen2IO(sys.stdout, sys.stdin, execmodel) import tempfile sys.stdin = tempfile.TemporaryFile('r') sys.stdout = tempfile.TemporaryFile('w') else: try: devnull = os.devnull except AttributeError: if os.name == 'nt': devnull = 'NUL' else: devnull = '/dev/null' # stdin stdin = execmodel.fdopen(os.dup(0), 'r', 1) fd = os.open(devnull, os.O_RDONLY) os.dup2(fd, 0) os.close(fd) # stdout stdout = execmodel.fdopen(os.dup(1), 'w', 1) fd = os.open(devnull, os.O_WRONLY) os.dup2(fd, 1) # stderr for win32 if os.name == 'nt': sys.stderr = execmodel.fdopen(os.dup(2), 'w', 1) os.dup2(fd, 2) os.close(fd) io = Popen2IO(stdout, stdin, execmodel) sys.stdin = execmodel.fdopen(0, 'r', 1) sys.stdout = execmodel.fdopen(1, 'w', 1) return io def serve(io, id): trace("creating slavegateway on %r" %(io,)) SlaveGateway(io=io, id=id, _startcount=2).serve() ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/gateway_bootstrap.py0000644000076500000240000000505412754342136032146 0ustar alfredostaff00000000000000""" code to initialize the remote side of a gateway once the io is created """ import os import inspect import execnet from execnet import gateway_base from execnet.gateway import Gateway importdir = os.path.dirname(os.path.dirname(execnet.__file__)) class HostNotFound(Exception): pass def bootstrap_popen(io, spec): sendexec(io, "import sys", "sys.path.insert(0, %r)" % importdir, "from execnet.gateway_base import serve, init_popen_io, get_execmodel", "sys.stdout.write('1')", "sys.stdout.flush()", "execmodel = get_execmodel(%r)" % spec.execmodel, "serve(init_popen_io(execmodel), id='%s-slave')" % spec.id, ) s = io.read(1) assert s == "1".encode('ascii'), repr(s) def bootstrap_ssh(io, spec): try: sendexec(io, inspect.getsource(gateway_base), "execmodel = get_execmodel(%r)" % spec.execmodel, 'io = init_popen_io(execmodel)', "io.write('1'.encode('ascii'))", "serve(io, id='%s-slave')" % spec.id, ) s = io.read(1) assert s == "1".encode('ascii') except EOFError: ret = io.wait() if ret == 255: raise HostNotFound(io.remoteaddress) def bootstrap_socket(io, id): #XXX: switch to spec from execnet.gateway_socket import SocketIO sendexec(io, inspect.getsource(gateway_base), 'import socket', inspect.getsource(SocketIO), "try: execmodel", "except NameError:", " execmodel = get_execmodel('thread')", "io = SocketIO(clientsock, execmodel)", "io.write('1'.encode('ascii'))", "serve(io, id='%s-slave')" % id, ) s = io.read(1) assert s == "1".encode('ascii') def sendexec(io, *sources): source = "\n".join(sources) io.write((repr(source)+ "\n").encode('ascii')) def fix_pid_for_jython_popen(gw): """ fix for jython 2.5.1 """ spec, io = gw.spec, gw._io if spec.popen and not spec.via: #XXX: handle the case of remote being jython # and not having the popen pid if io.popen.pid is None: io.popen.pid = gw.remote_exec( "import os; channel.send(os.getpid())").receive() def bootstrap(io, spec): if spec.popen: bootstrap_popen(io, spec) elif spec.ssh: 
bootstrap_ssh(io, spec) elif spec.socket: bootstrap_socket(io, spec) else: raise ValueError('unknown gateway type, cant bootstrap') gw = Gateway(io, spec) fix_pid_for_jython_popen(gw) return gw ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/gateway_io.py0000644000076500000240000001450712754342136030543 0ustar alfredostaff00000000000000""" execnet io initialization code creates io instances used for gateway io """ import os import sys try: from execnet.gateway_base import Popen2IO, Message except ImportError: from __main__ import Popen2IO, Message class Popen2IOMaster(Popen2IO): def __init__(self, args, execmodel): self.popen = p = execmodel.PopenPiped(args) Popen2IO.__init__(self, p.stdin, p.stdout, execmodel=execmodel) def wait(self): try: return self.popen.wait() except OSError: pass # subprocess probably dead already def kill(self): killpopen(self.popen) def killpopen(popen): try: if hasattr(popen, 'kill'): popen.kill() else: killpid(popen.pid) except EnvironmentError: sys.stderr.write("ERROR killing: %s\n" %(sys.exc_info()[1])) sys.stderr.flush() def killpid(pid): if hasattr(os, 'kill'): os.kill(pid, 15) elif sys.platform == "win32" or getattr(os, '_name', None) == 'nt': try: import ctypes except ImportError: import subprocess # T: treekill, F: Force cmd = ("taskkill /T /F /PID %d" %(pid)).split() ret = subprocess.call(cmd) if ret != 0: raise EnvironmentError("taskkill returned %r" %(ret,)) else: PROCESS_TERMINATE = 1 handle = ctypes.windll.kernel32.OpenProcess( PROCESS_TERMINATE, False, pid) ctypes.windll.kernel32.TerminateProcess(handle, -1) ctypes.windll.kernel32.CloseHandle(handle) else: raise EnvironmentError("no method to kill %s" %(pid,)) popen_bootstrapline = "import sys;exec(eval(sys.stdin.readline()))" def popen_args(spec): python = spec.python or sys.executable args = str(python).split(' ') args.append('-u') if spec is not None and spec.dont_write_bytecode: args.append("-B") # Slight gymnastics in ordering these arguments because CPython (as of # 2.7.1) ignores -B if you provide `python -c "something" -B` args.extend(['-c', popen_bootstrapline]) return args def ssh_args(spec): remotepython = spec.python or "python" args = ["ssh", "-C" ] if spec.ssh_config is not None: args.extend(['-F', str(spec.ssh_config)]) args.extend(spec.ssh.split()) remotecmd = '%s -c "%s"' % (remotepython, popen_bootstrapline) args.append(remotecmd) return args def create_io(spec, execmodel): if spec.popen: args = popen_args(spec) return Popen2IOMaster(args, execmodel) if spec.ssh: args = ssh_args(spec) io = Popen2IOMaster(args, execmodel) io.remoteaddress = spec.ssh return io # # Proxy Gateway handling code # # master: proxy initiator # forwarder: forwards between master and sub # sub: sub process that is proxied to the initiator RIO_KILL = 1 RIO_WAIT = 2 RIO_REMOTEADDRESS = 3 RIO_CLOSE_WRITE = 4 class ProxyIO(object): """ A Proxy IO object allows to instantiate a Gateway through another "via" gateway. A master:ProxyIO object provides an IO object effectively connected to the sub via the forwarder. To achieve this, master:ProxyIO interacts with forwarder:serve_proxy_io() which itself instantiates and interacts with the sub. 
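# --- Illustrative sketch; not part of the vendored source. ---
# The master/forwarder/sub arrangement described above is what a
# 'via' spec sets up: the sub gateway's IO is proxied through an
# existing master gateway. Assumes a working execnet install; the
# ids 'master' and 'sub' are arbitrary names chosen here.
import execnet

group = execnet.Group()
group.makegateway("popen//id=master")
sub = group.makegateway("popen//via=master//id=sub")
ch = sub.remote_exec("import os; channel.send(os.getpid())")
assert isinstance(ch.receive(), int)
group.terminate(timeout=5)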
""" def __init__(self, proxy_channel, execmodel): # after exchanging the control channel we use proxy_channel # for messaging IO self.controlchan = proxy_channel.gateway.newchannel() proxy_channel.send(self.controlchan) self.iochan = proxy_channel self.iochan_file = self.iochan.makefile('r') self.execmodel = execmodel def read(self, nbytes): return self.iochan_file.read(nbytes) def write(self, data): return self.iochan.send(data) def _controll(self, event): self.controlchan.send(event) return self.controlchan.receive() def close_write(self): self._controll(RIO_CLOSE_WRITE) def kill(self): self._controll(RIO_KILL) def wait(self): return self._controll(RIO_WAIT) @property def remoteaddress(self): return self._controll(RIO_REMOTEADDRESS) def __repr__(self): return '' % (self.iochan.gateway.id, ) class PseudoSpec: def __init__(self, vars): self.__dict__.update(vars) def __getattr__(self, name): return None def serve_proxy_io(proxy_channelX): execmodel = proxy_channelX.gateway.execmodel _trace = proxy_channelX.gateway._trace tag = "serve_proxy_io:%s " % proxy_channelX.id def log(*msg): _trace(tag + msg[0], *msg[1:]) spec = PseudoSpec(proxy_channelX.receive()) # create sub IO object which we will proxy back to our proxy initiator sub_io = create_io(spec, execmodel) control_chan = proxy_channelX.receive() log("got control chan", control_chan) # read data from master, forward it to the sub # XXX writing might block, thus blocking the receiver thread def forward_to_sub(data): log("forward data to sub, size %s" % len(data)) sub_io.write(data) proxy_channelX.setcallback(forward_to_sub) def controll(data): if data==RIO_WAIT: control_chan.send(sub_io.wait()) elif data==RIO_KILL: control_chan.send(sub_io.kill()) elif data==RIO_REMOTEADDRESS: control_chan.send(sub_io.remoteaddress) elif data==RIO_CLOSE_WRITE: control_chan.send(sub_io.close_write()) control_chan.setcallback(controll) # write data to the master coming from the sub forward_to_master_file = proxy_channelX.makefile("w") # read bootstrap byte from sub, send it on to master log('reading bootstrap byte from sub', spec.id) initial = sub_io.read(1) assert initial == '1'.encode('ascii'), initial log('forwarding bootstrap byte from sub', spec.id) forward_to_master_file.write(initial) # enter message forwarding loop while True: try: message = Message.from_io(sub_io) except EOFError: log('EOF from sub, terminating proxying loop', spec.id) break message.to_io(forward_to_master_file) # proxy_channelX will be closed from remote_exec's finalization code if __name__ == "__channelexec__": serve_proxy_io(channel) # noqa ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/gateway_socket.py0000644000076500000240000000476512754342136031431 0ustar alfredostaff00000000000000from execnet.gateway_bootstrap import HostNotFound import sys try: bytes except NameError: bytes = str class SocketIO: def __init__(self, sock, execmodel): self.sock = sock self.execmodel = execmodel socket = execmodel.socket try: sock.setsockopt(socket.SOL_IP, socket.IP_TOS, 0x10)# IPTOS_LOWDELAY sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1) except (AttributeError, socket.error): sys.stderr.write("WARNING: cannot set socketoption") def read(self, numbytes): "Read exactly 'bytes' bytes from the socket." 
buf = bytes() while len(buf) < numbytes: t = self.sock.recv(numbytes - len(buf)) if not t: raise EOFError buf += t return buf def write(self, data): self.sock.sendall(data) def close_read(self): try: self.sock.shutdown(0) except self.execmodel.socket.error: pass def close_write(self): try: self.sock.shutdown(1) except self.execmodel.socket.error: pass def wait(self): pass def kill(self): pass def start_via(gateway, hostport=None): """ return a host, port tuple, after instanciating a socketserver on the given gateway """ if hostport is None: host, port = ('localhost', 0) else: host, port = hostport from execnet.script import socketserver # execute the above socketserverbootstrap on the other side channel = gateway.remote_exec(socketserver) channel.send((host, port)) (realhost, realport) = channel.receive() #self._trace("new_remote received" # "port=%r, hostname = %r" %(realport, hostname)) if not realhost or realhost=="0.0.0.0": realhost = "localhost" return realhost, realport def create_io(spec, group, execmodel): assert not spec.python, ( "socket: specifying python executables not yet supported") gateway_id = spec.installvia if gateway_id: host, port = start_via(group[gateway_id]) else: host, port = spec.socket.split(":") port = int(port) socket = execmodel.socket sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) io = SocketIO(sock, execmodel) io.remoteaddress = '%s:%d' % (host, port) try: sock.connect((host, port)) except execmodel.socket.gaierror: raise HostNotFound(str(sys.exc_info()[1])) return io ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/multi.py0000644000076500000240000002347612754342136027552 0ustar alfredostaff00000000000000""" Managing Gateway Groups and interactions with multiple channels. (c) 2008-2014, Holger Krekel and others """ import sys, atexit from execnet import XSpec from execnet import gateway_io, gateway_bootstrap from execnet.gateway_base import reraise, trace, get_execmodel from threading import Lock NO_ENDMARKER_WANTED = object() class Group(object): """ Gateway Groups. """ defaultspec = "popen" def __init__(self, xspecs=(), execmodel="thread"): """ initialize group and make gateways as specified. execmodel can be 'thread' or 'eventlet'. """ self._gateways = [] self._autoidcounter = 0 self._autoidlock = Lock() self._gateways_to_join = [] # we use the same execmodel for all of the Gateway objects # we spawn on our side. Probably we should not allow different # execmodels between different groups but not clear. # Note that "other side" execmodels may differ and is typically # specified by the spec passed to makegateway. self.set_execmodel(execmodel) for xspec in xspecs: self.makegateway(xspec) atexit.register(self._cleanup_atexit) @property def execmodel(self): return self._execmodel @property def remote_execmodel(self): return self._remote_execmodel def set_execmodel(self, execmodel, remote_execmodel=None): """ Set the execution model for local and remote site. execmodel can be one of "thread" or "eventlet" (XXX gevent). It determines the execution model for any newly created gateway. If remote_execmodel is not specified it takes on the value of execmodel. NOTE: Execution models can only be set before any gateway is created. 
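# --- Illustrative sketch; not part of the vendored source. ---
# The constraint stated above: pick the execution model when the
# Group is created (or via set_execmodel) strictly before the first
# gateway exists.
import execnet

group = execnet.Group(execmodel="thread")   # 'thread' is the default
gw = group.makegateway("popen")
try:
    group.set_execmodel("eventlet")         # too late: a gateway exists
except ValueError:
    pass
group.terminate(timeout=5)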
""" if self._gateways: raise ValueError("can not set execution models if " "gateways have been created already") if remote_execmodel is None: remote_execmodel = execmodel self._execmodel = get_execmodel(execmodel) self._remote_execmodel = get_execmodel(remote_execmodel) def __repr__(self): idgateways = [gw.id for gw in self] return "" %(idgateways) def __getitem__(self, key): if isinstance(key, int): return self._gateways[key] for gw in self._gateways: if gw == key or gw.id == key: return gw raise KeyError(key) def __contains__(self, key): try: self[key] return True except KeyError: return False def __len__(self): return len(self._gateways) def __iter__(self): return iter(list(self._gateways)) def makegateway(self, spec=None): """create and configure a gateway to a Python interpreter. The ``spec`` string encodes the target gateway type and configuration information. The general format is:: key1=value1//key2=value2//... If you leave out the ``=value`` part a True value is assumed. Valid types: ``popen``, ``ssh=hostname``, ``socket=host:port``. Valid configuration:: id= specifies the gateway id python= specifies which python interpreter to execute execmodel=model 'thread', 'eventlet', 'gevent' model for execution chdir= specifies to which directory to change nice= specifies process priority of new process env:NAME=value specifies a remote environment variable setting. If no spec is given, self.defaultspec is used. """ if not spec: spec = self.defaultspec if not isinstance(spec, XSpec): spec = XSpec(spec) self.allocate_id(spec) if spec.execmodel is None: spec.execmodel = self.remote_execmodel.backend if spec.via: assert not spec.socket master = self[spec.via] proxy_channel = master.remote_exec(gateway_io) proxy_channel.send(vars(spec)) proxy_io_master = gateway_io.ProxyIO(proxy_channel, self.execmodel) gw = gateway_bootstrap.bootstrap(proxy_io_master, spec) elif spec.popen or spec.ssh: io = gateway_io.create_io(spec, execmodel=self.execmodel) gw = gateway_bootstrap.bootstrap(io, spec) elif spec.socket: from execnet import gateway_socket io = gateway_socket.create_io(spec, self, execmodel=self.execmodel) gw = gateway_bootstrap.bootstrap(io, spec) else: raise ValueError("no gateway type found for %r" % (spec._spec,)) gw.spec = spec self._register(gw) if spec.chdir or spec.nice or spec.env: channel = gw.remote_exec(""" import os path, nice, env = channel.receive() if path: if not os.path.exists(path): os.mkdir(path) os.chdir(path) if nice and hasattr(os, 'nice'): os.nice(nice) if env: for name, value in env.items(): os.environ[name] = value """) nice = spec.nice and int(spec.nice) or 0 channel.send((spec.chdir, nice, spec.env)) channel.waitclose() return gw def allocate_id(self, spec): """ (re-entrant) allocate id for the given xspec object. """ if spec.id is None: with self._autoidlock: id = "gw" + str(self._autoidcounter) self._autoidcounter += 1 if id in self: raise ValueError("already have gateway with id %r" %(id,)) spec.id = id def _register(self, gateway): assert not hasattr(gateway, '_group') assert gateway.id assert id not in self self._gateways.append(gateway) gateway._group = self def _unregister(self, gateway): self._gateways.remove(gateway) self._gateways_to_join.append(gateway) def _cleanup_atexit(self): trace("=== atexit cleanup %r ===" %(self,)) self.terminate(timeout=1.0) def terminate(self, timeout=None): """ trigger exit of member gateways and wait for termination of member gateways and associated subprocesses. 
After waiting timeout seconds try to to kill local sub processes of popen- and ssh-gateways. Timeout defaults to None meaning open-ended waiting and no kill attempts. """ while self: vias = {} for gw in self: if gw.spec.via: vias[gw.spec.via] = True for gw in self: if gw.id not in vias: gw.exit() def join_wait(gw): gw.join() gw._io.wait() def kill(gw): trace("Gateways did not come down after timeout: %r" % gw) gw._io.kill() safe_terminate(self.execmodel, timeout, [ (lambda: join_wait(gw), lambda: kill(gw)) for gw in self._gateways_to_join]) self._gateways_to_join[:] = [] def remote_exec(self, source, **kwargs): """ remote_exec source on all member gateways and return MultiChannel connecting to all sub processes. """ channels = [] for gw in self: channels.append(gw.remote_exec(source, **kwargs)) return MultiChannel(channels) class MultiChannel: def __init__(self, channels): self._channels = channels def __len__(self): return len(self._channels) def __iter__(self): return iter(self._channels) def __getitem__(self, key): return self._channels[key] def __contains__(self, chan): return chan in self._channels def send_each(self, item): for ch in self._channels: ch.send(item) def receive_each(self, withchannel=False): assert not hasattr(self, '_queue') l = [] for ch in self._channels: obj = ch.receive() if withchannel: l.append((ch, obj)) else: l.append(obj) return l def make_receive_queue(self, endmarker=NO_ENDMARKER_WANTED): try: return self._queue except AttributeError: self._queue = None for ch in self._channels: if self._queue is None: self._queue = ch.gateway.execmodel.queue.Queue() def putreceived(obj, channel=ch): self._queue.put((channel, obj)) if endmarker is NO_ENDMARKER_WANTED: ch.setcallback(putreceived) else: ch.setcallback(putreceived, endmarker=endmarker) return self._queue def waitclose(self): first = None for ch in self._channels: try: ch.waitclose() except ch.RemoteError: if first is None: first = sys.exc_info() if first: reraise(*first) def safe_terminate(execmodel, timeout, list_of_paired_functions): workerpool = execmodel.WorkerPool() def termkill(termfunc, killfunc): termreply = workerpool.spawn(termfunc) try: termreply.get(timeout=timeout) except IOError: killfunc() replylist = [] for termfunc, killfunc in list_of_paired_functions: reply = workerpool.spawn(termkill, termfunc, killfunc) replylist.append(reply) for reply in replylist: reply.get() workerpool.waitall() default_group = Group() makegateway = default_group.makegateway set_execmodel = default_group.set_execmodel ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/rsync.py0000644000076500000240000001604412754342136027547 0ustar alfredostaff00000000000000""" 1:N rsync implemenation on top of execnet. (c) 2006-2009, Armin Rigo, Holger Krekel, Maciej Fijalkowski """ import os, stat try: from hashlib import md5 except ImportError: from md5 import md5 try: from queue import Queue except ImportError: from Queue import Queue import execnet.rsync_remote class RSync(object): """ This class allows to send a directory structure (recursively) to one or multiple remote filesystems. There is limited support for symlinks, which means that symlinks pointing to the sourcetree will be send "as is" while external symlinks will be just copied (regardless of existance of such a path on remote side). 
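# A hedged sketch of MultiChannel as returned by Group.remote_exec(): one
# channel per member gateway, with receive_each() collecting one result
# from each. Three local popen gateways keep the example self-contained.
import execnet

group = execnet.Group(['popen'] * 3)
mch = group.remote_exec('import os; channel.send(os.getpid())')
pids = mch.receive_each()
assert len(pids) == 3 and len(set(pids)) == 3  # one distinct pid per gateway
mch.waitclose()
group.terminate(timeout=1.0)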
""" def __init__(self, sourcedir, callback=None, verbose=True): self._sourcedir = str(sourcedir) self._verbose = verbose assert callback is None or hasattr(callback, '__call__') self._callback = callback self._channels = {} self._receivequeue = Queue() self._links = [] def filter(self, path): return True def _end_of_channel(self, channel): if channel in self._channels: # too early! we must have got an error channel.waitclose() # or else we raise one raise IOError('connection unexpectedly closed: %s ' % ( channel.gateway,)) def _process_link(self, channel): for link in self._links: channel.send(link) # completion marker, this host is done channel.send(42) def _done(self, channel): """ Call all callbacks """ finishedcallback = self._channels.pop(channel) if finishedcallback: finishedcallback() channel.waitclose() def _list_done(self, channel): # sum up all to send if self._callback: s = sum([self._paths[i] for i in self._to_send[channel]]) self._callback("list", s, channel) def _send_item(self, channel, data): """ Send one item """ modified_rel_path, checksum = data modifiedpath = os.path.join(self._sourcedir, *modified_rel_path) try: f = open(modifiedpath, 'rb') data = f.read() except IOError: data = None # provide info to progress callback function modified_rel_path = "/".join(modified_rel_path) if data is not None: self._paths[modified_rel_path] = len(data) else: self._paths[modified_rel_path] = 0 if channel not in self._to_send: self._to_send[channel] = [] self._to_send[channel].append(modified_rel_path) #print "sending", modified_rel_path, data and len(data) or 0, checksum if data is not None: f.close() if checksum is not None and checksum == md5(data).digest(): data = None # not really modified else: self._report_send_file(channel.gateway, modified_rel_path) channel.send(data) def _report_send_file(self, gateway, modified_rel_path): if self._verbose: print("%s <= %s" %(gateway, modified_rel_path)) def send(self, raises=True): """ Sends a sourcedir to all added targets. Flag indicates whether to raise an error or return in case of lack of targets """ if not self._channels: if raises: raise IOError("no targets available, maybe you " "are trying call send() twice?") return # normalize a trailing '/' away self._sourcedir = os.path.dirname(os.path.join(self._sourcedir, 'x')) # send directory structure and file timestamps/sizes self._send_directory_structure(self._sourcedir) # paths and to_send are only used for doing # progress-related callbacks self._paths = {} self._to_send = {} # send modified file to clients while self._channels: channel, req = self._receivequeue.get() if req is None: self._end_of_channel(channel) else: command, data = req if command == "links": self._process_link(channel) elif command == "done": self._done(channel) elif command == "ack": if self._callback: self._callback("ack", self._paths[data], channel) elif command == "list_done": self._list_done(channel) elif command == "send": self._send_item(channel, data) del data else: assert "Unknown command %s" % command def add_target(self, gateway, destdir, finishedcallback=None, **options): """ Adds a remote target specified via a gateway and a remote destination directory. 
""" for name in options: assert name in ('delete',) def itemcallback(req): self._receivequeue.put((channel, req)) channel = gateway.remote_exec(execnet.rsync_remote) channel.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False) channel.setcallback(itemcallback, endmarker = None) channel.send((str(destdir), options)) self._channels[channel] = finishedcallback def _broadcast(self, msg): for channel in self._channels: channel.send(msg) def _send_link(self, linktype, basename, linkpoint): self._links.append((linktype, basename, linkpoint)) def _send_directory(self, path): # dir: send a list of entries names = [] subpaths = [] for name in os.listdir(path): p = os.path.join(path, name) if self.filter(p): names.append(name) subpaths.append(p) mode = os.lstat(path).st_mode self._broadcast([mode] + names) for p in subpaths: self._send_directory_structure(p) def _send_link_structure(self, path): linkpoint = os.readlink(path) basename = path[len(self._sourcedir) + 1:] if linkpoint.startswith(self._sourcedir): self._send_link("linkbase", basename, linkpoint[len(self._sourcedir) + 1:]) else: # relative or absolute link, just send it self._send_link("link", basename, linkpoint) self._broadcast(None) def _send_directory_structure(self, path): try: st = os.lstat(path) except OSError: self._broadcast((None, 0, 0)) return if stat.S_ISREG(st.st_mode): # regular file: send a mode/timestamp/size pair self._broadcast((st.st_mode, st.st_mtime, st.st_size)) elif stat.S_ISDIR(st.st_mode): self._send_directory(path) elif stat.S_ISLNK(st.st_mode): self._send_link_structure(path) else: raise ValueError("cannot sync %r" % (path,)) ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/rsync_remote.py0000644000076500000240000000702412754342136031120 0ustar alfredostaff00000000000000""" (c) 2006-2013, Armin Rigo, Holger Krekel, Maciej Fijalkowski """ def serve_rsync(channel): import os, stat, shutil try: from hashlib import md5 except ImportError: from md5 import md5 destdir, options = channel.receive() modifiedfiles = [] def remove(path): assert path.startswith(destdir) try: os.unlink(path) except OSError: # assume it's a dir shutil.rmtree(path) def receive_directory_structure(path, relcomponents): try: st = os.lstat(path) except OSError: st = None msg = channel.receive() if isinstance(msg, list): if st and not stat.S_ISDIR(st.st_mode): os.unlink(path) st = None if not st: os.makedirs(path) mode = msg.pop(0) if mode: os.chmod(path, mode) entrynames = {} for entryname in msg: destpath = os.path.join(path, entryname) receive_directory_structure(destpath, relcomponents + [entryname]) entrynames[entryname] = True if options.get('delete'): for othername in os.listdir(path): if othername not in entrynames: otherpath = os.path.join(path, othername) remove(otherpath) elif msg is not None: assert isinstance(msg, tuple) checksum = None if st: if stat.S_ISREG(st.st_mode): msg_mode, msg_mtime, msg_size = msg if msg_size != st.st_size: pass elif msg_mtime != st.st_mtime: f = open(path, 'rb') checksum = md5(f.read()).digest() f.close() elif msg_mode and msg_mode != st.st_mode: os.chmod(path, msg_mode) return else: return # already fine else: remove(path) channel.send(("send", (relcomponents, checksum))) modifiedfiles.append((path, msg)) receive_directory_structure(destdir, []) STRICT_CHECK = False # seems most useful this way for py.test channel.send(("list_done", None)) for path, (mode, time, size) in modifiedfiles: data = channel.receive() channel.send(("ack", path[len(destdir) + 1:])) if data is not None: if 
STRICT_CHECK and len(data) != size: raise IOError('file modified during rsync: %r' % (path,)) f = open(path, 'wb') f.write(data) f.close() try: if mode: os.chmod(path, mode) os.utime(path, (time, time)) except OSError: pass del data channel.send(("links", None)) msg = channel.receive() while msg != 42: # we get symlink _type, relpath, linkpoint = msg path = os.path.join(destdir, relpath) try: remove(path) except OSError: pass if _type == "linkbase": src = os.path.join(destdir, linkpoint) else: assert _type == "link", _type src = linkpoint os.symlink(src, path) msg = channel.receive() channel.send(("done", None)) if __name__ == '__channelexec__': serve_rsync(channel) # noqa ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/script/0000755000076500000240000000000013312242252027323 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/script/__init__.py0000644000076500000240000000000212754342136031437 0ustar alfredostaff00000000000000# ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/script/loop_socketserver.py0000644000076500000240000000063512754342136033464 0ustar alfredostaff00000000000000 import os, sys import subprocess if __name__ == '__main__': directory = os.path.dirname(os.path.abspath(sys.argv[0])) script = os.path.join(directory, 'socketserver.py') while 1: cmdlist = ["python", script] cmdlist.extend(sys.argv[1:]) text = "starting subcommand: " + " ".join(cmdlist) print(text) process = subprocess.Popen(cmdlist) process.wait() ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/script/quitserver.py0000644000076500000240000000044112754342136032120 0ustar alfredostaff00000000000000""" send a "quit" signal to a remote server """ import sys import socket hostport = sys.argv[1] host, port = hostport.split(':') hostport = (host, int(port)) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect(hostport) sock.sendall('"raise KeyboardInterrupt"\n') ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/script/shell.py0000755000076500000240000000476712754342136031040 0ustar alfredostaff00000000000000#! 
/usr/bin/env python """ a remote python shell for injection into startserver.py """ import sys, os, socket, select try: clientsock except NameError: print("client side starting") host, port = sys.argv[1].split(':') port = int(port) myself = open(os.path.abspath(sys.argv[0]), 'rU').read() sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((host, port)) sock.sendall(repr(myself)+'\n') print("send boot string") inputlist = [ sock, sys.stdin ] try: while 1: r,w,e = select.select(inputlist, [], []) if sys.stdin in r: line = raw_input() sock.sendall(line + '\n') if sock in r: line = sock.recv(4096) sys.stdout.write(line) sys.stdout.flush() except: import traceback print(traceback.print_exc()) sys.exit(1) print("server side starting") # server side # from traceback import print_exc from threading import Thread class promptagent(Thread): def __init__(self, clientsock): Thread.__init__(self) self.clientsock = clientsock def run(self): print("Entering thread prompt loop") clientfile = self.clientsock.makefile('w') filein = self.clientsock.makefile('r') loc = self.clientsock.getsockname() while 1: try: clientfile.write('%s %s >>> ' % loc) clientfile.flush() line = filein.readline() if len(line)==0: raise EOFError("nothing") #print >>sys.stderr,"got line: " + line if line.strip(): oldout, olderr = sys.stdout, sys.stderr sys.stdout, sys.stderr = clientfile, clientfile try: try: exec(compile(line + '\n','', 'single')) except: print_exc() finally: sys.stdout=oldout sys.stderr=olderr clientfile.flush() except EOFError: #e = sys.exc_info()[1] sys.stderr.write("connection close, prompt thread returns") break #print >>sys.stdout, "".join(apply(format_exception,sys.exc_info())) self.clientsock.close() prompter = promptagent(clientsock) # noqa prompter.start() print("promptagent - thread started") ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/script/socketserver.py0000755000076500000240000000714212754342136032436 0ustar alfredostaff00000000000000#! /usr/bin/env python """ start socket based minimal readline exec server it can exeuted in 2 modes of operation 1. as normal script, that listens for new connections 2. via existing_gateway.remote_exec (as imported module) """ # this part of the program only executes on the server side # progname = 'socket_readline_exec_server-1.2' import sys, os def get_fcntl(): try: import fcntl except ImportError: fcntl = None return fcntl fcntl = get_fcntl() debug = 0 if debug: # and not os.isatty(sys.stdin.fileno()): f = open('/tmp/execnet-socket-pyout.log', 'w') old = sys.stdout, sys.stderr sys.stdout = sys.stderr = f def print_(*args): print(" ".join(str(arg) for arg in args)) if sys.version_info > (3, 0): exec("""def exec_(source, locs): exec(source, locs)""") else: exec("""def exec_(source, locs): exec source in locs""") def exec_from_one_connection(serversock): print_(progname, 'Entering Accept loop', serversock.getsockname()) clientsock,address = serversock.accept() print_(progname, 'got new connection from %s %s' % address) clientfile = clientsock.makefile('rb') print_("reading line") # rstrip so that we can use \r\n for telnet testing source = clientfile.readline().rstrip() clientfile.close() g = {'clientsock' : clientsock, 'address' : address, 'execmodel': execmodel} source = eval(source) if source: co = compile(source+'\n', source, 'exec') print_(progname, 'compiled source, executing') try: exec_(co, g) # noqa finally: print_(progname, 'finished executing code') # background thread might hold a reference to this (!?) 
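# A hedged sketch of attaching to a running instance of this socket server
# with a socket gateway spec; start the server first (for example with
# "python socketserver.py :8888" per the __main__ block below). The host
# and port here are assumptions.
import execnet

gw = execnet.makegateway('socket=127.0.0.1:8888')
ch = gw.remote_exec('channel.send("pong")')
assert ch.receive() == 'pong'
gw.exit()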
#clientsock.close() def bind_and_listen(hostport, execmodel): socket = execmodel.socket if isinstance(hostport, str): host, port = hostport.split(':') hostport = (host, int(port)) serversock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # set close-on-exec if hasattr(fcntl, 'FD_CLOEXEC'): old = fcntl.fcntl(serversock.fileno(), fcntl.F_GETFD) fcntl.fcntl(serversock.fileno(), fcntl.F_SETFD, old | fcntl.FD_CLOEXEC) # allow the address to be re-used in a reasonable amount of time if os.name == 'posix' and sys.platform != 'cygwin': serversock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) serversock.bind(hostport) serversock.listen(5) return serversock def startserver(serversock, loop=False): try: while 1: try: exec_from_one_connection(serversock) except (KeyboardInterrupt, SystemExit): raise except: if debug: import traceback traceback.print_exc() else: excinfo = sys.exc_info() print_("got exception", excinfo[1]) if not loop: break finally: print_("leaving socketserver execloop") serversock.shutdown(2) if __name__ == '__main__': import sys if len(sys.argv)>1: hostport = sys.argv[1] else: hostport = ':8888' from execnet.gateway_base import get_execmodel execmodel = get_execmodel("thread") serversock = bind_and_listen(hostport, execmodel) startserver(serversock, loop=False) elif __name__=='__channelexec__': execmodel = channel.gateway.execmodel # noqa bindname = channel.receive() # noqa sock = bind_and_listen(bindname, execmodel) port = sock.getsockname() channel.send(port) # noqa startserver(sock) ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/script/socketserverservice.py0000644000076500000240000000643712754342136034022 0ustar alfredostaff00000000000000""" A windows service wrapper for the py.execnet socketserver. To use, run: python socketserverservice.py register net start ExecNetSocketServer """ import sys import win32serviceutil import win32service import win32event import win32evtlogutil import servicemanager import threading import socketserver appname = 'ExecNetSocketServer' class SocketServerService(win32serviceutil.ServiceFramework): _svc_name_ = appname _svc_display_name_ = "%s" % appname _svc_deps_ = ["EventLog"] def __init__(self, args): # The exe-file has messages for the Event Log Viewer. # Register the exe-file as event source. # # Probably it would be better if this is done at installation time, # so that it also could be removed if the service is uninstalled. # Unfortunately it cannot be done in the 'if __name__ == "__main__"' # block below, because the 'frozen' exe-file does not run this code. # win32evtlogutil.AddSourceToRegistry(self._svc_display_name_, servicemanager.__file__, "Application") win32serviceutil.ServiceFramework.__init__(self, args) self.hWaitStop = win32event.CreateEvent(None, 0, 0, None) self.WAIT_TIME = 1000 # in milliseconds def SvcStop(self): self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING) win32event.SetEvent(self.hWaitStop) def SvcDoRun(self): # Redirect stdout and stderr to prevent "IOError: [Errno 9] # Bad file descriptor". Windows services don't have functional # output streams. sys.stdout = sys.stderr = open('nul', 'w') # Write a 'started' event to the event log... 
win32evtlogutil.ReportEvent(self._svc_display_name_, servicemanager.PYS_SERVICE_STARTED, 0, # category servicemanager.EVENTLOG_INFORMATION_TYPE, (self._svc_name_, '')) print("Begin: %s" % (self._svc_display_name_)) hostport = ':8888' print('Starting py.execnet SocketServer on %s' % hostport) serversock = socketserver.bind_and_listen(hostport) thread = threading.Thread(target=socketserver.startserver, args=(serversock,), kwargs={'loop':True}) thread.setDaemon(True) thread.start() # wait to be stopped or self.WAIT_TIME to pass while True: result = win32event.WaitForSingleObject(self.hWaitStop, self.WAIT_TIME) if result == win32event.WAIT_OBJECT_0: break # write a 'stopped' event to the event log. win32evtlogutil.ReportEvent(self._svc_display_name_, servicemanager.PYS_SERVICE_STOPPED, 0, # category servicemanager.EVENTLOG_INFORMATION_TYPE, (self._svc_name_, '')) print("End: %s" % appname) if __name__ == '__main__': # Note that this code will not be run in the 'frozen' exe-file!!! win32serviceutil.HandleCommandLine(SocketServerService) ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/script/xx.py0000644000076500000240000000024412754342136030347 0ustar alfredostaff00000000000000import rlcompleter2 rlcompleter2.setup() import register, sys try: hostport = sys.argv[1] except: hostport = ':8888' gw = register.ServerGateway(hostport) ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/xspec.py0000644000076500000240000000340012754342136027523 0ustar alfredostaff00000000000000""" (c) 2008-2013, holger krekel """ class XSpec: """ Execution Specification: key1=value1//key2=value2 ... * keys need to be unique within the specification scope * neither key nor value are allowed to contain "//" * keys are not allowed to contain "=" * keys are not allowed to start with underscore * if no "=value" is given, assume a boolean True value """ # XXX allow customization, for only allow specific key names popen = ssh = socket = python = chdir = nice = \ dont_write_bytecode = execmodel = None def __init__(self, string): self._spec = string self.env = {} for keyvalue in string.split("//"): i = keyvalue.find("=") if i == -1: key, value = keyvalue, True else: key, value = keyvalue[:i], keyvalue[i+1:] if key[0] == "_": raise AttributeError("%r not a valid XSpec key" % key) if key in self.__dict__: raise ValueError("duplicate key: %r in %r" %(key, string)) if key.startswith("env:"): self.env[key[4:]] = value else: setattr(self, key, value) def __getattr__(self, name): if name[0] == "_": raise AttributeError(name) return None def __repr__(self): return "" %(self._spec,) def __str__(self): return self._spec def __hash__(self): return hash(self._spec) def __eq__(self, other): return self._spec == getattr(other, '_spec', None) def __ne__(self, other): return self._spec != getattr(other, '_spec', None) def _samefilesystem(self): return bool(self.popen and not self.chdir) ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/log.py0000644000076500000240000000173612754342135023475 0ustar alfredostaff00000000000000 def reporting(conn, result, timeout=None): timeout = timeout or conn.global_timeout # -1 a.k.a. 
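# A hedged round-trip for the XSpec parser shown above: keys become
# attributes, "env:" entries land in .env, valueless keys read as True,
# and unset attributes fall back to None.
from execnet import XSpec

spec = XSpec('ssh=user@host//id=gw1//nice=5//env:FOO=bar//dont_write_bytecode')
assert spec.ssh == 'user@host' and spec.id == 'gw1'
assert spec.nice == '5'                  # values stay strings
assert spec.env == {'FOO': 'bar'}
assert spec.dont_write_bytecode is True  # no "=value" means boolean True
assert spec.chdir is None                # unset keys read as None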
wait for ever log_map = { 'debug': conn.logger.debug, 'error': conn.logger.error, 'warning': conn.logger.warning } while True: try: received = result.receive(timeout) level_received, message = list(received.items())[0] if not isinstance(message, str): message = message.decode('utf-8') log_map[level_received](message.strip('\n')) except EOFError: break except Exception as err: # the things we need to do here :( # because execnet magic, we cannot catch this as # `except TimeoutError` if err.__class__.__name__ == 'TimeoutError': msg = 'No data was received after %s seconds, disconnecting...' % timeout conn.logger.warning(msg) break raise ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/process.py0000644000076500000240000001566612754342135024401 0ustar alfredostaff00000000000000import traceback from .log import reporting from .util import admin_command, RemoteError def _remote_run(channel, cmd, **kw): import subprocess import sys from select import select stop_on_nonzero = kw.pop('stop_on_nonzero', True) process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, **kw ) while True: reads, _, _ = select( [process.stdout.fileno(), process.stderr.fileno()], [], [] ) for descriptor in reads: if descriptor == process.stdout.fileno(): read = process.stdout.readline() if read: channel.send({'debug': read}) sys.stdout.flush() if descriptor == process.stderr.fileno(): read = process.stderr.readline() if read: channel.send({'warning': read}) sys.stderr.flush() if process.poll() is not None: # ensure we do not have anything pending in stdout or stderr # unfortunately, we cannot abstract this repetitive loop into its # own function because execnet does not allow for non-global (or # even nested functions). This must be repeated here. while True: err_read = out_read = None for descriptor in reads: if descriptor == process.stdout.fileno(): out_read = process.stdout.readline() if out_read: channel.send({'debug': out_read}) sys.stdout.flush() if descriptor == process.stderr.fileno(): err_read = process.stderr.readline() if err_read: channel.send({'warning': err_read}) sys.stderr.flush() # At this point we have gone through all the possible # descriptors and `read` was empty, so we now can break out of # this since all stdout/stderr has been properly flushed to # logging if not err_read and not out_read: break break returncode = process.wait() if returncode != 0: if stop_on_nonzero: raise RuntimeError( "command returned non-zero exit status: %s" % returncode ) else: channel.send({'warning': "command returned non-zero exit status: %s" % returncode}) def extend_path(conn, arguments): """ get the remote environment's env so we can explicitly add the path without wiping out everything """ # retrieve the remote environment variables for the host try: result = conn.gateway.remote_exec("import os; channel.send(os.environ.copy())") env = result.receive() except Exception: conn.logger.exception('failed to retrieve the remote environment variables') env = {} # get the $PATH and extend it (do not overwrite) path = env.get('PATH', '') env['PATH'] = path + '/usr/local/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin' arguments['env'] = env return arguments def run(conn, command, exit=False, timeout=None, **kw): """ A real-time-logging implementation of a remote subprocess.Popen call where a command is just executed on the remote end and no other handling is done. 
:param conn: A connection oject :param command: The command to pass in to the remote subprocess.Popen :param exit: If this call should close the connection at the end :param timeout: How many seconds to wait after no remote data is received (defaults to wait for ever) """ stop_on_error = kw.pop('stop_on_error', True) if not kw.get('env'): # get the remote environment's env so we can explicitly add # the path without wiping out everything kw = extend_path(conn, kw) timeout = timeout or conn.global_timeout conn.logger.info('Running command: %s' % ' '.join(admin_command(conn.sudo, command))) result = conn.execute(_remote_run, cmd=command, **kw) try: reporting(conn, result, timeout) except Exception: remote_trace = traceback.format_exc() remote_error = RemoteError(remote_trace) if remote_error.exception_name == 'RuntimeError': conn.logger.error(remote_error.exception_line) else: for tb_line in remote_trace.split('\n'): conn.logger.error(tb_line) if stop_on_error: raise RuntimeError( 'Failed to execute command: %s' % ' '.join(command) ) if exit: conn.exit() def _remote_check(channel, cmd, **kw): import subprocess process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kw ) stdout = process.stdout.read().splitlines() stderr = process.stderr.read().splitlines() channel.send((stdout, stderr, process.wait())) def check(conn, command, exit=False, timeout=None, **kw): """ Execute a remote command with ``subprocess.Popen`` but report back the results in a tuple with three items: stdout, stderr, and exit status. This helper function *does not* provide any logging as it is the caller's responsibility to do so. """ stop_on_error = kw.pop('stop_on_error', True) timeout = timeout or conn.global_timeout if not kw.get('env'): # get the remote environment's env so we can explicitly add # the path without wiping out everything kw = extend_path(conn, kw) conn.logger.info('Running command: %s' % ' '.join(admin_command(conn.sudo, command))) result = conn.execute(_remote_check, cmd=command, **kw) response = None try: response = result.receive(timeout) except Exception as err: # the things we need to do here :( # because execnet magic, we cannot catch this as # `except TimeoutError` if err.__class__.__name__ == 'TimeoutError': msg = 'No data was received after %s seconds, disconnecting...' % timeout conn.logger.warning(msg) # there is no stdout, stderr, or exit code but make the exit code # an error condition (non-zero) regardless return [], [], -1 else: remote_trace = traceback.format_exc() remote_error = RemoteError(remote_trace) if remote_error.exception_name == 'RuntimeError': conn.logger.error(remote_error.exception_line) else: for tb_line in remote_trace.split('\n'): conn.logger.error(tb_line) if stop_on_error: raise RuntimeError( 'Failed to execute command: %s' % ' '.join(command) ) if exit: conn.exit() return response ceph-deploy-2.0.1/ceph_deploy/lib/vendor/remoto/util.py0000644000076500000240000000200612754342135023660 0ustar alfredostaff00000000000000 def admin_command(sudo, command): """ If sudo is needed, make sure the command is prepended correctly, otherwise return the command as it came. :param sudo: A boolean representing the intention of having a sudo command (or not) :param command: A list of the actual command to execute with Popen. 
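# A hedged sketch of the calling convention ceph-deploy itself uses for
# these helpers elsewhere in this package: obtain a connection via
# hosts.get() and hand run()/check() an argv-style list. The hostname is
# hypothetical and must be reachable over SSH with Ceph installed.
from ceph_deploy import hosts
from ceph_deploy.lib import remoto

distro = hosts.get('node1.example.com')
remoto.process.run(distro.conn, ['ceph', '--version'])
out, err, code = remoto.process.check(distro.conn, ['hostname', '-s'])
distro.conn.exit()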
""" if sudo: if not isinstance(command, list): command = [command] return ['sudo'] + [cmd for cmd in command] return command class RemoteError(object): def __init__(self, traceback): self.orig_traceback = traceback self.exception_line = '' self.exception_name = self.get_exception_name() def get_exception_name(self): for tb_line in reversed(self.orig_traceback.split('\n')): if tb_line: for word in tb_line.split(): if word.endswith(':'): # exception! self.exception_line = tb_line return word.strip().strip(':') ceph-deploy-2.0.1/ceph_deploy/mds.py0000644000076500000240000001403513277045417020127 0ustar alfredostaff00000000000000import logging import os from ceph_deploy import conf from ceph_deploy import exc from ceph_deploy import hosts from ceph_deploy.util import system from ceph_deploy.lib import remoto from ceph_deploy.cliutil import priority LOG = logging.getLogger(__name__) def get_bootstrap_mds_key(cluster): """ Read the bootstrap-mds key for `cluster`. """ path = '{cluster}.bootstrap-mds.keyring'.format(cluster=cluster) try: with open(path, 'rb') as f: return f.read() except IOError: raise RuntimeError('bootstrap-mds keyring not found; run \'gatherkeys\'') def create_mds(distro, name, cluster, init): conn = distro.conn path = '/var/lib/ceph/mds/{cluster}-{name}'.format( cluster=cluster, name=name ) conn.remote_module.safe_mkdir(path) bootstrap_keyring = '/var/lib/ceph/bootstrap-mds/{cluster}.keyring'.format( cluster=cluster ) keypath = os.path.join(path, 'keyring') stdout, stderr, returncode = remoto.process.check( conn, [ 'ceph', '--cluster', cluster, '--name', 'client.bootstrap-mds', '--keyring', bootstrap_keyring, 'auth', 'get-or-create', 'mds.{name}'.format(name=name), 'osd', 'allow rwx', 'mds', 'allow', 'mon', 'allow profile mds', '-o', os.path.join(keypath), ] ) if returncode > 0: for line in stderr: conn.logger.error(line) for line in stdout: # yes stdout as err because this is an error conn.logger.error(line) conn.logger.error('exit code from command was: %s' % returncode) raise RuntimeError('could not create mds') conn.remote_module.touch_file(os.path.join(path, 'done')) conn.remote_module.touch_file(os.path.join(path, init)) if init == 'upstart': remoto.process.run( conn, [ 'initctl', 'emit', 'ceph-mds', 'cluster={cluster}'.format(cluster=cluster), 'id={name}'.format(name=name), ], timeout=7 ) elif init == 'sysvinit': remoto.process.run( conn, [ 'service', 'ceph', 'start', 'mds.{name}'.format(name=name), ], timeout=7 ) if distro.is_el: system.enable_service(distro.conn) elif init == 'systemd': remoto.process.run( conn, [ 'systemctl', 'enable', 'ceph-mds@{name}'.format(name=name), ], timeout=7 ) remoto.process.run( conn, [ 'systemctl', 'start', 'ceph-mds@{name}'.format(name=name), ], timeout=7 ) remoto.process.run( conn, [ 'systemctl', 'enable', 'ceph.target', ], timeout=7 ) def mds_create(args): conf_data = conf.ceph.load_raw(args) LOG.debug( 'Deploying mds, cluster %s hosts %s', args.cluster, ' '.join(':'.join(x or '' for x in t) for t in args.mds), ) key = get_bootstrap_mds_key(cluster=args.cluster) bootstrapped = set() errors = 0 failed_on_rhel = False for hostname, name in args.mds: try: distro = None distro = hosts.get(hostname, username=args.username) rlogger = distro.conn.logger LOG.info( 'Distro info: %s %s %s', distro.name, distro.release, distro.codename ) LOG.debug('remote host will use %s', distro.init) if hostname not in bootstrapped: bootstrapped.add(hostname) LOG.debug('deploying mds bootstrap to %s', hostname) distro.conn.remote_module.write_conf( 
args.cluster, conf_data, args.overwrite_conf, ) path = '/var/lib/ceph/bootstrap-mds/{cluster}.keyring'.format( cluster=args.cluster, ) if not distro.conn.remote_module.path_exists(path): rlogger.warning('mds keyring does not exist yet, creating one') distro.conn.remote_module.write_keyring(path, key) create_mds(distro, name, args.cluster, distro.init) distro.conn.exit() except RuntimeError as e: if distro and distro.normalized_name == 'redhat': LOG.error('this feature may not yet available for %s %s' % (distro.name, distro.release)) failed_on_rhel = True LOG.error(e) errors += 1 if errors: if failed_on_rhel: # because users only read the last few lines :( LOG.error( 'RHEL RHCS systems do not have the ability to deploy MDS yet' ) raise exc.GenericError('Failed to create %d MDSs' % errors) def mds(args): if args.subcommand == 'create': mds_create(args) else: LOG.error('subcommand %s not implemented', args.subcommand) def colon_separated(s): host = s name = s if s.count(':') == 1: (host, name) = s.split(':') return (host, name) @priority(30) def make(parser): """ Ceph MDS daemon management """ mds_parser = parser.add_subparsers(dest='subcommand') mds_parser.required = True mds_create = mds_parser.add_parser( 'create', help='Deploy Ceph MDS on remote host(s)' ) mds_create.add_argument( 'mds', metavar='HOST[:NAME]', nargs='+', type=colon_separated, help='host (and optionally the daemon name) to deploy on', ) parser.set_defaults( func=mds, ) ceph-deploy-2.0.1/ceph_deploy/mgr.py0000644000076500000240000001404013277045417020125 0ustar alfredostaff00000000000000import logging import os from ceph_deploy import conf from ceph_deploy import exc from ceph_deploy import hosts from ceph_deploy.util import system from ceph_deploy.lib import remoto from ceph_deploy.cliutil import priority LOG = logging.getLogger(__name__) def get_bootstrap_mgr_key(cluster): """ Read the bootstrap-mgr key for `cluster`. 
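# A hedged illustration of the HOST[:NAME] parsing used by the mds
# subcommand above (mgr.py below repeats the same helper): without a colon
# the daemon name defaults to the host itself.
from ceph_deploy.mds import colon_separated

assert colon_separated('node1') == ('node1', 'node1')
assert colon_separated('node1:mds-a') == ('node1', 'mds-a')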
""" path = '{cluster}.bootstrap-mgr.keyring'.format(cluster=cluster) try: with open(path, 'rb') as f: return f.read() except IOError: raise RuntimeError('bootstrap-mgr keyring not found; run \'gatherkeys\'') def create_mgr(distro, name, cluster, init): conn = distro.conn path = '/var/lib/ceph/mgr/{cluster}-{name}'.format( cluster=cluster, name=name ) conn.remote_module.safe_makedirs(path) bootstrap_keyring = '/var/lib/ceph/bootstrap-mgr/{cluster}.keyring'.format( cluster=cluster ) keypath = os.path.join(path, 'keyring') stdout, stderr, returncode = remoto.process.check( conn, [ 'ceph', '--cluster', cluster, '--name', 'client.bootstrap-mgr', '--keyring', bootstrap_keyring, 'auth', 'get-or-create', 'mgr.{name}'.format(name=name), 'mon', 'allow profile mgr', 'osd', 'allow *', 'mds', 'allow *', '-o', os.path.join(keypath), ] ) if returncode > 0: for line in stderr: conn.logger.error(line) for line in stdout: # yes stdout as err because this is an error conn.logger.error(line) conn.logger.error('exit code from command was: %s' % returncode) raise RuntimeError('could not create mgr') conn.remote_module.touch_file(os.path.join(path, 'done')) conn.remote_module.touch_file(os.path.join(path, init)) if init == 'upstart': remoto.process.run( conn, [ 'initctl', 'emit', 'ceph-mgr', 'cluster={cluster}'.format(cluster=cluster), 'id={name}'.format(name=name), ], timeout=7 ) elif init == 'sysvinit': remoto.process.run( conn, [ 'service', 'ceph', 'start', 'mgr.{name}'.format(name=name), ], timeout=7 ) if distro.is_el: system.enable_service(distro.conn) elif init == 'systemd': remoto.process.run( conn, [ 'systemctl', 'enable', 'ceph-mgr@{name}'.format(name=name), ], timeout=7 ) remoto.process.run( conn, [ 'systemctl', 'start', 'ceph-mgr@{name}'.format(name=name), ], timeout=7 ) remoto.process.run( conn, [ 'systemctl', 'enable', 'ceph.target', ], timeout=7 ) def mgr_create(args): conf_data = conf.ceph.load_raw(args) LOG.debug( 'Deploying mgr, cluster %s hosts %s', args.cluster, ' '.join(':'.join(x or '' for x in t) for t in args.mgr), ) key = get_bootstrap_mgr_key(cluster=args.cluster) bootstrapped = set() errors = 0 failed_on_rhel = False for hostname, name in args.mgr: try: distro = None distro = hosts.get(hostname, username=args.username) rlogger = distro.conn.logger LOG.info( 'Distro info: %s %s %s', distro.name, distro.release, distro.codename ) LOG.debug('remote host will use %s', distro.init) if hostname not in bootstrapped: bootstrapped.add(hostname) LOG.debug('deploying mgr bootstrap to %s', hostname) distro.conn.remote_module.write_conf( args.cluster, conf_data, args.overwrite_conf, ) path = '/var/lib/ceph/bootstrap-mgr/{cluster}.keyring'.format( cluster=args.cluster, ) if not distro.conn.remote_module.path_exists(path): rlogger.warning('mgr keyring does not exist yet, creating one') distro.conn.remote_module.write_keyring(path, key) create_mgr(distro, name, args.cluster, distro.init) distro.conn.exit() except RuntimeError as e: if distro and distro.normalized_name == 'redhat': LOG.error('this feature may not yet available for %s %s' % (distro.name, distro.release)) failed_on_rhel = True LOG.error(e) errors += 1 if errors: if failed_on_rhel: # because users only read the last few lines :( LOG.error( 'RHEL RHCS systems do not have the ability to deploy MGR yet' ) raise exc.GenericError('Failed to create %d MGRs' % errors) def mgr(args): if args.subcommand == 'create': mgr_create(args) else: LOG.error('subcommand %s not implemented', args.subcommand) def colon_separated(s): host = s name = s if 
s.count(':') == 1: (host, name) = s.split(':') return (host, name) @priority(30) def make(parser): """ Ceph MGR daemon management """ mgr_parser = parser.add_subparsers(dest='subcommand') mgr_parser.required = True mgr_create = mgr_parser.add_parser( 'create', help='Deploy Ceph MGR on remote host(s)' ) mgr_create.add_argument( 'mgr', metavar='HOST[:NAME]', nargs='+', type=colon_separated, help='host (and optionally the daemon name) to deploy on', ) parser.set_defaults( func=mgr, ) ceph-deploy-2.0.1/ceph_deploy/misc.py0000644000076500000240000000103212732774614020273 0ustar alfredostaff00000000000000 def mon_hosts(mons): """ Iterate through list of MON hosts, return tuples of (name, host). """ for m in mons: if m.count(':'): (name, host) = m.split(':') else: name = m host = m if name.count('.') > 0: name = name.split('.')[0] yield (name, host) def remote_shortname(socket): """ Obtains remote hostname of the socket and cuts off the domain part of its FQDN. """ return socket.gethostname().split('.', 1)[0] ceph-deploy-2.0.1/ceph_deploy/mon.py0000644000076500000240000004576312755712041020143 0ustar alfredostaff00000000000000import json import logging import re import os import time from ceph_deploy import conf, exc, admin from ceph_deploy.cliutil import priority from ceph_deploy.util.help_formatters import ToggleRawTextHelpFormatter from ceph_deploy.util import paths, net, files, packages, system from ceph_deploy.lib import remoto from ceph_deploy.new import new_mon_keyring from ceph_deploy import hosts from ceph_deploy.misc import mon_hosts from ceph_deploy import gatherkeys LOG = logging.getLogger(__name__) def mon_status_check(conn, logger, hostname, args): """ A direct check for JSON output on the monitor status. For newer versions of Ceph (dumpling and newer) a new mon_status command was added ( `ceph daemon mon mon_status` ) and should be revisited if the output changes as this check depends on that availability. """ asok_path = paths.mon.asok(args.cluster, hostname) out, err, code = remoto.process.check( conn, [ 'ceph', '--cluster={cluster}'.format(cluster=args.cluster), '--admin-daemon', asok_path, 'mon_status', ], ) for line in err: logger.error(line) try: return json.loads(b''.join(out).decode('utf-8')) except ValueError: return {} def catch_mon_errors(conn, logger, hostname, cfg, args): """ Make sure we are able to catch up common mishaps with monitors and use that state of a monitor to determine what is missing and warn apropriately about it. """ monmap = mon_status_check(conn, logger, hostname, args).get('monmap', {}) mon_initial_members = get_mon_initial_members(args, _cfg=cfg) public_addr = cfg.safe_get('global', 'public_addr') public_network = cfg.safe_get('global', 'public_network') mon_in_monmap = [ mon.get('name') for mon in monmap.get('mons', [{}]) if mon.get('name') == hostname ] if mon_initial_members is None or not hostname in mon_initial_members: logger.warning('%s is not defined in `mon initial members`', hostname) if not mon_in_monmap: logger.warning('monitor %s does not exist in monmap', hostname) if not public_addr and not public_network: logger.warning('neither `public_addr` nor `public_network` keys are defined for monitors') logger.warning('monitors may not be able to form quorum') def mon_status(conn, logger, hostname, args, silent=False): """ run ``ceph daemon mon.`hostname` mon_status`` on the remote end and provide not only the output, but be able to return a boolean status of what is going on. 
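# A hedged round-trip for mon_hosts() from misc.py above: NAME:HOST pairs
# are split apart and FQDN names are trimmed to their short form (hostnames
# are hypothetical).
from ceph_deploy.misc import mon_hosts

assert list(mon_hosts(['alpha'])) == [('alpha', 'alpha')]
assert list(mon_hosts(['alpha.example.com'])) == [('alpha', 'alpha.example.com')]
assert list(mon_hosts(['alpha:10.0.0.1'])) == [('alpha', '10.0.0.1')]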
``False`` represents a monitor that is not doing OK even if it is up and running, while ``True`` would mean the monitor is up and running correctly. """ mon = 'mon.%s' % hostname try: out = mon_status_check(conn, logger, hostname, args) if not out: logger.warning('monitor: %s, might not be running yet' % mon) return False if not silent: logger.debug('*'*80) logger.debug('status for monitor: %s' % mon) for line in json.dumps(out, indent=2, sort_keys=True).split('\n'): logger.debug(line) logger.debug('*'*80) if out['rank'] >= 0: logger.info('monitor: %s is running' % mon) return True if out['rank'] == -1 and out['state']: logger.info('monitor: %s is currently at the state of %s' % (mon, out['state'])) return True logger.info('monitor: %s is not running' % mon) return False except RuntimeError: logger.info('monitor: %s is not running' % mon) return False def keyring_parser(path): """ This is a very, very, dumb parser that will look for `[entity]` sections and return a list of those sections. It is not possible to parse this with ConfigParser even though it is almost the same thing. Since this is only used to spit out warnings, it is OK to just be naive about the parsing. """ sections = [] with open(path) as keyring: lines = keyring.readlines() for line in lines: line = line.strip('\n') if line.startswith('[') and line.endswith(']'): sections.append(line.strip('[]')) return sections def concatenate_keyrings(args): """ A helper to collect all keyrings into a single blob that will be used to inject it to mons with ``--mkfs`` on remote nodes We require all keyring files to be concatenated to be in a directory to end with ``.keyring``. """ keyring_path = os.path.abspath(args.keyrings) LOG.info('concatenating keyrings from %s' % keyring_path) LOG.info('to seed remote monitors') keyrings = [ os.path.join(keyring_path, f) for f in os.listdir(keyring_path) if os.path.isfile(os.path.join(keyring_path, f)) and f.endswith('.keyring') ] contents = [] seen_sections = {} if not keyrings: path_from_arg = os.path.abspath(args.keyrings) raise RuntimeError('could not find any keyrings in %s' % path_from_arg) for keyring in keyrings: path = os.path.abspath(keyring) for section in keyring_parser(path): if not seen_sections.get(section): seen_sections[section] = path LOG.info('adding entity "%s" from keyring %s' % (section, path)) with open(path) as k: contents.append(k.read()) else: LOG.warning('will not add keyring: %s' % path) LOG.warning('entity "%s" from keyring %s is a duplicate' % (section, path)) LOG.warning('already present in keyring: %s' % seen_sections[section]) return ''.join(contents) def mon_add(args): cfg = conf.ceph.load(args) # args.mon is a list with only one entry mon_host = args.mon[0] try: with open('{cluster}.mon.keyring'.format(cluster=args.cluster), 'rb') as f: monitor_keyring = f.read() except IOError: raise RuntimeError( 'mon keyring not found; run \'new\' to create a new cluster' ) LOG.info('ensuring configuration of new mon host: %s', mon_host) args.client = args.mon admin.admin(args) LOG.debug( 'Adding mon to cluster %s, host %s', args.cluster, mon_host, ) mon_section = 'mon.%s' % mon_host cfg_mon_addr = cfg.safe_get(mon_section, 'mon addr') if args.address: LOG.debug('using mon address via --address %s' % args.address) mon_ip = args.address elif cfg_mon_addr: LOG.debug('using mon address via configuration: %s' % cfg_mon_addr) mon_ip = cfg_mon_addr else: mon_ip = net.get_nonlocal_ip(mon_host) LOG.debug('using mon address by resolving host: %s' % mon_ip) try: LOG.debug('detecting 
platform for host %s ...', mon_host) distro = hosts.get( mon_host, username=args.username, callbacks=[packages.ceph_is_installed] ) LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename) rlogger = logging.getLogger(mon_host) # ensure remote hostname is good to go hostname_is_compatible(distro.conn, rlogger, mon_host) rlogger.debug('adding mon to %s', mon_host) args.address = mon_ip distro.mon.add(distro, args, monitor_keyring) # tell me the status of the deployed mon time.sleep(2) # give some room to start catch_mon_errors(distro.conn, rlogger, mon_host, cfg, args) mon_status(distro.conn, rlogger, mon_host, args) distro.conn.exit() except RuntimeError as e: LOG.error(e) raise exc.GenericError('Failed to add monitor to host: %s' % mon_host) def mon_create(args): cfg = conf.ceph.load(args) if not args.mon: args.mon = get_mon_initial_members(args, error_on_empty=True, _cfg=cfg) if args.keyrings: monitor_keyring = concatenate_keyrings(args) else: keyring_path = '{cluster}.mon.keyring'.format(cluster=args.cluster) try: monitor_keyring = files.read_file(keyring_path) except IOError: LOG.warning('keyring (%s) not found, creating a new one' % keyring_path) new_mon_keyring(args) monitor_keyring = files.read_file(keyring_path) LOG.debug( 'Deploying mon, cluster %s hosts %s', args.cluster, ' '.join(args.mon), ) errors = 0 for (name, host) in mon_hosts(args.mon): try: # TODO add_bootstrap_peer_hint LOG.debug('detecting platform for host %s ...', name) distro = hosts.get( host, username=args.username, callbacks=[packages.ceph_is_installed] ) LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename) rlogger = logging.getLogger(name) # ensure remote hostname is good to go hostname_is_compatible(distro.conn, rlogger, name) rlogger.debug('deploying mon to %s', name) distro.mon.create(distro, args, monitor_keyring) # tell me the status of the deployed mon time.sleep(2) # give some room to start mon_status(distro.conn, rlogger, name, args) catch_mon_errors(distro.conn, rlogger, name, cfg, args) distro.conn.exit() except RuntimeError as e: LOG.error(e) errors += 1 if errors: raise exc.GenericError('Failed to create %d monitors' % errors) def hostname_is_compatible(conn, logger, provided_hostname): """ Make sure that the host that we are connecting to has the same value as the `hostname` in the remote host, otherwise mons can fail not reaching quorum. 
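# A hedged sketch for keyring_parser() defined earlier in this module: it
# only collects "[entity]" section headers, which is all the duplicate-entity
# warnings in concatenate_keyrings() need (the key values are made up).
import tempfile
from ceph_deploy.mon import keyring_parser

with tempfile.NamedTemporaryFile('w', suffix='.keyring', delete=False) as f:
    f.write('[client.admin]\n\tkey = AQA...\n[mon.]\n\tkey = AQB...\n')
assert keyring_parser(f.name) == ['client.admin', 'mon.']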
""" logger.debug('determining if provided host has same hostname in remote') remote_hostname = conn.remote_module.shortname() if remote_hostname == provided_hostname: return logger.warning('*'*80) logger.warning('provided hostname must match remote hostname') logger.warning('provided hostname: %s' % provided_hostname) logger.warning('remote hostname: %s' % remote_hostname) logger.warning('monitors may not reach quorum and create-keys will not complete') logger.warning('*'*80) def destroy_mon(conn, cluster, hostname): import datetime import time retries = 5 path = paths.mon.path(cluster, hostname) if conn.remote_module.path_exists(path): # remove from cluster remoto.process.run( conn, [ 'ceph', '--cluster={cluster}'.format(cluster=cluster), '-n', 'mon.', '-k', '{path}/keyring'.format(path=path), 'mon', 'remove', hostname, ], timeout=7, ) # stop if conn.remote_module.path_exists(os.path.join(path, 'upstart')) or system.is_upstart(conn): status_args = [ 'initctl', 'status', 'ceph-mon', 'cluster={cluster}'.format(cluster=cluster), 'id={hostname}'.format(hostname=hostname), ] elif conn.remote_module.path_exists(os.path.join(path, 'sysvinit')): status_args = [ 'service', 'ceph', 'status', 'mon.{hostname}'.format(hostname=hostname), ] elif system.is_systemd(conn): status_args = [ 'systemctl', 'stop', 'ceph-mon@{hostname}.service'.format(hostname=hostname), ] else: raise RuntimeError('could not detect a supported init system, cannot continue') while retries: conn.logger.info('polling the daemon to verify it stopped') if is_running(conn, status_args): time.sleep(5) retries -= 1 if retries <= 0: raise RuntimeError('ceph-mon deamon did not stop') else: break # archive old monitor directory fn = '{cluster}-{hostname}-{stamp}'.format( hostname=hostname, cluster=cluster, stamp=datetime.datetime.utcnow().strftime("%Y-%m-%dZ%H:%M:%S"), ) remoto.process.run( conn, [ 'mkdir', '-p', '/var/lib/ceph/mon-removed', ], ) conn.remote_module.make_mon_removed_dir(path, fn) def mon_destroy(args): errors = 0 for (name, host) in mon_hosts(args.mon): try: LOG.debug('Removing mon from %s', name) distro = hosts.get( host, username=args.username, callbacks=[packages.ceph_is_installed] ) hostname = distro.conn.remote_module.shortname() destroy_mon( distro.conn, args.cluster, hostname, ) distro.conn.exit() except RuntimeError as e: LOG.error(e) errors += 1 if errors: raise exc.GenericError('Failed to destroy %d monitors' % errors) def mon_create_initial(args): mon_initial_members = get_mon_initial_members(args, error_on_empty=True) # create them normally through mon_create args.mon = mon_initial_members mon_create(args) # make the sets to be able to compare late mon_in_quorum = set([]) mon_members = set([host for host in mon_initial_members]) for host in mon_initial_members: mon_name = 'mon.%s' % host LOG.info('processing monitor %s', mon_name) sleeps = [20, 20, 15, 10, 10, 5] tries = 5 rlogger = logging.getLogger(host) distro = hosts.get( host, username=args.username, callbacks=[packages.ceph_is_installed] ) while tries: status = mon_status_check(distro.conn, rlogger, host, args) has_reached_quorum = status.get('state', '') in ['peon', 'leader'] if not has_reached_quorum: LOG.warning('%s monitor is not yet in quorum, tries left: %s' % (mon_name, tries)) tries -= 1 sleep_seconds = sleeps.pop() LOG.warning('waiting %s seconds before retrying', sleep_seconds) time.sleep(sleep_seconds) # Magic number else: mon_in_quorum.add(host) LOG.info('%s monitor has reached quorum!', mon_name) break distro.conn.exit() if mon_in_quorum == 
mon_members: LOG.info('all initial monitors are running and have formed quorum') LOG.info('Running gatherkeys...') gatherkeys.gatherkeys(args) else: LOG.error('Some monitors have still not reached quorum:') for host in mon_members - mon_in_quorum: LOG.error('%s', host) raise SystemExit('cluster may not be in a healthy state') def mon(args): if args.subcommand == 'create': mon_create(args) elif args.subcommand == 'add': mon_add(args) elif args.subcommand == 'destroy': mon_destroy(args) elif args.subcommand == 'create-initial': mon_create_initial(args) else: LOG.error('subcommand %s not implemented', args.subcommand) @priority(30) def make(parser): """ Ceph MON Daemon management """ parser.formatter_class = ToggleRawTextHelpFormatter mon_parser = parser.add_subparsers(dest='subcommand') mon_parser.required = True mon_add = mon_parser.add_parser( 'add', help=('R|Add a monitor to an existing cluster:\n' '\tceph-deploy mon add node1\n' 'Or:\n' '\tceph-deploy mon add --address 192.168.1.10 node1\n' 'If the section for the monitor exists and defines a `mon addr` that\n' 'will be used, otherwise it will fallback by resolving the hostname to an\n' 'IP. If `--address` is used it will override all other options.') ) mon_add.add_argument( '--address', nargs='?', ) mon_add.add_argument( 'mon', nargs=1, ) mon_create = mon_parser.add_parser( 'create', help=('R|Deploy monitors by specifying them like:\n' '\tceph-deploy mon create node1 node2 node3\n' 'If no hosts are passed it will default to use the\n' '`mon initial members` defined in the configuration.') ) mon_create.add_argument( '--keyrings', nargs='?', help='concatenate multiple keyrings to be seeded on new monitors', ) mon_create.add_argument( 'mon', nargs='*', ) mon_create_initial = mon_parser.add_parser( 'create-initial', help=('Will deploy for monitors defined in `mon initial members`, ' 'wait until they form quorum and then gatherkeys, reporting ' 'the monitor status along the process. If monitors don\'t form ' 'quorum the command will eventually time out.') ) mon_create_initial.add_argument( '--keyrings', nargs='?', help='concatenate multiple keyrings to be seeded on new monitors', ) mon_destroy = mon_parser.add_parser( 'destroy', help='Completely remove Ceph MON from remote host(s)' ) mon_destroy.add_argument( 'mon', nargs='+', ) parser.set_defaults( func=mon, ) # # Helpers # def get_mon_initial_members(args, error_on_empty=False, _cfg=None): """ Read the Ceph config file and return the value of mon_initial_members Optionally, a NeedHostError can be raised if the value is None. """ if _cfg: cfg = _cfg else: cfg = conf.ceph.load(args) mon_initial_members = cfg.safe_get('global', 'mon_initial_members') if not mon_initial_members: if error_on_empty: raise exc.NeedHostError( 'could not find `mon initial members` defined in ceph.conf' ) else: mon_initial_members = re.split(r'[,\s]+', mon_initial_members) return mon_initial_members def is_running(conn, args): """ Run a command to check the status of a mon, return a boolean. We heavily depend on the format of the output, if that ever changes we need to modify this. 
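# A hedged preview of the substring test is_running() applies below to the
# captured status output; either init style's "running" marker satisfies it
# (the sample line is taken from the docstring that follows).
result_string = b'mon.mira094: running {"version":"0.61.5"}'
assert any(token in result_string for token in (b': running', b' start/running'))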
def is_running(conn, args): """ Run a command to check the status of a mon and return a boolean. We depend heavily on the format of the output; if that ever changes, this function needs to be updated. The output of the status command should be similar to:: mon.mira094: running {"version":"0.61.5"} or, when it fails:: mon.mira094: dead {"version":"0.61.5"} mon.mira094: not running {"version":"0.61.5"} """ stdout, stderr, _ = remoto.process.check( conn, args ) result_string = b' '.join(stdout) for run_check in [b': running', b' start/running']: if run_check in result_string: return True return False ceph-deploy-2.0.1/ceph_deploy/new.py0000644000076500000240000002061712754333353020136 0ustar alfredostaff00000000000000import errno import logging import os import uuid import struct import time import base64 import socket from ceph_deploy.cliutil import priority from ceph_deploy import conf, hosts, exc from ceph_deploy.util import arg_validators, ssh, net from ceph_deploy.misc import mon_hosts from ceph_deploy.lib import remoto from ceph_deploy.connection import get_local_connection LOG = logging.getLogger(__name__) def generate_auth_key(): key = os.urandom(16) header = struct.pack( ' up_osds: difference = osds - up_osds logger.warning('there %s %d OSD%s down' % ( ['is', 'are'][difference != 1], difference, "s"[difference == 1:]) ) if osds > in_osds: difference = osds - in_osds logger.warning('there %s %d OSD%s out' % ( ['is', 'are'][difference != 1], difference, "s"[difference == 1:]) ) if full: logger.warning('OSDs are full!') if nearfull: logger.warning('OSDs are near full!') def create_osd( conn, cluster, data, journal, zap, fs_type, dmcrypt, dmcrypt_dir, storetype, block_wal, block_db, **kw): """ Runs on the OSD node and creates an OSD from a data disk. """ ceph_volume_executable = system.executable_path(conn, 'ceph-volume') args = [ ceph_volume_executable, '--cluster', cluster, 'lvm', 'create', '--%s' % storetype, '--data', data ] if zap: LOG.warning('zapping is no longer supported when preparing') if dmcrypt: args.append('--dmcrypt') # TODO: re-enable dmcrypt support once ceph-volume grows it LOG.warning('dmcrypt is currently not supported') if storetype == 'bluestore': if block_wal: args.append('--block.wal') args.append(block_wal) if block_db: args.append('--block.db') args.append(block_db) elif storetype == 'filestore': if not journal: raise RuntimeError('A journal lv or GPT partition must be specified when using filestore') args.append('--journal') args.append(journal) if kw.get('debug'): remoto.process.run( conn, args, env={'CEPH_VOLUME_DEBUG': '1'} ) else: remoto.process.run( conn, args ) def create(args, cfg, create=False): if not args.host: raise RuntimeError('Required host was not specified as a positional argument') LOG.debug( 'Creating OSD on cluster %s with data device %s', args.cluster, args.data ) key = get_bootstrap_osd_key(cluster=args.cluster) bootstrapped = set() errors = 0 hostname = args.host try: if args.data is None: raise exc.NeedDiskError(hostname) distro = hosts.get( hostname, username=args.username, callbacks=[packages.ceph_is_installed] ) LOG.info( 'Distro info: %s %s %s', distro.name, distro.release, distro.codename ) if hostname not in bootstrapped: bootstrapped.add(hostname) LOG.debug('Deploying osd to %s', hostname) conf_data = conf.ceph.load_raw(args) distro.conn.remote_module.write_conf( args.cluster, conf_data, args.overwrite_conf ) create_osd_keyring(distro.conn, args.cluster, key) # default to bluestore unless explicitly told not to storetype = 'bluestore' if args.filestore: storetype = 'filestore'
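# For illustration, with the bluestore default the create_osd() call below ends up running something like the following on the remote host (device path hypothetical): # ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdb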
create_osd( distro.conn, cluster=args.cluster, data=args.data, journal=args.journal, zap=args.zap_disk, fs_type=args.fs_type, dmcrypt=args.dmcrypt, dmcrypt_dir=args.dmcrypt_key_dir, storetype=storetype, block_wal=args.block_wal, block_db=args.block_db, debug=args.debug, ) # give the OSD a few seconds to start time.sleep(5) catch_osd_errors(distro.conn, distro.conn.logger, args) LOG.debug('Host %s is now ready for osd use.', hostname) distro.conn.exit() except RuntimeError as e: LOG.error(e) errors += 1 if errors: raise exc.GenericError('Failed to create %d OSDs' % errors) def disk_zap(args): hostname = args.host for disk in args.disk: if not disk or not hostname: raise RuntimeError('zap command needs both HOSTNAME and DISK but got "%s %s"' % (hostname, disk)) LOG.debug('zapping %s on %s', disk, hostname) distro = hosts.get( hostname, username=args.username, callbacks=[packages.ceph_is_installed] ) LOG.info( 'Distro info: %s %s %s', distro.name, distro.release, distro.codename ) distro.conn.remote_module.zeroing(disk) ceph_volume_executable = system.executable_path(distro.conn, 'ceph-volume') if args.debug: remoto.process.run( distro.conn, [ ceph_volume_executable, 'lvm', 'zap', disk, ], env={'CEPH_VOLUME_DEBUG': '1'} ) else: remoto.process.run( distro.conn, [ ceph_volume_executable, 'lvm', 'zap', disk, ], ) distro.conn.exit() def disk_list(args, cfg): command = ['fdisk', '-l'] for hostname in args.host: distro = hosts.get( hostname, username=args.username, callbacks=[packages.ceph_is_installed] ) out, err, code = remoto.process.check( distro.conn, command, ) for line in out: if line.startswith('Disk /'): distro.conn.logger.info(line) def osd_list(args, cfg): for hostname in args.host: distro = hosts.get( hostname, username=args.username, callbacks=[packages.ceph_is_installed] ) LOG.info( 'Distro info: %s %s %s', distro.name, distro.release, distro.codename ) LOG.debug('Listing OSDs on {hostname}...'.format(hostname=hostname)) ceph_volume_executable = system.executable_path(distro.conn, 'ceph-volume') if args.debug: remoto.process.run( distro.conn, [ ceph_volume_executable, 'lvm', 'list', ], env={'CEPH_VOLUME_DEBUG': '1'} ) else: remoto.process.run( distro.conn, [ ceph_volume_executable, 'lvm', 'list', ], ) distro.conn.exit() def osd(args): cfg = conf.ceph.load(args) if args.subcommand == 'list': osd_list(args, cfg) elif args.subcommand == 'create': create(args, cfg) else: LOG.error('subcommand %s not implemented', args.subcommand) sys.exit(1) def disk(args): cfg = conf.ceph.load(args) if args.subcommand == 'list': disk_list(args, cfg) elif args.subcommand == 'create': create(args, cfg) elif args.subcommand == 'zap': disk_zap(args) else: LOG.error('subcommand %s not implemented', args.subcommand) sys.exit(1) @priority(50) def make(parser): """ Prepare a data disk on remote host. """ sub_command_help = dedent(""" Create OSDs from a data disk on a remote host: ceph-deploy osd create {node} --data /path/to/device For bluestore, optional devices can be used:: ceph-deploy osd create {node} --data /path/to/data --block-db /path/to/db-device ceph-deploy osd create {node} --data /path/to/data --block-wal /path/to/wal-device ceph-deploy osd create {node} --data /path/to/data --block-db /path/to/db-device --block-wal /path/to/wal-device For filestore, the journal must be specified, as well as the objectstore:: ceph-deploy osd create {node} --filestore --data /path/to/data --journal /path/to/journal A data device can be an existing logical volume (in vg/lv format) or a raw device. Other OSD components like wal, db, and journal can likewise be a logical volume (vg/lv) or a GPT partition.
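For example, with a hypothetical pre-created volume group and logical volume:: ceph-deploy osd create node1 --data vg_data/lv_osd0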
""" ) parser.formatter_class = argparse.RawDescriptionHelpFormatter parser.description = sub_command_help osd_parser = parser.add_subparsers(dest='subcommand') osd_parser.required = True osd_list = osd_parser.add_parser( 'list', help='List OSD info from remote host(s)' ) osd_list.add_argument( 'host', nargs='+', metavar='HOST', help='remote host(s) to list OSDs from' ) osd_list.add_argument( '--debug', action='store_true', help='Enable debug mode on remote ceph-volume calls', ) osd_create = osd_parser.add_parser( 'create', help='Create new Ceph OSD daemon by preparing and activating a device' ) osd_create.add_argument( '--data', metavar='DATA', help='The OSD data logical volume (vg/lv) or absolute path to device' ) osd_create.add_argument( '--journal', help='Logical Volume (vg/lv) or path to GPT partition', ) osd_create.add_argument( '--zap-disk', action='store_true', help='DEPRECATED - cannot zap when creating an OSD' ) osd_create.add_argument( '--fs-type', metavar='FS_TYPE', choices=['xfs', 'btrfs' ], default='xfs', help='filesystem to use to format DEVICE (xfs, btrfs)', ) osd_create.add_argument( '--dmcrypt', action='store_true', help='use dm-crypt on DEVICE', ) osd_create.add_argument( '--dmcrypt-key-dir', metavar='KEYDIR', default='/etc/ceph/dmcrypt-keys', help='directory where dm-crypt keys are stored', ) osd_create.add_argument( '--filestore', action='store_true', default=None, help='filestore objectstore', ) osd_create.add_argument( '--bluestore', action='store_true', default=None, help='bluestore objectstore', ) osd_create.add_argument( '--block-db', default=None, help='bluestore block.db path' ) osd_create.add_argument( '--block-wal', default=None, help='bluestore block.wal path' ) osd_create.add_argument( 'host', nargs='?', metavar='HOST', help='Remote host to connect' ) osd_create.add_argument( '--debug', action='store_true', help='Enable debug mode on remote ceph-volume calls', ) parser.set_defaults( func=osd, ) @priority(50) def make_disk(parser): """ Manage disks on a remote host. """ disk_parser = parser.add_subparsers(dest='subcommand') disk_parser.required = True disk_zap = disk_parser.add_parser( 'zap', help='destroy existing data and filesystem on LV or partition', ) disk_zap.add_argument( 'host', nargs='?', metavar='HOST', help='Remote HOST(s) to connect' ) disk_zap.add_argument( 'disk', nargs='+', metavar='DISK', help='Disk(s) to zap' ) disk_zap.add_argument( '--debug', action='store_true', help='Enable debug mode on remote ceph-volume calls', ) disk_list = disk_parser.add_parser( 'list', help='List disk info from remote host(s)' ) disk_list.add_argument( 'host', nargs='+', metavar='HOST', help='Remote HOST(s) to list OSDs from' ) disk_list.add_argument( '--debug', action='store_true', help='Enable debug mode on remote ceph-volume calls', ) parser.set_defaults( func=disk, ) ceph-deploy-2.0.1/ceph_deploy/pkg.py0000644000076500000240000000456412656121033020120 0ustar alfredostaff00000000000000import logging from . import hosts LOG = logging.getLogger(__name__) def install(args): packages = args.install.split(',') for hostname in args.hosts: distro = hosts.get(hostname, username=args.username) LOG.info( 'Distro info: %s %s %s', distro.name, distro.release, distro.codename ) rlogger = logging.getLogger(hostname) rlogger.info('installing packages on %s' % hostname) # Do not timeout on package install. If you we this command to install # e.g. 
ceph-deploy-2.0.1/ceph_deploy/pkg.py0000644000076500000240000000456412656121033020120 0ustar alfredostaff00000000000000import logging from . import hosts LOG = logging.getLogger(__name__) def install(args): packages = args.install.split(',') for hostname in args.hosts: distro = hosts.get(hostname, username=args.username) LOG.info( 'Distro info: %s %s %s', distro.name, distro.release, distro.codename ) rlogger = logging.getLogger(hostname) rlogger.info('installing packages on %s' % hostname) # Do not timeout on package install. If we use this command to install # e.g. ceph-selinux or some other package with a long post script we can # easily timeout in the 5 minutes that we use as a default timeout; # turning off the timeout completely for the time we run the command # should make this much safer. distro.conn.global_timeout = None distro.packager.install(packages) distro.conn.exit() def remove(args): packages = args.remove.split(',') for hostname in args.hosts: distro = hosts.get(hostname, username=args.username) LOG.info( 'Distro info: %s %s %s', distro.name, distro.release, distro.codename ) rlogger = logging.getLogger(hostname) rlogger.info('removing packages from %s' % hostname) # Do not timeout on package removal. If we use this command to remove # e.g. ceph-selinux or some other package with a long post script we can # easily timeout in the 5 minutes that we use as a default timeout; # turning off the timeout completely for the time we run the command # should make this much safer. distro.conn.global_timeout = None distro.packager.remove(packages) distro.conn.exit() def pkg(args): if args.install: install(args) elif args.remove: remove(args) def make(parser): """ Manage packages on remote hosts. """ action = parser.add_mutually_exclusive_group() action.add_argument( '--install', metavar='PKG(s)', help='Comma-separated package(s) to install', ) action.add_argument( '--remove', metavar='PKG(s)', help='Comma-separated package(s) to remove', ) parser.add_argument( 'hosts', nargs='+', ) parser.set_defaults( func=pkg, ) ceph-deploy-2.0.1/ceph_deploy/repo.py0000644000076500000240000000563512620214647020305 0ustar alfredostaff00000000000000import os import logging from ceph_deploy import hosts from ceph_deploy.cliutil import priority LOG = logging.getLogger(__name__) def install_repo(distro, args, cd_conf, rlogger): if args.repo_name in cd_conf.get_repos(): LOG.info('will use repository %s from ceph-deploy config', args.repo_name) options = dict(cd_conf.items(args.repo_name)) extra_repos = cd_conf.get_list(args.repo_name, 'extra-repos') try: repo_url = options.pop('baseurl') gpg_url = options.pop('gpgkey', None) except KeyError as err: raise RuntimeError( 'missing required key: %s in config section: %s' % (err, args.repo_name) ) else: repo_url = os.environ.get('CEPH_DEPLOY_REPO_URL') or args.repo_url gpg_url = os.environ.get('CEPH_DEPLOY_GPG_URL') or args.gpg_url extra_repos = [] repo_url = repo_url.strip('/') # Remove trailing slashes distro.packager.add_repo( args.repo_name, repo_url, gpg_url=gpg_url ) for xrepo in extra_repos: rlogger.info('adding extra repo: %s' % xrepo) options = dict(cd_conf.items(xrepo)) try: repo_url = options.pop('baseurl') gpg_url = options.pop('gpgkey', None) except KeyError as err: raise RuntimeError( 'missing required key: %s in config section: %s' % (err, xrepo) ) distro.packager.add_repo( xrepo, repo_url, gpg_url=gpg_url ) def repo(args): cd_conf = getattr(args, 'cd_conf', None) for hostname in args.host: LOG.debug('Detecting platform for host %s ...', hostname) distro = hosts.get( hostname, username=args.username ) rlogger = logging.getLogger(hostname) LOG.info( 'Distro info: %s %s %s', distro.name, distro.release, distro.codename ) if args.remove: distro.packager.remove_repo(args.repo_name) else: install_repo(distro, args, cd_conf, rlogger) @priority(70) def make(parser): """ Repo definition management """ parser.add_argument( 'repo_name', metavar='REPO-NAME', help='Name of repo to manage. 
Can match an entry in cephdeploy.conf' ) parser.add_argument( '--repo-url', help='a repo URL that mirrors/contains Ceph packages' ) parser.add_argument( '--gpg-url', help='a GPG key URL to be used with custom repos' ) parser.add_argument( '--remove', '--delete', action='store_true', help='remove repo definition on remote host' ) parser.add_argument( 'host', metavar='HOST', nargs='+', help='host(s) to install on' ) parser.set_defaults( func=repo ) ceph-deploy-2.0.1/ceph_deploy/rgw.py0000644000076500000240000001446313243310454020135 0ustar alfredostaff00000000000000import errno import logging import os from ceph_deploy import conf from ceph_deploy import exc from ceph_deploy import hosts from ceph_deploy.util import system from ceph_deploy.lib import remoto from ceph_deploy.cliutil import priority LOG = logging.getLogger(__name__) def get_bootstrap_rgw_key(cluster): """ Read the bootstrap-rgw key for `cluster`. """ path = '{cluster}.bootstrap-rgw.keyring'.format(cluster=cluster) try: with open(path, 'rb') as f: return f.read() except IOError: raise RuntimeError('bootstrap-rgw keyring not found; run \'gatherkeys\'') def create_rgw(distro, name, cluster, init): conn = distro.conn path = '/var/lib/ceph/radosgw/{cluster}-{name}'.format( cluster=cluster, name=name ) conn.remote_module.safe_makedirs(path) bootstrap_keyring = '/var/lib/ceph/bootstrap-rgw/{cluster}.keyring'.format( cluster=cluster ) keypath = os.path.join(path, 'keyring') stdout, stderr, returncode = remoto.process.check( conn, [ 'ceph', '--cluster', cluster, '--name', 'client.bootstrap-rgw', '--keyring', bootstrap_keyring, 'auth', 'get-or-create', 'client.{name}'.format(name=name), 'osd', 'allow rwx', 'mon', 'allow rw', '-o', os.path.join(keypath), ] ) if returncode > 0 and returncode != errno.EACCES: for line in stderr: conn.logger.error(line) for line in stdout: # yes stdout as err because this is an error conn.logger.error(line) conn.logger.error('exit code from command was: %s' % returncode) raise RuntimeError('could not create rgw') remoto.process.check( conn, [ 'ceph', '--cluster', cluster, '--name', 'client.bootstrap-rgw', '--keyring', bootstrap_keyring, 'auth', 'get-or-create', 'client.{name}'.format(name=name), 'osd', 'allow *', 'mon', 'allow *', '-o', os.path.join(keypath), ] ) conn.remote_module.touch_file(os.path.join(path, 'done')) conn.remote_module.touch_file(os.path.join(path, init)) if init == 'upstart': remoto.process.run( conn, [ 'initctl', 'emit', 'radosgw', 'cluster={cluster}'.format(cluster=cluster), 'id={name}'.format(name=name), ], timeout=7 ) elif init == 'sysvinit': remoto.process.run( conn, [ 'service', 'ceph-radosgw', 'start', ], timeout=7 ) if distro.is_el: system.enable_service(distro.conn, service='ceph-radosgw') elif init == 'systemd': remoto.process.run( conn, [ 'systemctl', 'enable', 'ceph-radosgw@{name}'.format(name=name), ], timeout=7 ) remoto.process.run( conn, [ 'systemctl', 'start', 'ceph-radosgw@{name}'.format(name=name), ], timeout=7 ) remoto.process.run( conn, [ 'systemctl', 'enable', 'ceph.target', ], timeout=7 ) def rgw_create(args): conf_data = conf.ceph.load_raw(args) LOG.debug( 'Deploying rgw, cluster %s hosts %s', args.cluster, ' '.join(':'.join(x or '' for x in t) for t in args.rgw), ) key = get_bootstrap_rgw_key(cluster=args.cluster) bootstrapped = set() errors = 0 for hostname, name in args.rgw: try: distro = hosts.get(hostname, username=args.username) rlogger = distro.conn.logger LOG.info( 'Distro info: %s %s %s', distro.name, distro.release, distro.codename ) LOG.debug('remote 
host will use %s', distro.init) if hostname not in bootstrapped: bootstrapped.add(hostname) LOG.debug('deploying rgw bootstrap to %s', hostname) distro.conn.remote_module.write_conf( args.cluster, conf_data, args.overwrite_conf, ) path = '/var/lib/ceph/bootstrap-rgw/{cluster}.keyring'.format( cluster=args.cluster, ) if not distro.conn.remote_module.path_exists(path): rlogger.warning('rgw keyring does not exist yet, creating one') distro.conn.remote_module.write_keyring(path, key) create_rgw(distro, name, args.cluster, distro.init) distro.conn.exit() LOG.info( ('The Ceph Object Gateway (RGW) is now running on host %s and ' 'default port %s'), hostname, '7480' ) except RuntimeError as e: LOG.error(e) errors += 1 if errors: raise exc.GenericError('Failed to create %d RGWs' % errors) def rgw(args): if args.subcommand == 'create': rgw_create(args) else: LOG.error('subcommand %s not implemented', args.subcommand) def colon_separated(s): host = s name = s if s.count(':') == 1: (host, name) = s.split(':') name = 'rgw.' + name return (host, name) @priority(30) def make(parser): """ Ceph RGW daemon management """ rgw_parser = parser.add_subparsers(dest='subcommand') rgw_parser.required = True rgw_create = rgw_parser.add_parser( 'create', help='Create an RGW instance' ) rgw_create.add_argument( 'rgw', metavar='HOST[:NAME]', nargs='+', type=colon_separated, help='host (and optionally the daemon name) to deploy on. \ NAME is automatically prefixed with \'rgw.\'', ) parser.set_defaults( func=rgw, ) ceph-deploy-2.0.1/ceph_deploy/tests/0000755000076500000240000000000013312242252020113 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/tests/__init__.py0000644000076500000240000000000012620214647022222 0ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/tests/conftest.py0000644000076500000240000000472613277045417022341 0ustar alfredostaff00000000000000import logging import os import subprocess import sys import pytest LOG = logging.getLogger(__name__) def _prepend_path(env): """ Make sure the PATH contains the location where the Python binary lives. This makes sure cli tools installed in a virtualenv work. 
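Roughly the shell idiom PATH=/path/to/venv/bin:$PATH, with an illustrative path.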
""" if env is None: env = os.environ env = dict(env) new = os.path.dirname(sys.executable) path = env.get('PATH') if path is not None: new = new + ':' + path env['PATH'] = new return env class CLIFailed(Exception): """CLI tool failed""" def __init__(self, args, status): self.args = args self.status = status def __str__(self): return '{doc}: {args}: exited with status {status}'.format( doc=self.__doc__, args=self.args, status=self.status, ) class CLIProcess(object): def __init__(self, **kw): self.kw = kw def __enter__(self): try: self.p = subprocess.Popen(**self.kw) except OSError as e: raise AssertionError( 'CLI tool {args!r} does not work: {err}'.format( args=self.kw['args'], err=e, ), ) else: return self.p def __exit__(self, exc_type, exc_val, exc_tb): self.p.wait() if self.p.returncode != 0: err = CLIFailed( args=self.kw['args'], status=self.p.returncode, ) if exc_type is None: # nothing else raised, so we should complain; if # something else failed, we'll just log raise err else: LOG.error(str(err)) class CLITester(object): # provide easy way for caller to access the exception class # without importing us Failed = CLIFailed def __init__(self, tmpdir): self.tmpdir = tmpdir def __call__(self, **kw): kw.setdefault('cwd', str(self.tmpdir)) kw['env'] = _prepend_path(kw.get('env')) kw['env']['COLUMNS'] = '80' return CLIProcess(**kw) @pytest.fixture def cli(request): """ Test command line behavior. """ # the tmpdir here will be the same value as the test function # sees; we rely on that to let caller prepare and introspect # any files the cli tool will read or create tmpdir = request.getfuncargvalue('tmpdir') return CLITester(tmpdir=tmpdir) ceph-deploy-2.0.1/ceph_deploy/tests/directory.py0000644000076500000240000000035612620214647022505 0ustar alfredostaff00000000000000import contextlib import os @contextlib.contextmanager def directory(path): prev = os.open('.', os.O_RDONLY | os.O_DIRECTORY) try: os.chdir(path) yield finally: os.fchdir(prev) os.close(prev) ceph-deploy-2.0.1/ceph_deploy/tests/fakes.py0000644000076500000240000000026012754333353021570 0ustar alfredostaff00000000000000 def fake_getaddrinfo(*a, **kw): return_host = kw.get('return_host', 'host1') return [[0,0,0,0, return_host]] def fake_arg_val_hostname(self, host): return host ceph-deploy-2.0.1/ceph_deploy/tests/parser/0000755000076500000240000000000013312242253021410 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/tests/parser/__init__.py0000644000076500000240000000000012620214647023516 0ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/tests/parser/test_admin.py0000644000076500000240000000205312754333353024124 0ustar alfredostaff00000000000000import pytest from ceph_deploy.cli import get_parser from ceph_deploy.tests.util import assert_too_few_arguments class TestParserAdmin(object): def setup(self): self.parser = get_parser() def test_admin_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('admin --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy admin' in out assert 'positional arguments:' in out assert 'optional arguments:' in out def test_admin_host_required(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('admin'.split()) out, err = capsys.readouterr() assert_too_few_arguments(err) def test_admin_one_host(self): args = self.parser.parse_args('admin host1'.split()) assert args.client == ['host1'] def test_admin_multiple_hosts(self): hostnames = ['host1', 'host2', 'host3'] args = self.parser.parse_args(['admin'] + hostnames) 
assert args.client == hostnames ceph-deploy-2.0.1/ceph_deploy/tests/parser/test_calamari.py0000644000076500000240000000351012754333353024604 0ustar alfredostaff00000000000000import pytest from ceph_deploy.cli import get_parser from ceph_deploy.tests.util import assert_too_few_arguments class TestParserCalamari(object): def setup(self): self.parser = get_parser() def test_calamari_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('calamari --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy calamari' in out assert 'positional arguments:' in out assert 'optional arguments:' in out def test_calamari_connect_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('calamari connect --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy calamari connect' in out assert 'positional arguments:' in out assert 'optional arguments:' in out def test_calamari_connect_host_required(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('calamari connect'.split()) out, err = capsys.readouterr() assert_too_few_arguments(err) def test_calamari_connect_one_host(self): args = self.parser.parse_args('calamari connect host1'.split()) assert args.hosts == ['host1'] def test_calamari_connect_multiple_hosts(self): hostnames = ['host1', 'host2', 'host3'] args = self.parser.parse_args('calamari connect'.split() + hostnames) assert args.hosts == hostnames def test_calamari_connect_master_default_is_none(self): args = self.parser.parse_args('calamari connect host1'.split()) assert args.master is None def test_calamari_connect_master_custom(self): args = self.parser.parse_args('calamari connect --master master.ceph.com host1'.split()) assert args.master == "master.ceph.com" ceph-deploy-2.0.1/ceph_deploy/tests/parser/test_config.py0000644000076500000240000000414612754333353024306 0ustar alfredostaff00000000000000import pytest from ceph_deploy.cli import get_parser from ceph_deploy.tests.util import assert_too_few_arguments SUBCMDS_WITH_ARGS = ['push', 'pull'] class TestParserConfig(object): def setup(self): self.parser = get_parser() def test_config_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('config --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy config' in out assert 'positional arguments:' in out assert 'optional arguments:' in out @pytest.mark.parametrize('cmd', SUBCMDS_WITH_ARGS) def test_config_subcommands_with_args(self, cmd): self.parser.parse_args(['config'] + ['%s' % cmd] + ['host1']) def test_config_invalid_subcommand(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('config bork'.split()) out, err = capsys.readouterr() assert 'invalid choice' in err def test_config_push_host_required(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('config push'.split()) out, err = capsys.readouterr() assert_too_few_arguments(err) def test_config_push_one_host(self): args = self.parser.parse_args('config push host1'.split()) assert args.client == ['host1'] def test_config_push_multiple_hosts(self): hostnames = ['host1', 'host2', 'host3'] args = self.parser.parse_args('config push'.split() + hostnames) assert args.client == hostnames def test_config_pull_host_required(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('config pull'.split()) out, err = capsys.readouterr() assert_too_few_arguments(err) def test_config_pull_one_host(self): args = self.parser.parse_args('config pull host1'.split()) assert 
args.client == ['host1'] def test_config_pull_multiple_hosts(self): hostnames = ['host1', 'host2', 'host3'] args = self.parser.parse_args('config pull'.split() + hostnames) assert args.client == hostnames ceph-deploy-2.0.1/ceph_deploy/tests/parser/test_disk.py0000644000076500000240000000611613277045417023774 0ustar alfredostaff00000000000000import pytest from ceph_deploy.cli import get_parser from ceph_deploy.tests.util import assert_too_few_arguments SUBCMDS_WITH_ARGS = ['list', 'zap'] class TestParserDisk(object): def setup(self): self.parser = get_parser() def test_disk_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('disk --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy disk' in out assert 'positional arguments:' in out assert 'optional arguments:' in out @pytest.mark.parametrize('cmd', SUBCMDS_WITH_ARGS) def test_disk_valid_subcommands_with_args(self, cmd): self.parser.parse_args(['disk'] + ['%s' % cmd] + ['host1']) def test_disk_invalid_subcommand(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('disk bork'.split()) out, err = capsys.readouterr() assert 'invalid choice' in err def test_disk_list_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('disk list --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy disk list' in out def test_disk_list_host_required(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('disk list'.split()) out, err = capsys.readouterr() assert_too_few_arguments(err) def test_disk_list_single_host(self): args = self.parser.parse_args('disk list host1'.split()) assert args.host[0] == 'host1' assert args.debug is False def test_disk_list_single_host_debug(self): args = self.parser.parse_args('disk list --debug host1'.split()) assert args.host[0] == 'host1' assert args.debug is True def test_disk_list_multi_host(self): hostnames = ['host1', 'host2', 'host3'] args = self.parser.parse_args('disk list'.split() + hostnames) assert args.host == hostnames def test_disk_zap_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('disk zap --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy disk zap' in out def test_disk_zap_host_required(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('disk zap'.split()) out, err = capsys.readouterr() assert_too_few_arguments(err) def test_disk_zap_single_host(self): args = self.parser.parse_args('disk zap host1 /dev/sdb'.split()) assert args.disk[0] == '/dev/sdb' assert args.host == 'host1' assert args.debug is False def test_disk_zap_multi_host(self): host = 'host1' disks = ['/dev/sda1', '/dev/sda2'] args = self.parser.parse_args(['disk', 'zap', host] + disks) assert args.disk == disks def test_disk_zap_debug_true(self): args = \ self.parser.parse_args('disk zap --debug host1 /dev/sdb'.split()) assert args.disk[0] == '/dev/sdb' assert args.host == 'host1' assert args.debug is True ceph-deploy-2.0.1/ceph_deploy/tests/parser/test_gatherkeys.py0000644000076500000240000000212312754333353025200 0ustar alfredostaff00000000000000import pytest from ceph_deploy.cli import get_parser from ceph_deploy.tests.util import assert_too_few_arguments class TestParserGatherKeys(object): def setup(self): self.parser = get_parser() def test_gather_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('gatherkeys --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy gatherkeys' in out assert 'positional arguments:' in 
out assert 'optional arguments:' in out def test_gatherkeys_host_required(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('gatherkeys'.split()) out, err = capsys.readouterr() assert_too_few_arguments(err) def test_gatherkeys_one_host(self): args = self.parser.parse_args('gatherkeys host1'.split()) assert args.mon == ['host1'] def test_gatherkeys_multiple_hosts(self): hostnames = ['host1', 'host2', 'host3'] args = self.parser.parse_args(['gatherkeys'] + hostnames) assert args.mon == hostnames ceph-deploy-2.0.1/ceph_deploy/tests/parser/test_install.py0000644000076500000240000001435712754333353024514 0ustar alfredostaff00000000000000import pytest from ceph_deploy.cli import get_parser from ceph_deploy.tests.util import assert_too_few_arguments COMP_FLAGS = [ 'mon', 'mds', 'rgw', 'osd', 'common', 'all' ] class TestParserInstall(object): def setup(self): self.parser = get_parser() def test_install_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('install --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy install' in out assert 'positional arguments:' in out assert 'optional arguments:' in out def test_install_host_required(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('install'.split()) out, err = capsys.readouterr() assert_too_few_arguments(err) def test_install_one_host(self): args = self.parser.parse_args('install host1'.split()) assert args.host == ['host1'] def test_install_multiple_hosts(self): hostnames = ['host1', 'host2', 'host3'] args = self.parser.parse_args(['install'] + hostnames) assert frozenset(args.host) == frozenset(hostnames) def test_install_release_default_is_none(self): args = self.parser.parse_args('install host1'.split()) assert args.release is None assert args.version_kind == "stable" def test_install_release(self): args = self.parser.parse_args('install --release hammer host1'.split()) assert args.release == "hammer" assert args.version_kind == "stable" @pytest.mark.skipif(reason="No release name sanity checking yet") def test_install_release_bad_codename(self): args = self.parser.parse_args('install --release cephalopod host1'.split()) assert args.release != "cephalopod" def test_install_testing_default_is_none(self): args = self.parser.parse_args('install host1'.split()) assert args.testing is None assert args.version_kind == "stable" def test_install_testing_true(self): args = self.parser.parse_args('install --testing host1'.split()) assert len(args.testing) == 0 assert args.version_kind == "testing" def test_install_dev_disabled_by_default(self): args = self.parser.parse_args('install host1'.split()) # dev defaults to master, but version_kind nullifies it assert args.dev == "master" assert args.version_kind == "stable" def test_install_dev_custom_version(self): args = self.parser.parse_args('install --dev v0.80.8 host1'.split()) assert args.dev == "v0.80.8" assert args.version_kind == "dev" @pytest.mark.skipif(reason="test reflects desire, but not code reality") def test_install_dev_option_default_is_master(self): # I don't think this is the way argparse works. 
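# (To spell it out: argparse lets an option declared with nargs='?' consume a directly following token as its value, so 'install --dev host1' would set args.dev to 'host1'; the const/default pair only kicks in when no value follows the flag or the flag is absent.)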
args = self.parser.parse_args('install --dev host1'.split()) assert args.dev == "master" assert args.version_kind == "dev" def test_install_release_testing_mutex(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('install --release hammer --testing host1'.split()) out, err = capsys.readouterr() assert 'not allowed with argument' in err def test_install_release_dev_mutex(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('install --release hammer --dev master host1'.split()) out, err = capsys.readouterr() assert 'not allowed with argument' in err def test_install_testing_dev_mutex(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('install --testing --dev master host1'.split()) out, err = capsys.readouterr() assert 'not allowed with argument' in err @pytest.mark.parametrize('comp', COMP_FLAGS) def test_install_component_default_is_false(self, comp): args = self.parser.parse_args('install host1'.split()) assert getattr(args, 'install_%s' % comp) is False @pytest.mark.parametrize('comp', COMP_FLAGS) def test_install_component_true(self, comp): args = self.parser.parse_args(('install --%s host1' % comp).split()) assert getattr(args, 'install_%s' % comp) is True def test_install_multi_component(self): args = self.parser.parse_args(('install --mon --rgw host1').split()) assert args.install_mon assert args.install_rgw def test_install_adjust_repos_default_is_true(self): args = self.parser.parse_args('install host1'.split()) assert args.adjust_repos def test_install_adjust_repos_false(self): args = self.parser.parse_args('install --no-adjust-repos host1'.split()) assert not args.adjust_repos def test_install_adjust_repos_false_with_custom_release(self): args = self.parser.parse_args('install --release firefly --no-adjust-repos host1'.split()) assert args.release == "firefly" assert not args.adjust_repos def test_install_repo_default_is_false(self): args = self.parser.parse_args('install host1'.split()) assert not args.repo def test_install_repo_true(self): args = self.parser.parse_args('install --repo host1'.split()) assert args.repo def test_install_local_mirror_default_is_none(self): args = self.parser.parse_args('install host1'.split()) assert args.local_mirror is None def test_install_local_mirror_custom_path(self): args = self.parser.parse_args('install --local-mirror /mnt/mymirror host1'.split()) assert args.local_mirror == "/mnt/mymirror" def test_install_repo_url_default_is_none(self): args = self.parser.parse_args('install host1'.split()) assert args.repo_url is None def test_install_repo_url_custom_path(self): args = self.parser.parse_args('install --repo-url https://ceph.com host1'.split()) assert args.repo_url == "https://ceph.com" def test_install_gpg_url_default_is_none(self): args = self.parser.parse_args('install host1'.split()) assert args.gpg_url is None def test_install_gpg_url_custom_path(self): args = self.parser.parse_args('install --gpg-url https://ceph.com/key host1'.split()) assert args.gpg_url == "https://ceph.com/key" ceph-deploy-2.0.1/ceph_deploy/tests/parser/test_main.py0000644000076500000240000000672013243310457023757 0ustar alfredostaff00000000000000import pytest import ceph_deploy from ceph_deploy.cli import get_parser from ceph_deploy.tests.util import assert_too_few_arguments SUBCMDS_WITH_ARGS = [ 'new', 'install', 'rgw', 'mds', 'mon', 'gatherkeys', 'disk', 'osd', 'admin', 'config', 'uninstall', 'purgedata', 'purge', 'pkg', 'calamari' ] SUBCMDS_WITHOUT_ARGS = ['forgetkeys'] class TestParserMain(object): def 
setup(self): self.parser = get_parser() def test_verbose_true(self): args = self.parser.parse_args('--verbose forgetkeys'.split()) assert args.verbose def test_verbose_default_is_false(self): args = self.parser.parse_args('forgetkeys'.split()) assert not args.verbose def test_quiet_true(self): args = self.parser.parse_args('--quiet forgetkeys'.split()) assert args.quiet def test_quiet_default_is_false(self): args = self.parser.parse_args('forgetkeys'.split()) assert not args.quiet def test_verbose_quiet_are_mutually_exclusive(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('--verbose --quiet forgetkeys'.split()) out, err = capsys.readouterr() assert 'not allowed with argument' in err def test_version(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('--version'.split()) out, err = capsys.readouterr() assert ceph_deploy.__version__ in (out.strip(), err.strip()) def test_custom_username(self): args = self.parser.parse_args('--username trhoden forgetkeys'.split()) assert args.username == 'trhoden' def test_default_username_is_none(self): args = self.parser.parse_args('forgetkeys'.split()) assert args.username is None def test_overwrite_conf_default_false(self): args = self.parser.parse_args('forgetkeys'.split()) assert not args.overwrite_conf def test_overwrite_conf_true(self): args = self.parser.parse_args('--overwrite-conf forgetkeys'.split()) assert args.overwrite_conf def test_default_cluster_name(self): args = self.parser.parse_args('forgetkeys'.split()) assert args.cluster == 'ceph' def test_default_ceph_conf_is_none(self): args = self.parser.parse_args('forgetkeys'.split()) assert args.ceph_conf is None def test_custom_ceph_conf(self): args = self.parser.parse_args('--ceph-conf /tmp/ceph.conf forgetkeys'.split()) assert args.ceph_conf == '/tmp/ceph.conf' @pytest.mark.parametrize('cmd', SUBCMDS_WITH_ARGS) def test_valid_subcommands_with_args(self, cmd, capsys): with pytest.raises(SystemExit): self.parser.parse_args(['%s' % cmd]) out, err = capsys.readouterr() assert_too_few_arguments(err) assert 'invalid choice' not in err @pytest.mark.parametrize('cmd', SUBCMDS_WITHOUT_ARGS) def test_valid_subcommands_without_args(self, cmd, capsys): self.parser.parse_args(['%s' % cmd]) def test_invalid_subcommand(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('bork'.split()) out, err = capsys.readouterr() assert 'invalid choice' in err def test_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('--help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy' in out assert 'optional arguments:' in out assert 'commands:' in out ceph-deploy-2.0.1/ceph_deploy/tests/parser/test_mds.py0000644000076500000240000000230312754333353023615 0ustar alfredostaff00000000000000import pytest from ceph_deploy.cli import get_parser from ceph_deploy.tests.util import assert_too_few_arguments class TestParserMDS(object): def setup(self): self.parser = get_parser() def test_mds_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('mds --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy mds' in out assert 'positional arguments:' in out assert 'optional arguments:' in out def test_mds_create_host_required(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('mds create'.split()) out, err = capsys.readouterr() assert_too_few_arguments(err) def test_mds_create_one_host(self): args = self.parser.parse_args('mds create host1'.split()) assert args.mds[0][0] == 'host1' 
def test_mds_create_multiple_hosts(self): hostnames = ['host1', 'host2', 'host3'] args = self.parser.parse_args(['mds', 'create'] + hostnames) # args.mds is a list of tuples, and tuple[0] is the hostname hosts = [x[0] for x in args.mds] assert frozenset(hosts) == frozenset(hostnames) ceph-deploy-2.0.1/ceph_deploy/tests/parser/test_mon.py0000644000076500000240000001111212620214647023615 0ustar alfredostaff00000000000000import pytest from ceph_deploy.cli import get_parser SUBCMDS_WITH_ARGS = ['add', 'destroy', 'create'] SUBCMDS_WITHOUT_ARGS = ['create', 'create-initial'] class TestParserMON(object): def setup(self): self.parser = get_parser() def test_mon_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('mon --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy mon' in out assert 'positional arguments:' in out assert 'optional arguments:' in out @pytest.mark.parametrize('cmd', SUBCMDS_WITH_ARGS) def test_mon_valid_subcommands_with_args(self, cmd, capsys): args = self.parser.parse_args(['mon'] + ['%s' % cmd] + ['host1']) assert args.subcommand == cmd @pytest.mark.parametrize('cmd', SUBCMDS_WITHOUT_ARGS) def test_mon_valid_subcommands_without_args(self, cmd, capsys): args = self.parser.parse_args(['mon'] + ['%s' % cmd]) assert args.subcommand == cmd def test_mon_invalid_subcommand(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('mon bork'.split()) out, err = capsys.readouterr() assert 'invalid choice' in err def test_mon_create_initial_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('mon create-initial --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy mon create-initial' in out def test_mon_create_initial_keyrings_default_none(self): args = self.parser.parse_args('mon create-initial'.split()) assert args.keyrings is None def test_mon_create_initial_keyrings_custom_dir(self): args = self.parser.parse_args('mon create-initial --keyrings /tmp/keys'.split()) assert args.keyrings == "/tmp/keys" def test_mon_create_initial_keyrings_host_raises_err(self): with pytest.raises(SystemExit): self.parser.parse_args('mon create-initial test1'.split()) def test_mon_create_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('mon create --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy mon create' in out def test_mon_create_keyrings_default_none(self): args = self.parser.parse_args('mon create'.split()) assert args.keyrings is None def test_mon_create_keyrings_custom_dir(self): args = self.parser.parse_args('mon create --keyrings /tmp/keys'.split()) assert args.keyrings == "/tmp/keys" def test_mon_create_single_host(self): args = self.parser.parse_args('mon create test1'.split()) assert args.mon == ['test1'] def test_mon_create_multi_host(self): hosts = ['host1', 'host2', 'host3'] args = self.parser.parse_args('mon create'.split() + hosts) assert args.mon == hosts def test_mon_add_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('mon add --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy mon add' in out def test_mon_add_address_default_none(self): args = self.parser.parse_args('mon add test1'.split()) assert args.address is None def test_mon_add_address_custom_addr(self): args = self.parser.parse_args('mon add test1 --address 10.10.0.1'.split()) assert args.address == '10.10.0.1' def test_mon_add_no_host_raises_err(self): with pytest.raises(SystemExit): self.parser.parse_args('mon add'.split()) 
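# 'mon add' declares its positional with nargs=1 (see mon.make above), so exactly one host is accepted: zero hosts or more than one exit with a usage error, as the neighboring tests verify.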
def test_mon_add_one_host_okay(self): args = self.parser.parse_args('mon add test1'.split()) assert args.mon == ["test1"] def test_mon_add_multi_host_raises_err(self): with pytest.raises(SystemExit): self.parser.parse_args('mon add test1 test2'.split()) def test_mon_destroy_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('mon destroy --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy mon destroy' in out def test_mon_destroy_no_host_raises_err(self): with pytest.raises(SystemExit): self.parser.parse_args('mon destroy'.split()) def test_mon_destroy_one_host_okay(self): args = self.parser.parse_args('mon destroy test1'.split()) assert args.mon == ["test1"] def test_mon_destroy_multi_host(self): hosts = ['host1', 'host2', 'host3'] args = self.parser.parse_args('mon destroy'.split() + hosts) assert args.mon == hosts ceph-deploy-2.0.1/ceph_deploy/tests/parser/test_new.py0000644000076500000240000000636012754333353023632 0ustar alfredostaff00000000000000import pytest from mock import patch from ceph_deploy.cli import get_parser from ceph_deploy.tests.fakes import fake_arg_val_hostname from ceph_deploy.tests.util import assert_too_few_arguments @patch('ceph_deploy.util.arg_validators.Hostname.__call__', fake_arg_val_hostname) class TestParserNew(object): def setup(self): self.parser = get_parser() def test_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('new --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy new' in out assert 'positional arguments:' in out assert 'optional arguments:' in out def test_new_copykey_true_by_default(self): args = self.parser.parse_args('new host1'.split()) assert args.ssh_copykey def test_new_copykey_false(self): args = self.parser.parse_args('new --no-ssh-copykey host1'.split()) assert not args.ssh_copykey def test_new_fsid_none_by_default(self): args = self.parser.parse_args('new host1'.split()) assert args.fsid is None def test_new_fsid_custom_fsid(self): args = self.parser.parse_args('new --fsid bc50d015-65c9-457a-bfed-e37b92756527 host1'.split()) assert args.fsid == 'bc50d015-65c9-457a-bfed-e37b92756527' @pytest.mark.skipif(reason="no UUID validation yet") def test_new_fsid_custom_fsid_bad(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('new --fsid bc50d015-65c9-457a-bfed-e37'.split()) out, err = capsys.readouterr() #TODO check for correct error string in err def test_new_networks_none_by_default(self): args = self.parser.parse_args('new host1'.split()) assert args.public_network is None assert args.cluster_network is None def test_new_public_network_custom(self): args = self.parser.parse_args('new --public-network 10.10.0.0/16 host1'.split()) assert args.public_network == "10.10.0.0/16" def test_new_cluster_network_custom(self): args = self.parser.parse_args('new --cluster-network 10.10.0.0/16 host1'.split()) assert args.cluster_network == "10.10.0.0/16" def test_new_public_network_custom_bad(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('new --public-network 10.10.0.0'.split()) out, err = capsys.readouterr() assert "error: subnet must" in err def test_new_cluster_network_custom_bad(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('new --cluster-network 10.10.0.0'.split()) out, err = capsys.readouterr() assert "error: subnet must" in err def test_new_mon_required(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('new'.split()) out, err = capsys.readouterr() 
assert_too_few_arguments(err) def test_new_one_mon(self): hostnames = ['test1'] args = self.parser.parse_args(['new'] + hostnames) assert args.mon == hostnames def test_new_multiple_mons(self): hostnames = ['test1', 'test2', 'test3'] args = self.parser.parse_args(['new'] + hostnames) assert frozenset(args.mon) == frozenset(hostnames) ceph-deploy-2.0.1/ceph_deploy/tests/parser/test_osd.py0000644000076500000240000000754213277045417023633 0ustar alfredostaff00000000000000import pytest from ceph_deploy.cli import get_parser from ceph_deploy.tests.util import assert_too_few_arguments SUBCMDS_WITH_ARGS = ['list', 'create'] class TestParserOSD(object): def setup(self): self.parser = get_parser() def test_osd_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('osd --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy osd' in out assert 'positional arguments:' in out assert 'optional arguments:' in out @pytest.mark.parametrize('cmd', SUBCMDS_WITH_ARGS) def test_osd_valid_subcommands_with_args(self, cmd): self.parser.parse_args(['osd'] + ['%s' % cmd] + ['host1']) def test_osd_invalid_subcommand(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('osd bork'.split()) out, err = capsys.readouterr() assert 'invalid choice' in err def test_osd_list_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('osd list --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy osd list' in out def test_osd_list_host_required(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('osd list'.split()) out, err = capsys.readouterr() assert_too_few_arguments(err) def test_osd_list_single_host(self): args = self.parser.parse_args('osd list host1'.split()) assert args.host[0] == 'host1' def test_osd_list_multi_host(self): hostnames = ['host1', 'host2', 'host3'] args = self.parser.parse_args('osd list'.split() + hostnames) assert args.host == hostnames def test_osd_create_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('osd create --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy osd create' in out def test_osd_create_single_host(self): args = self.parser.parse_args('osd create host1 --data /dev/sdb'.split()) assert args.host == 'host1' assert args.data == '/dev/sdb' def test_osd_create_zap_default_false(self): args = self.parser.parse_args('osd create host1 --data /dev/sdb'.split()) assert args.zap_disk is False def test_osd_create_zap_true(self): args = self.parser.parse_args('osd create --zap-disk host1 --data /dev/sdb'.split()) assert args.zap_disk is True def test_osd_create_fstype_default_xfs(self): args = self.parser.parse_args('osd create host1 --data /dev/sdb'.split()) assert args.fs_type == "xfs" def test_osd_create_fstype_btrfs(self): args = self.parser.parse_args('osd create --fs-type btrfs host1 --data /dev/sdb'.split()) assert args.fs_type == "btrfs" def test_osd_create_fstype_invalid(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('osd create --fs-type bork host1 --data /dev/sdb'.split()) out, err = capsys.readouterr() assert 'invalid choice' in err def test_osd_create_dmcrypt_default_false(self): args = self.parser.parse_args('osd create host1 --data /dev/sdb'.split()) assert args.dmcrypt is False def test_osd_create_dmcrypt_true(self): args = self.parser.parse_args('osd create --dmcrypt host1 --data /dev/sdb'.split()) assert args.dmcrypt is True def test_osd_create_dmcrypt_key_dir_default(self): args = 
self.parser.parse_args('osd create host1 --data /dev/sdb'.split()) assert args.dmcrypt_key_dir == "/etc/ceph/dmcrypt-keys" def test_osd_create_dmcrypt_key_dir_custom(self): args = self.parser.parse_args('osd create --dmcrypt --dmcrypt-key-dir /tmp/keys host1 --data /dev/sdb'.split()) assert args.dmcrypt_key_dir == "/tmp/keys" ceph-deploy-2.0.1/ceph_deploy/tests/parser/test_pkg.py0000644000076500000240000000471112754333353023620 0ustar alfredostaff00000000000000import pytest from ceph_deploy.cli import get_parser from ceph_deploy.tests.util import assert_too_few_arguments class TestParserPkg(object): def setup(self): self.parser = get_parser() def test_pkg_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('pkg --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy pkg' in out assert 'positional arguments:' in out assert 'optional arguments:' in out def test_pkg_install_host_required(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('pkg --install pkg1'.split()) out, err = capsys.readouterr() assert_too_few_arguments(err) def test_pkg_install_one_host(self): args = self.parser.parse_args('pkg --install pkg1 host1'.split()) assert args.hosts == ['host1'] assert args.install == "pkg1" def test_pkg_install_multiple_hosts(self): hostnames = ['host1', 'host2', 'host3'] args = self.parser.parse_args('pkg --install pkg1'.split() + hostnames) assert args.hosts == hostnames assert args.install == "pkg1" def test_pkg_install_muliple_pkgs(self): args = self.parser.parse_args('pkg --install pkg1,pkg2 host1'.split()) assert args.install == "pkg1,pkg2" def test_pkg_remove_host_required(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('pkg --remove pkg1'.split()) out, err = capsys.readouterr() assert_too_few_arguments(err) def test_pkg_remove_one_host(self): args = self.parser.parse_args('pkg --remove pkg1 host1'.split()) assert args.hosts == ['host1'] assert args.remove == "pkg1" def test_pkg_remove_multiple_hosts(self): hostnames = ['host1', 'host2', 'host3'] args = self.parser.parse_args('pkg --remove pkg1'.split() + hostnames) assert args.hosts == hostnames assert args.remove == "pkg1" def test_pkg_remove_muliple_pkgs(self): args = self.parser.parse_args('pkg --remove pkg1,pkg2 host1'.split()) assert args.remove == "pkg1,pkg2" def test_pkg_install_remove_are_mutex(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('pkg --install pkg2 --remove pkg1 host1'.split()) out, err = capsys.readouterr() assert "argument --remove: not allowed with argument --install" in err ceph-deploy-2.0.1/ceph_deploy/tests/parser/test_purge.py0000644000076500000240000000207512754333353024162 0ustar alfredostaff00000000000000import pytest from ceph_deploy.cli import get_parser from ceph_deploy.tests.util import assert_too_few_arguments class TestParserPurge(object): def setup(self): self.parser = get_parser() def test_purge_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('purge --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy purge' in out assert 'positional arguments:' in out assert 'optional arguments:' in out def test_purge_host_required(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('purge'.split()) out, err = capsys.readouterr() assert_too_few_arguments(err) def test_purge_one_host(self): args = self.parser.parse_args('purge host1'.split()) assert args.host == ['host1'] def test_purge_multiple_hosts(self): hostnames = ['host1', 'host2', 'host3'] 
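# Host order is not significant for purge, hence the frozenset comparison below.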
args = self.parser.parse_args(['purge'] + hostnames) assert frozenset(args.host) == frozenset(hostnames) ceph-deploy-2.0.1/ceph_deploy/tests/parser/test_purgedata.py0000644000076500000240000000214512754333353025012 0ustar alfredostaff00000000000000import pytest from ceph_deploy.cli import get_parser from ceph_deploy.tests.util import assert_too_few_arguments class TestParserPurgeData(object): def setup(self): self.parser = get_parser() def test_purgedata_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('purgedata --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy purgedata' in out assert 'positional arguments:' in out assert 'optional arguments:' in out def test_purgedata_host_required(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('purgedata'.split()) out, err = capsys.readouterr() assert_too_few_arguments(err) def test_purgedata_one_host(self): args = self.parser.parse_args('purgedata host1'.split()) assert args.host == ['host1'] def test_purgedata_multiple_hosts(self): hostnames = ['host1', 'host2', 'host3'] args = self.parser.parse_args(['purgedata'] + hostnames) assert frozenset(args.host) == frozenset(hostnames) ceph-deploy-2.0.1/ceph_deploy/tests/parser/test_repo.py0000644000076500000240000000500212754333353023776 0ustar alfredostaff00000000000000import pytest from ceph_deploy.cli import get_parser from ceph_deploy.tests.util import assert_too_few_arguments class TestParserRepo(object): def setup(self): self.parser = get_parser() def test_repo_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('repo --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy repo' in out assert 'positional arguments:' in out assert 'optional arguments:' in out def test_repo_name_required(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('repo'.split()) out, err = capsys.readouterr() assert_too_few_arguments(err) def test_repo_host_required(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('repo ceph'.split()) out, err = capsys.readouterr() assert_too_few_arguments(err) def test_repo_one_host(self): args = self.parser.parse_args('repo ceph host1'.split()) assert args.host == ['host1'] def test_repo_multiple_hosts(self): hostnames = ['host1', 'host2', 'host3'] args = self.parser.parse_args(['repo', 'ceph'] + hostnames) assert frozenset(args.host) == frozenset(hostnames) def test_repo_name(self): args = self.parser.parse_args('repo ceph host1'.split()) assert args.repo_name == 'ceph' def test_repo_remove_default_is_false(self): args = self.parser.parse_args('repo ceph host1'.split()) assert not args.remove def test_repo_remove_set_true(self): args = self.parser.parse_args('repo ceph --remove host1'.split()) assert args.remove def test_repo_remove_delete_alias(self): args = self.parser.parse_args('repo ceph --delete host1'.split()) assert args.remove def test_repo_url_default_is_none(self): args = self.parser.parse_args('repo ceph host1'.split()) assert args.repo_url is None def test_repo_url_custom_path(self): args = self.parser.parse_args('repo ceph --repo-url https://ceph.com host1'.split()) assert args.repo_url == "https://ceph.com" def test_repo_gpg_url_default_is_none(self): args = self.parser.parse_args('repo ceph host1'.split()) assert args.gpg_url is None def test_repo_gpg_url_custom_path(self): args = self.parser.parse_args('repo ceph --gpg-url https://ceph.com/key host1'.split()) assert args.gpg_url == "https://ceph.com/key" 
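# The rgw parser tests that follow exercise the HOST[:NAME] splitting done by colon_separated() in ceph_deploy/rgw.py; for reference: # >>> colon_separated('node1:gw1') # ('node1', 'rgw.gw1') # >>> colon_separated('node1') # ('node1', 'node1')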
ceph-deploy-2.0.1/ceph_deploy/tests/parser/test_rgw.py0000644000076500000240000000230312754333353023631 0ustar alfredostaff00000000000000import pytest from ceph_deploy.cli import get_parser from ceph_deploy.tests.util import assert_too_few_arguments class TestParserRGW(object): def setup(self): self.parser = get_parser() def test_rgw_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('rgw --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy rgw' in out assert 'positional arguments:' in out assert 'optional arguments:' in out def test_rgw_create_host_required(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('rgw create'.split()) out, err = capsys.readouterr() assert_too_few_arguments(err) def test_rgw_create_one_host(self): args = self.parser.parse_args('rgw create host1'.split()) assert args.rgw[0][0] == 'host1' def test_rgw_create_multiple_hosts(self): hostnames = ['host1', 'host2', 'host3'] args = self.parser.parse_args(['rgw', 'create'] + hostnames) # args.rgw is a list of tuples, and tuple[0] is the hostname hosts = [x[0] for x in args.rgw] assert frozenset(hosts) == frozenset(hostnames) ceph-deploy-2.0.1/ceph_deploy/tests/parser/test_uninstall.py0000644000076500000240000000214512754333353025047 0ustar alfredostaff00000000000000import pytest from ceph_deploy.cli import get_parser from ceph_deploy.tests.util import assert_too_few_arguments class TestParserUninstall(object): def setup(self): self.parser = get_parser() def test_uninstall_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('uninstall --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy uninstall' in out assert 'positional arguments:' in out assert 'optional arguments:' in out def test_uninstall_host_required(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('uninstall'.split()) out, err = capsys.readouterr() assert_too_few_arguments(err) def test_uninstall_one_host(self): args = self.parser.parse_args('uninstall host1'.split()) assert args.host == ['host1'] def test_uninstall_multiple_hosts(self): hostnames = ['host1', 'host2', 'host3'] args = self.parser.parse_args(['uninstall'] + hostnames) assert frozenset(args.host) == frozenset(hostnames) ceph-deploy-2.0.1/ceph_deploy/tests/test_cli_admin.py0000644000076500000240000000350612754333353023463 0ustar alfredostaff00000000000000import os import subprocess import pytest from mock import patch, MagicMock, Mock from ceph_deploy.cli import _main as main from ceph_deploy.hosts import remotes from ceph_deploy.tests.directory import directory def test_bad_no_conf(tmpdir, cli): with pytest.raises(cli.Failed) as err: with cli( args=['ceph-deploy', 'admin', 'host1'], stderr=subprocess.PIPE, ) as p: result = p.stderr.read().decode('utf-8') assert 'No such file or directory: \'ceph.conf\'' in result assert err.value.status == 1 def test_bad_no_key(tmpdir, cli): with tmpdir.join('ceph.conf').open('w'): pass with pytest.raises(cli.Failed) as err: with cli( args=['ceph-deploy', 'admin', 'host1'], stderr=subprocess.PIPE, ) as p: result = p.stderr.read().decode('utf-8') assert 'ceph.client.admin.keyring not found' in result assert err.value.status == 1 def test_write_keyring(tmpdir): with tmpdir.join('ceph.conf').open('w'): pass with tmpdir.join('ceph.client.admin.keyring').open('wb'): pass etc_ceph = os.path.join(str(tmpdir), 'etc', 'ceph') os.makedirs(etc_ceph) distro = MagicMock() distro.conn = MagicMock() remotes.write_file.__defaults__ = (0o644, 
str(tmpdir), -1, -1) distro.conn.remote_module = remotes distro.conn.remote_module.write_conf = Mock() with patch('ceph_deploy.admin.hosts'): with patch('ceph_deploy.admin.hosts.get', MagicMock(return_value=distro)): with directory(str(tmpdir)): main(args=['admin', 'host1']) keyring_file = os.path.join(etc_ceph, 'ceph.client.admin.keyring') assert os.path.exists(keyring_file) file_mode = oct(os.stat(keyring_file).st_mode & 0o777) assert file_mode == oct(0o600) ceph-deploy-2.0.1/ceph_deploy/tests/test_cli_mon.py0000644000076500000240000000370112754333353023161 0ustar alfredostaff00000000000000import subprocess import pytest from mock import Mock, patch from ceph_deploy.cli import _main as main from ceph_deploy.tests.directory import directory from ceph_deploy.tests.util import assert_too_few_arguments #TODO: This test does check that things fail if the .conf file is missing def test_bad_no_conf(tmpdir, cli): with pytest.raises(cli.Failed) as err: with cli( args=['ceph-deploy', 'mon'], stderr=subprocess.PIPE, ) as p: result = p.stderr.read().decode('utf-8') assert 'usage: ceph-deploy' in result assert_too_few_arguments(result) assert err.value.status == 2 def make_fake_connection(platform_information=None): get_connection = Mock() get_connection.return_value = get_connection get_connection.remote_module.platform_information = Mock( return_value=platform_information) return get_connection def test_new(tmpdir, capsys): with tmpdir.join('ceph.conf').open('w') as f: f.write("""\ [global] fsid = 6ede5564-3cf1-44b5-aa96-1c77b0c3e1d0 mon initial members = host1 """) fake_ip_addresses = lambda x: ['10.0.0.1'] try: with patch('ceph_deploy.new.net.ip_addresses', fake_ip_addresses): with patch('ceph_deploy.new.net.get_nonlocal_ip', lambda x: '10.0.0.1'): with patch('ceph_deploy.new.arg_validators.Hostname', lambda: lambda x: x): with patch('ceph_deploy.new.hosts'): with directory(str(tmpdir)): main(['-v', 'new', '--no-ssh-copykey', 'host1']) except SystemExit as e: raise AssertionError('Unexpected exit: %s', e) out, err = capsys.readouterr() err = err.lower() assert 'creating new cluster named ceph' in err assert 'monitor host1 at 10.0.0.1' in err assert 'resolving host host1' in err assert "monitor initial members are ['host1']" in err assert "monitor addrs are ['10.0.0.1']" in err ceph-deploy-2.0.1/ceph_deploy/tests/test_cli_new.py0000644000076500000240000000464613277045417023174 0ustar alfredostaff00000000000000import re import uuid from mock import patch from ceph_deploy import conf from ceph_deploy.cli import _main as main from ceph_deploy.tests.directory import directory import pytest def test_write_global_conf_section(tmpdir): fake_ip_addresses = lambda x: ['10.0.0.1'] with patch('ceph_deploy.new.hosts'): with patch('ceph_deploy.new.net.ip_addresses', fake_ip_addresses): with patch('ceph_deploy.new.net.get_nonlocal_ip', lambda x: '10.0.0.1'): with patch('ceph_deploy.new.arg_validators.Hostname', lambda: lambda x: x): with directory(str(tmpdir)): main(args=['new', 'host1']) with tmpdir.join('ceph.conf').open() as f: cfg = conf.ceph.parse(f) assert cfg.sections() == ['global'] @pytest.fixture def newcfg(request): tmpdir = request.getfuncargvalue('tmpdir') fake_ip_addresses = lambda x: ['10.0.0.1'] def new(*args): with patch('ceph_deploy.new.net.ip_addresses', fake_ip_addresses): with patch('ceph_deploy.new.hosts'): with patch('ceph_deploy.new.net.get_nonlocal_ip', lambda x: '10.0.0.1'): with patch('ceph_deploy.new.arg_validators.Hostname', lambda: lambda x: x): with directory(str(tmpdir)): 
main(args=['new'] + list(args)) with tmpdir.join('ceph.conf').open() as f: cfg = conf.ceph.parse(f) return cfg return new def test_uuid(newcfg): cfg = newcfg('host1') fsid = cfg.get('global', 'fsid') # make sure it's a valid uuid uuid.UUID(hex=fsid) # make sure it looks pretty, too UUID_RE = re.compile( r'^[0-9a-f]{8}-' + r'[0-9a-f]{4}-' # constant 4 here, we want to enforce randomness and not leak # MACs or time + r'4[0-9a-f]{3}-' + r'[0-9a-f]{4}-' + r'[0-9a-f]{12}$', ) assert UUID_RE.match(fsid) def test_mons(newcfg): cfg = newcfg('node01', 'node07', 'node34') mon_initial_members = cfg.get('global', 'mon_initial_members') assert mon_initial_members == 'node01, node07, node34' def test_defaults(newcfg): cfg = newcfg('host1') assert cfg.get('global', 'auth cluster required') == 'cephx' assert cfg.get('global', 'auth service required') == 'cephx' assert cfg.get('global', 'auth client required') == 'cephx' ceph-deploy-2.0.1/ceph_deploy/tests/test_cli_rgw.py0000644000076500000240000000044312620214647023163 0ustar alfredostaff00000000000000import ceph_deploy.rgw as rgw def test_rgw_prefix_auto(): daemon = rgw.colon_separated("hostname") assert daemon == ("hostname", "rgw.hostname") def test_rgw_prefix_custom(): daemon = rgw.colon_separated("hostname:mydaemon") assert daemon == ("hostname", "rgw.mydaemon") ceph-deploy-2.0.1/ceph_deploy/tests/test_conf.py0000644000076500000240000000313612754333353022470 0ustar alfredostaff00000000000000try: from cStringIO import StringIO except ImportError: from io import StringIO from ceph_deploy import conf def test_simple(): f = StringIO("""\ [foo] bar = baz """) cfg = conf.ceph.parse(f) assert cfg.get('foo', 'bar') == 'baz' def test_indent_space(): f = StringIO("""\ [foo] bar = baz """) cfg = conf.ceph.parse(f) assert cfg.get('foo', 'bar') == 'baz' def test_indent_tab(): f = StringIO("""\ [foo] \tbar = baz """) cfg = conf.ceph.parse(f) assert cfg.get('foo', 'bar') == 'baz' def test_words_underscore(): f = StringIO("""\ [foo] bar_thud = baz """) cfg = conf.ceph.parse(f) assert cfg.get('foo', 'bar_thud') == 'baz' assert cfg.get('foo', 'bar thud') == 'baz' def test_words_space(): f = StringIO("""\ [foo] bar thud = baz """) cfg = conf.ceph.parse(f) assert cfg.get('foo', 'bar_thud') == 'baz' assert cfg.get('foo', 'bar thud') == 'baz' def test_words_many(): f = StringIO("""\ [foo] bar__ thud quux = baz """) cfg = conf.ceph.parse(f) assert cfg.get('foo', 'bar_thud_quux') == 'baz' assert cfg.get('foo', 'bar thud quux') == 'baz' def test_write_words_underscore(): cfg = conf.ceph.CephConf() cfg.add_section('foo') cfg.set('foo', 'bar thud quux', 'baz') f = StringIO() cfg.write(f) f.seek(0) assert f.readlines() == ['[foo]\n', 'bar_thud_quux = baz\n','\n'] def test_section_repeat(): f = StringIO("""\ [foo] bar = bez thud = quux [foo] bar = baz """) cfg = conf.ceph.parse(f) assert cfg.get('foo', 'bar') == 'baz' assert cfg.get('foo', 'thud') == 'quux' ceph-deploy-2.0.1/ceph_deploy/tests/test_gather_keys.py0000644000076500000240000001213013243310456024033 0ustar alfredostaff00000000000000from ceph_deploy import gatherkeys from ceph_deploy import new import mock import pytest import tempfile import os import shutil def get_key_static(keytype, key_path): with open(key_path, 'w') as f: f.write("[%s]\n" % (gatherkeys.keytype_identity(keytype))) f.write("key=fred\n") def get_key_dynamic(keytype, key_path): with open(key_path, 'w', 0o600) as f: f.write("[%s]\n" % (gatherkeys.keytype_identity(keytype))) f.write("key='%s'" % (new.generate_auth_key())) def 
mock_time_strftime(time_format):
    return "20160412144231"


def mock_get_keys_fail(args, host, dest_dir):
    return False


def mock_get_keys_success_static(args, host, dest_dir):
    for keytype in ["admin", "mon", "osd", "mds", "mgr", "rgw"]:
        keypath = gatherkeys.keytype_path_to(args, keytype)
        path = "%s/%s" % (dest_dir, keypath)
        get_key_static(keytype, path)
    return True


def mock_get_keys_success_dynamic(args, host, dest_dir):
    for keytype in ["admin", "mon", "osd", "mds", "mgr", "rgw"]:
        keypath = gatherkeys.keytype_path_to(args, keytype)
        path = "%s/%s" % (dest_dir, keypath)
        get_key_dynamic(keytype, path)
    return True


class TestGatherKeys(object):
    """
    Since we are testing things that affect the content of the current
    working directory we should test in a clean empty directory.
    """
    def setup(self):
        """
        Make temp directory for tests and set as current working directory
        """
        self.originaldir = os.getcwd()
        self.test_dir = tempfile.mkdtemp()
        os.chdir(self.test_dir)

    def teardown(self):
        """
        Set current working directory to old value
        Remove temp directory and content
        """
        os.chdir(self.originaldir)
        shutil.rmtree(self.test_dir)

    @mock.patch('ceph_deploy.gatherkeys.gatherkeys_with_mon', mock_get_keys_fail)
    def test_gatherkeys_fail(self):
        """
        Test 'gatherkeys' fails when connecting to mon fails.
        """
        args = mock.Mock()
        args.cluster = "ceph"
        args.mon = ['host1']
        with pytest.raises(RuntimeError):
            gatherkeys.gatherkeys(args)

    @mock.patch('ceph_deploy.gatherkeys.gatherkeys_with_mon', mock_get_keys_success_static)
    def test_gatherkeys_success(self):
        """
        Test 'gatherkeys' succeeds when getting keys that are always the same.
        Test 'gatherkeys' does not back up identical keys.
        """
        args = mock.Mock()
        args.cluster = "ceph"
        args.mon = ['host1']
        gatherkeys.gatherkeys(args)
        dir_content = os.listdir(self.test_dir)
        assert "ceph.client.admin.keyring" in dir_content
        assert "ceph.bootstrap-mds.keyring" in dir_content
        assert "ceph.bootstrap-mgr.keyring" in dir_content
        assert "ceph.mon.keyring" in dir_content
        assert "ceph.bootstrap-osd.keyring" in dir_content
        assert "ceph.bootstrap-rgw.keyring" in dir_content
        assert len(dir_content) == 6
        # Now we repeat as no new keys are generated
        gatherkeys.gatherkeys(args)
        dir_content = os.listdir(self.test_dir)
        assert len(dir_content) == 6

    @mock.patch('ceph_deploy.gatherkeys.time.strftime', mock_time_strftime)
    @mock.patch('ceph_deploy.gatherkeys.gatherkeys_with_mon', mock_get_keys_success_dynamic)
    def test_gatherkeys_backs_up(self):
        """
        Test 'gatherkeys' succeeds when getting keys that are always different.
        Test 'gatherkeys' does back up keys that are not identical.
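        Each pre-existing keyring that differs from the newly fetched one is
        expected to be renamed with a timestamp suffix (for example
        ceph.mon.keyring-20160412144231, using the mocked strftime value
        above) rather than overwritten, which is what the 12-entry directory
        listing asserted below reflects.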
""" args = mock.Mock() args.cluster = "ceph" args.mon = ['host1'] gatherkeys.gatherkeys(args) dir_content = os.listdir(self.test_dir) assert "ceph.client.admin.keyring" in dir_content assert "ceph.bootstrap-mds.keyring" in dir_content assert "ceph.bootstrap-mgr.keyring" in dir_content assert "ceph.mon.keyring" in dir_content assert "ceph.bootstrap-osd.keyring" in dir_content assert "ceph.bootstrap-rgw.keyring" in dir_content assert len(dir_content) == 6 # Now we repeat as new keys are generated and old # are backed up gatherkeys.gatherkeys(args) dir_content = os.listdir(self.test_dir) mocked_time = mock_time_strftime(None) assert "ceph.client.admin.keyring" in dir_content assert "ceph.bootstrap-mds.keyring" in dir_content assert "ceph.bootstrap-mgr.keyring" in dir_content assert "ceph.mon.keyring" in dir_content assert "ceph.bootstrap-osd.keyring" in dir_content assert "ceph.bootstrap-rgw.keyring" in dir_content assert "ceph.client.admin.keyring-%s" % (mocked_time) in dir_content assert "ceph.bootstrap-mds.keyring-%s" % (mocked_time) in dir_content assert "ceph.bootstrap-mgr.keyring-%s" % (mocked_time) in dir_content assert "ceph.mon.keyring-%s" % (mocked_time) in dir_content assert "ceph.bootstrap-osd.keyring-%s" % (mocked_time) in dir_content assert "ceph.bootstrap-rgw.keyring-%s" % (mocked_time) in dir_content assert len(dir_content) == 12 ceph-deploy-2.0.1/ceph_deploy/tests/test_gather_keys_missing.py0000644000076500000240000001257513243310456025601 0ustar alfredostaff00000000000000from ceph_deploy import gatherkeys from ceph_deploy import new import mock import tempfile import shutil import os import pytest class mock_conn(object): def __init__(self): pass class mock_distro(object): def __init__(self): self.conn = mock_conn() class mock_rlogger(object): def error(self, *arg): return def debug(self, *arg): return def mock_remoto_process_check_success(conn, args): secret = new.generate_auth_key() out = '[mon.]\nkey = %s\ncaps mon = allow *\n' % secret return out.encode('utf-8').split(b'\n'), [], 0 def mock_remoto_process_check_rc_error(conn, args): return [b""], [b"this failed\n"], 1 class TestGatherKeysMissing(object): """ Since we are testing things that effect the content a directory we should test in a clean empty directory. """ def setup(self): """ Make temp directory for tests. 
""" self.args = mock.Mock() self.distro = mock_distro() self.test_dir = tempfile.mkdtemp() self.rlogger = mock_rlogger() self.keypath_remote = "some_path" def teardown(self): """ Remove temp directory and content """ shutil.rmtree(self.test_dir) @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_success) def test_success_admin(self): keytype = 'admin' rc = gatherkeys.gatherkeys_missing( self.args, self.distro, self.rlogger, self.keypath_remote, keytype, self.test_dir ) assert rc is True keyname = gatherkeys.keytype_path_to(self.args, keytype) keypath_gen = os.path.join(self.test_dir, keyname) assert os.path.isfile(keypath_gen) @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_success) def test_success_mds(self): keytype = 'mds' rc = gatherkeys.gatherkeys_missing( self.args, self.distro, self.rlogger, self.keypath_remote, keytype, self.test_dir ) assert rc is True keyname = gatherkeys.keytype_path_to(self.args, keytype) keypath_gen = os.path.join(self.test_dir, keyname) assert os.path.isfile(keypath_gen) @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_success) def test_success_mgr(self): keytype = 'mgr' rc = gatherkeys.gatherkeys_missing( self.args, self.distro, self.rlogger, self.keypath_remote, keytype, self.test_dir ) assert rc is True keyname = gatherkeys.keytype_path_to(self.args, keytype) keypath_gen = os.path.join(self.test_dir, keyname) assert os.path.isfile(keypath_gen) @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_success) def test_success_osd(self): keytype = 'osd' rc = gatherkeys.gatherkeys_missing( self.args, self.distro, self.rlogger, self.keypath_remote, keytype, self.test_dir ) assert rc is True keyname = gatherkeys.keytype_path_to(self.args, keytype) keypath_gen = os.path.join(self.test_dir, keyname) assert os.path.isfile(keypath_gen) @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_success) def test_success_rgw(self): keytype = 'rgw' rc = gatherkeys.gatherkeys_missing( self.args, self.distro, self.rlogger, self.keypath_remote, keytype, self.test_dir ) assert rc is True keyname = gatherkeys.keytype_path_to(self.args, keytype) keypath_gen = os.path.join(self.test_dir, keyname) assert os.path.isfile(keypath_gen) @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_rc_error) def test_remoto_process_check_rc_error(self): keytype = 'admin' rc = gatherkeys.gatherkeys_missing( self.args, self.distro, self.rlogger, self.keypath_remote, keytype, self.test_dir ) assert rc is False keyname = gatherkeys.keytype_path_to(self.args, keytype) keypath_gen = os.path.join(self.test_dir, keyname) assert not os.path.isfile(keypath_gen) @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_success) def test_fail_identity_missing(self): keytype = 'silly' with pytest.raises(RuntimeError): gatherkeys.gatherkeys_missing( self.args, self.distro, self.rlogger, self.keypath_remote, keytype, self.test_dir ) @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_success) def test_fail_capabilities_missing(self): keytype = 'mon' with pytest.raises(RuntimeError): gatherkeys.gatherkeys_missing( self.args, self.distro, self.rlogger, self.keypath_remote, keytype, self.test_dir ) ceph-deploy-2.0.1/ceph_deploy/tests/test_gather_keys_with_mon.py0000644000076500000240000002000012754333353025741 0ustar alfredostaff00000000000000from ceph_deploy import gatherkeys from ceph_deploy import new import 
mock import json import copy remoto_process_check_success_output = { "name": "ceph-node1", "rank": 0, "state": "leader", "election_epoch": 6, "quorum": [ 0, 1, 2 ], "outside_quorum": [], "extra_probe_peers": [ "192.168.99.125:6789\/0", "192.168.99.126:6789\/0" ], "sync_provider": [], "monmap": { "epoch": 1, "fsid": "4dbee7eb-929b-4f3f-ad23-8a4e47235e40", "modified": "2016-04-11 05:35:21.665220", "created": "2016-04-11 05:35:21.665220", "mons": [ { "rank": 0, "name": "host0", "addr": "192.168.99.124:6789\/0" }, { "rank": 1, "name": "host1", "addr": "192.168.99.125:6789\/0" }, { "rank": 2, "name": "host2", "addr": "192.168.99.126:6789\/0" } ] } } class mock_remote_module(object): def get_file(self, path): return self.get_file_result def shortname(self): hostname_split = self.longhostname.split('.') return hostname_split[0] class mock_conn(object): def __init__(self): self.remote_module = mock_remote_module() class mock_distro(object): def __init__(self): self.conn = mock_conn() def mock_hosts_get_file_key_content(host, **kwargs): output = mock_distro() mon_keyring = '[mon.]\nkey = %s\ncaps mon = allow *\n' % new.generate_auth_key() output.conn.remote_module.get_file_result = mon_keyring.encode('utf-8') output.conn.remote_module.longhostname = host return output def mock_hosts_get_file_key_content_none(host, **kwargs): output = mock_distro() output.conn.remote_module.get_file_result = None output.conn.remote_module.longhostname = host return output def mock_gatherkeys_missing_success(args, distro, rlogger, path_keytype_mon, keytype, dest_dir): return True def mock_gatherkeys_missing_fail(args, distro, rlogger, path_keytype_mon, keytype, dest_dir): return False def mock_remoto_process_check_success(conn, args): out = json.dumps(remoto_process_check_success_output,sort_keys=True, indent=4) return out.encode('utf-8').split(b'\n'), [], 0 def mock_remoto_process_check_rc_error(conn, args): return [b""], [b"this failed\n"], 1 def mock_remoto_process_check_out_not_json(conn, args): return [b"}bad output{"], [b""], 0 def mock_remoto_process_check_out_missing_quorum(conn, args): outdata = copy.deepcopy(remoto_process_check_success_output) del outdata["quorum"] out = json.dumps(outdata,sort_keys=True, indent=4) return out.encode('utf-8').split(b'\n'), [], 0 def mock_remoto_process_check_out_missing_quorum_1(conn, args): outdata = copy.deepcopy(remoto_process_check_success_output) del outdata["quorum"][1] out = json.dumps(outdata,sort_keys=True, indent=4) return out.encode('utf-8').split(b'\n'), [], 0 def mock_remoto_process_check_out_missing_monmap(conn, args): outdata = copy.deepcopy(remoto_process_check_success_output) del outdata["monmap"] out = json.dumps(outdata,sort_keys=True, indent=4) return out.encode('utf-8').split(b'\n'), [], 0 def mock_remoto_process_check_out_missing_mons(conn, args): outdata = copy.deepcopy(remoto_process_check_success_output) del outdata["monmap"]["mons"] out = json.dumps(outdata,sort_keys=True, indent=4) return out.encode('utf-8').split(b'\n'), [], 0 def mock_remoto_process_check_out_missing_monmap_host1(conn, args): outdata = copy.deepcopy(remoto_process_check_success_output) del outdata["monmap"]["mons"][1] out = json.dumps(outdata,sort_keys=True, indent=4) return out.encode('utf-8').split(b'\n'), [], 0 class TestGatherKeysWithMon(object): """ Test gatherkeys_with_mon function """ def setup(self): self.args = mock.Mock() self.args.cluster = "ceph" self.args.mon = ['host1'] self.host = 'host1' self.test_dir = '/tmp' 
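    # Note on the mock_remoto_process_check_* helpers above: remoto.process.check
    # is expected to return a three-tuple of (stdout_lines, stderr_lines,
    # exitcode), where the stream entries are lists of byte strings -- hence
    # the `out.encode('utf-8').split(b'\n'), [], 0` pattern used to fake a
    # successful `mon_status` call, and `[b""], [b"this failed\n"], 1` to fake
    # a failing one.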
@mock.patch('ceph_deploy.gatherkeys.gatherkeys_missing', mock_gatherkeys_missing_success) @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_success) @mock.patch('ceph_deploy.hosts.get', mock_hosts_get_file_key_content) def test_success(self): rc = gatherkeys.gatherkeys_with_mon(self.args, self.host, self.test_dir) assert rc is True @mock.patch('ceph_deploy.gatherkeys.gatherkeys_missing', mock_gatherkeys_missing_success) @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_success) @mock.patch('ceph_deploy.hosts.get', mock_hosts_get_file_key_content_none) def test_monkey_none(self): rc = gatherkeys.gatherkeys_with_mon(self.args, self.host, self.test_dir) assert rc is False @mock.patch('ceph_deploy.gatherkeys.gatherkeys_missing', mock_gatherkeys_missing_fail) @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_success) @mock.patch('ceph_deploy.hosts.get', mock_hosts_get_file_key_content) def test_missing_fail(self): rc = gatherkeys.gatherkeys_with_mon(self.args, self.host, self.test_dir) assert rc is False @mock.patch('ceph_deploy.gatherkeys.gatherkeys_missing', mock_gatherkeys_missing_success) @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_rc_error) @mock.patch('ceph_deploy.hosts.get', mock_hosts_get_file_key_content) def test_remoto_process_check_rc_error(self): rc = gatherkeys.gatherkeys_with_mon(self.args, self.host, self.test_dir) assert rc is False @mock.patch('ceph_deploy.gatherkeys.gatherkeys_missing', mock_gatherkeys_missing_success) @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_out_not_json) @mock.patch('ceph_deploy.hosts.get', mock_hosts_get_file_key_content) def test_remoto_process_check_out_not_json(self): rc = gatherkeys.gatherkeys_with_mon(self.args, self.host, self.test_dir) assert rc is False @mock.patch('ceph_deploy.gatherkeys.gatherkeys_missing', mock_gatherkeys_missing_success) @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_out_missing_quorum) @mock.patch('ceph_deploy.hosts.get', mock_hosts_get_file_key_content) def test_remoto_process_check_out_missing_quorum(self): rc = gatherkeys.gatherkeys_with_mon(self.args, self.host, self.test_dir) assert rc is False @mock.patch('ceph_deploy.gatherkeys.gatherkeys_missing', mock_gatherkeys_missing_success) @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_out_missing_quorum_1) @mock.patch('ceph_deploy.hosts.get', mock_hosts_get_file_key_content) def test_remoto_process_check_out_missing_quorum_1(self): rc = gatherkeys.gatherkeys_with_mon(self.args, self.host, self.test_dir) assert rc is False @mock.patch('ceph_deploy.gatherkeys.gatherkeys_missing', mock_gatherkeys_missing_success) @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_out_missing_mons) @mock.patch('ceph_deploy.hosts.get', mock_hosts_get_file_key_content) def test_remoto_process_check_out_missing_mon(self): rc = gatherkeys.gatherkeys_with_mon(self.args, self.host, self.test_dir) assert rc is False @mock.patch('ceph_deploy.gatherkeys.gatherkeys_missing', mock_gatherkeys_missing_success) @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_out_missing_monmap_host1) @mock.patch('ceph_deploy.hosts.get', mock_hosts_get_file_key_content) def test_remoto_process_check_out_missing_monmap_host1(self): rc = gatherkeys.gatherkeys_with_mon(self.args, self.host, self.test_dir) assert rc is False 
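# The failure cases above imply the shape of the validation that
# gatherkeys_with_mon performs on the `mon_status` JSON: the queried mon must
# appear in monmap['mons'], and its rank must be present in `quorum`. A
# minimal sketch of that check (an illustration inferred from these tests,
# with a hypothetical name, not the shipped implementation) could look like:
#
#     def mon_has_quorum(mon_status, shortname):
#         # find the rank of this mon in the monmap, then check quorum
#         for mon in mon_status.get('monmap', {}).get('mons', []):
#             if mon.get('name') == shortname:
#                 return mon.get('rank') in mon_status.get('quorum', [])
#         return False  # mon missing from monmap entirely
#
# With the sample output above, mon_has_quorum(output, 'host1') is True, and
# deleting 'quorum', 'monmap', 'mons', or the host1 entry makes it False,
# matching the assertions in each test.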
ceph-deploy-2.0.1/ceph_deploy/tests/test_install.py0000644000076500000240000001261213277045417023212 0ustar alfredostaff00000000000000from mock import Mock from ceph_deploy import install class TestSanitizeArgs(object): def setup(self): self.args = Mock() # set the default behavior we set in cli.py self.args.default_release = False self.args.stable = None def test_args_release_not_specified(self): self.args.release = None result = install.sanitize_args(self.args) # XXX # we should get `args.release` to be the latest release # but we don't want to be updating this test every single # time there is a new default value, and we can't programatically # change that. Future improvement: make the default release a # variable in `ceph_deploy/__init__.py` assert result.default_release is True def test_args_release_is_specified(self): self.args.release = 'dumpling' result = install.sanitize_args(self.args) assert result.default_release is False def test_args_release_stable_is_used(self): self.args.stable = 'dumpling' result = install.sanitize_args(self.args) assert result.release == 'dumpling' def test_args_stable_is_not_used(self): self.args.release = 'dumpling' result = install.sanitize_args(self.args) assert result.stable is None class TestDetectComponents(object): def setup(self): self.args = Mock() # default values for install_* flags self.args.install_all = False self.args.install_mds = False self.args.install_mgr = False self.args.install_mon = False self.args.install_osd = False self.args.install_rgw = False self.args.install_tests = False self.args.install_common = False self.args.repo = False self.distro = Mock() def test_install_with_repo_option_returns_no_packages(self): self.args.repo = True result = install.detect_components(self.args, self.distro) assert result == [] def test_install_all_returns_all_packages_deb(self): self.args.install_all = True self.distro.is_rpm = False self.distro.is_deb = True self.distro.is_pkgtarxz = False result = sorted(install.detect_components(self.args, self.distro)) assert result == sorted([ 'ceph-osd', 'ceph-mds', 'ceph', 'ceph-mon', 'radosgw' ]) def test_install_all_with_other_options_returns_all_packages_deb(self): self.distro.is_rpm = False self.distro.is_deb = True self.distro.is_pkgtarxz = False self.args.install_all = True self.args.install_mds = True self.args.install_mgr = True self.args.install_mon = True self.args.install_osd = True result = sorted(install.detect_components(self.args, self.distro)) assert result == sorted([ 'ceph-osd', 'ceph-mds', 'ceph', 'ceph-mon', 'radosgw' ]) def test_install_all_returns_all_packages_rpm(self): self.args.install_all = True result = sorted(install.detect_components(self.args, self.distro)) assert result == sorted([ 'ceph-osd', 'ceph-mds', 'ceph', 'ceph-mon', 'ceph-radosgw' ]) def test_install_all_with_other_options_returns_all_packages_rpm(self): self.args.install_all = True self.args.install_mds = True self.args.install_mon = True self.args.install_mgr = True self.args.install_osd = True result = sorted(install.detect_components(self.args, self.distro)) assert result == sorted([ 'ceph-osd', 'ceph-mds', 'ceph', 'ceph-mon', 'ceph-radosgw' ]) def test_install_all_returns_all_packages_pkgtarxz(self): self.args.install_all = True self.distro.is_rpm = False self.distro.is_deb = False self.distro.is_pkgtarxz = True result = sorted(install.detect_components(self.args, self.distro)) assert result == sorted([ 'ceph', ]) def test_install_all_with_other_options_returns_all_packages_pkgtarxz(self): self.distro.is_rpm = 
False self.distro.is_deb = False self.distro.is_pkgtarxz = True self.args.install_all = True self.args.install_mds = True self.args.install_mgr = True self.args.install_mon = True self.args.install_osd = True result = sorted(install.detect_components(self.args, self.distro)) assert result == sorted([ 'ceph', ]) def test_install_only_one_component(self): self.args.install_osd = True result = install.detect_components(self.args, self.distro) assert result == ['ceph-osd'] def test_install_a_couple_of_components(self): self.args.install_osd = True self.args.install_mds = True self.args.install_mgr = True result = sorted(install.detect_components(self.args, self.distro)) assert result == sorted(['ceph-osd', 'ceph-mds', 'ceph-mgr']) def test_install_tests(self): self.args.install_tests = True result = sorted(install.detect_components(self.args, self.distro)) assert result == sorted(['ceph-test']) def test_install_all_should_be_default_when_no_options_passed(self): result = sorted(install.detect_components(self.args, self.distro)) assert result == sorted([ 'ceph-osd', 'ceph-mds', 'ceph', 'ceph-mon', 'ceph-radosgw' ]) ceph-deploy-2.0.1/ceph_deploy/tests/test_keys_equivalent.py0000644000076500000240000001434512754333353024757 0ustar alfredostaff00000000000000from ceph_deploy import gatherkeys from ceph_deploy import new import tempfile import shutil import pytest def write_key_mon_with_caps(path, secret): mon_keyring = '[mon.]\nkey = %s\ncaps mon = allow *\n' % secret with open(path, 'w', 0o600) as f: f.write(mon_keyring) def write_key_mon_with_caps_with_tab(path, secret): mon_keyring = '[mon.]\n\tkey = %s\n\tcaps mon = allow *\n' % secret with open(path, 'w', 0o600) as f: f.write(mon_keyring) def write_key_mon_with_caps_with_tab_quote(path, secret): mon_keyring = '[mon.]\n\tkey = %s\n\tcaps mon = "allow *"\n' % secret with open(path, 'w', 0o600) as f: f.write(mon_keyring) def write_key_mon_without_caps(path, secret): mon_keyring = '[mon.]\nkey = %s\n' % secret with open(path, 'w', 0o600) as f: f.write(mon_keyring) class TestKeysEquivalent(object): """ Since we are testing things that effect the content of the current working directory we should test in a clean empty directory. """ def setup(self): """ Make temp directory for tests. 
""" self.test_dir = tempfile.mkdtemp() def teardown(self): """ Remove temp directory and content """ shutil.rmtree(self.test_dir) def test_identical_with_caps(self): secret_01 = new.generate_auth_key() key_path_01 = self.test_dir + "/01.keyring" key_path_02 = self.test_dir + "/02.keyring" write_key_mon_with_caps(key_path_01, secret_01) write_key_mon_with_caps(key_path_02, secret_01) same = gatherkeys._keyring_equivalent(key_path_01, key_path_02) assert same is True def test_different_with_caps(self): secret_01 = new.generate_auth_key() secret_02 = new.generate_auth_key() key_path_01 = self.test_dir + "/01.keyring" key_path_02 = self.test_dir + "/02.keyring" write_key_mon_with_caps(key_path_01, secret_01) write_key_mon_with_caps(key_path_02, secret_02) same = gatherkeys._keyring_equivalent(key_path_01, key_path_02) assert same is False def test_identical_without_caps(self): secret_01 = new.generate_auth_key() key_path_01 = self.test_dir + "/01.keyring" key_path_02 = self.test_dir + "/02.keyring" write_key_mon_without_caps(key_path_01, secret_01) write_key_mon_without_caps(key_path_02, secret_01) same = gatherkeys._keyring_equivalent(key_path_01, key_path_02) assert same is True def test_different_without_caps(self): secret_01 = new.generate_auth_key() secret_02 = new.generate_auth_key() key_path_01 = self.test_dir + "/01.keyring" key_path_02 = self.test_dir + "/02.keyring" write_key_mon_without_caps(key_path_01, secret_01) write_key_mon_without_caps(key_path_02, secret_02) same = gatherkeys._keyring_equivalent(key_path_01, key_path_02) assert same is False def test_identical_mixed_caps(self): secret_01 = new.generate_auth_key() key_path_01 = self.test_dir + "/01.keyring" key_path_02 = self.test_dir + "/02.keyring" write_key_mon_with_caps(key_path_01, secret_01) write_key_mon_without_caps(key_path_02, secret_01) same = gatherkeys._keyring_equivalent(key_path_01, key_path_02) assert same is True def test_different_mixed_caps(self): secret_01 = new.generate_auth_key() secret_02 = new.generate_auth_key() key_path_01 = self.test_dir + "/01.keyring" key_path_02 = self.test_dir + "/02.keyring" write_key_mon_with_caps(key_path_01, secret_01) write_key_mon_without_caps(key_path_02, secret_02) same = gatherkeys._keyring_equivalent(key_path_01, key_path_02) assert same is False def test_identical_caps_mixed_tabs(self): secret_01 = new.generate_auth_key() key_path_01 = self.test_dir + "/01.keyring" key_path_02 = self.test_dir + "/02.keyring" write_key_mon_with_caps(key_path_01, secret_01) write_key_mon_with_caps_with_tab(key_path_02, secret_01) same = gatherkeys._keyring_equivalent(key_path_01, key_path_02) assert same is True def test_different_caps_mixed_tabs(self): secret_01 = new.generate_auth_key() secret_02 = new.generate_auth_key() key_path_01 = self.test_dir + "/01.keyring" key_path_02 = self.test_dir + "/02.keyring" write_key_mon_with_caps(key_path_01, secret_01) write_key_mon_with_caps_with_tab(key_path_02, secret_02) same = gatherkeys._keyring_equivalent(key_path_01, key_path_02) assert same is False def test_identical_caps_mixed_quote(self): secret_01 = new.generate_auth_key() key_path_01 = self.test_dir + "/01.keyring" key_path_02 = self.test_dir + "/02.keyring" write_key_mon_with_caps_with_tab(key_path_01, secret_01) write_key_mon_with_caps_with_tab_quote(key_path_02, secret_01) same = gatherkeys._keyring_equivalent(key_path_01, key_path_02) assert same is True def test_different_caps_mixed_quote(self): secret_01 = new.generate_auth_key() secret_02 = new.generate_auth_key() key_path_01 = 
self.test_dir + "/01.keyring" key_path_02 = self.test_dir + "/02.keyring" write_key_mon_with_caps_with_tab(key_path_01, secret_01) write_key_mon_with_caps_with_tab_quote(key_path_02, secret_02) same = gatherkeys._keyring_equivalent(key_path_01, key_path_02) assert same is False def test_missing_key_1(self): secret_02 = new.generate_auth_key() key_path_01 = self.test_dir + "/01.keyring" key_path_02 = self.test_dir + "/02.keyring" write_key_mon_with_caps_with_tab_quote(key_path_02, secret_02) with pytest.raises(IOError): gatherkeys._keyring_equivalent(key_path_01, key_path_02) def test_missing_key_2(self): secret_01 = new.generate_auth_key() key_path_01 = self.test_dir + "/01.keyring" key_path_02 = self.test_dir + "/02.keyring" write_key_mon_with_caps_with_tab_quote(key_path_01, secret_01) with pytest.raises(IOError): gatherkeys._keyring_equivalent(key_path_01, key_path_02) ceph-deploy-2.0.1/ceph_deploy/tests/test_mon.py0000644000076500000240000000656012754333353022340 0ustar alfredostaff00000000000000from ceph_deploy import exc, mon from ceph_deploy.conf.ceph import CephConf from mock import Mock import pytest def make_fake_conf(): return CephConf() # NOTE: If at some point we re-use this helper, move it out # and make it even more generic def make_fake_conn(receive_returns=None): receive_returns = receive_returns or ([b'{}'], [], 0) conn = Mock() conn.return_value = conn conn.execute = conn conn.receive = Mock(return_value=receive_returns) conn.gateway.remote_exec = conn.receive conn.result = Mock(return_value=conn) return conn class TestGetMonInitialMembers(object): def test_assert_if_mon_none_and_empty_True(self): cfg = make_fake_conf() with pytest.raises(exc.NeedHostError): mon.get_mon_initial_members(Mock(), True, cfg) def test_return_if_mon_none_and_empty_false(self): cfg = make_fake_conf() mon_initial_members = mon.get_mon_initial_members(Mock(), False, cfg) assert mon_initial_members is None def test_single_item_if_mon_not_none(self): cfg = make_fake_conf() cfg.add_section('global') cfg.set('global', 'mon initial members', 'AAAA') mon_initial_members = mon.get_mon_initial_members(Mock(), False, cfg) assert set(mon_initial_members) == set(['AAAA']) def test_multiple_item_if_mon_not_none(self): cfg = make_fake_conf() cfg.add_section('global') cfg.set('global', 'mon initial members', 'AAAA, BBBB') mon_initial_members = mon.get_mon_initial_members(Mock(), False, cfg) assert set(mon_initial_members) == set(['AAAA', 'BBBB']) class TestCatchCommonErrors(object): def setup(self): self.logger = Mock() def assert_logger_message(self, logger, msg): calls = logger.call_args_list for log_call in calls: if msg in log_call[0][0]: return True raise AssertionError('"%s" was not found in any of %s' % (msg, calls)) def test_warn_if_no_intial_members(self): fake_conn = make_fake_conn() cfg = make_fake_conf() mon.catch_mon_errors(fake_conn, self.logger, 'host', cfg, Mock()) expected_msg = 'is not defined in `mon initial members`' self.assert_logger_message(self.logger.warning, expected_msg) def test_warn_if_host_not_in_intial_members(self): fake_conn = make_fake_conn() cfg = make_fake_conf() cfg.add_section('global') cfg.set('global', 'mon initial members', 'AAAA') mon.catch_mon_errors(fake_conn, self.logger, 'host', cfg, Mock()) expected_msg = 'is not defined in `mon initial members`' self.assert_logger_message(self.logger.warning, expected_msg) def test_warn_if_not_mon_in_monmap(self): fake_conn = make_fake_conn() cfg = make_fake_conf() mon.catch_mon_errors(fake_conn, self.logger, 'host', cfg, Mock()) 
expected_msg = 'does not exist in monmap' self.assert_logger_message(self.logger.warning, expected_msg) def test_warn_if_not_public_addr_and_not_public_netw(self): fake_conn = make_fake_conn() cfg = make_fake_conf() cfg.add_section('global') mon.catch_mon_errors(fake_conn, self.logger, 'host', cfg, Mock()) expected_msg = 'neither `public_addr` nor `public_network`' self.assert_logger_message(self.logger.warning, expected_msg) ceph-deploy-2.0.1/ceph_deploy/tests/test_remotes.py0000644000076500000240000001647713277045417023237 0ustar alfredostaff00000000000000from mock import patch from ceph_deploy.hosts import remotes from ceph_deploy.hosts.remotes import platform_information, parse_os_release class FakeExists(object): def __init__(self, existing_paths): self.existing_paths = existing_paths def __call__(self, path): for existing_path in self.existing_paths: if path == existing_path: return path class TestWhich(object): def setup(self): self.exists_module = 'ceph_deploy.hosts.remotes.os.path.exists' def test_finds_absolute_paths(self): exists = FakeExists(['/bin/ls']) with patch(self.exists_module, exists): path = remotes.which('ls') assert path == '/bin/ls' def test_does_not_find_executable(self): exists = FakeExists(['/bin/foo']) with patch(self.exists_module, exists): path = remotes.which('ls') assert path is None class TestPlatformInformation(object): """ tests various inputs that remotes.platform_information handles you can test your OS string by comparing the results with the output from: python -c "import platform; print platform.linux_distribution()" """ def setup(self): pass def test_handles_deb_version_num(self): def fake_distro(): return ('debian', '8.4', '') distro, release, codename = platform_information(fake_distro) assert distro == 'debian' assert release == '8.4' assert codename == 'jessie' def test_handles_deb_version_slash(self): def fake_distro(): return ('debian', 'wheezy/something', '') distro, release, codename = platform_information(fake_distro) assert distro == 'debian' assert release == 'wheezy/something' assert codename == 'wheezy' def test_handles_deb_version_slash_sid(self): def fake_distro(): return ('debian', 'jessie/sid', '') distro, release, codename = platform_information(fake_distro) assert distro == 'debian' assert release == 'jessie/sid' assert codename == 'sid' def test_handles_no_codename(self): def fake_distro(): return ('SlaOS', '99.999', '') distro, release, codename = platform_information(fake_distro) assert distro == 'SlaOS' assert release == '99.999' assert codename == '' # Normal distro strings def test_hanles_centos_64(self): def fake_distro(): return ('CentOS', '6.4', 'Final') distro, release, codename = platform_information(fake_distro) assert distro == 'CentOS' assert release == '6.4' assert codename == 'Final' def test_handles_ubuntu_percise(self): def fake_distro(): return ('Ubuntu', '12.04', 'precise') distro, release, codename = platform_information(fake_distro) assert distro == 'Ubuntu' assert release == '12.04' assert codename == 'precise' class TestParseOsRelease(object): """ test various forms of /etc/os-release """ def setup(self): pass def test_handles_centos_7(self, tmpdir): path = str(tmpdir.join('os_release')) with open(path, 'w') as os_release: os_release.write(""" NAME="CentOS Linux" VERSION="7 (Core)" ID="centos" ID_LIKE="rhel fedora" VERSION_ID="7" PRETTY_NAME="CentOS Linux 7 (Core)" ANSI_COLOR="0;31" CPE_NAME="cpe:/o:centos:centos:7" HOME_URL="https://www.centos.org/" BUG_REPORT_URL="https://bugs.centos.org/" 
CENTOS_MANTISBT_PROJECT="CentOS-7" CENTOS_MANTISBT_PROJECT_VERSION="7" REDHAT_SUPPORT_PRODUCT="centos" REDHAT_SUPPORT_PRODUCT_VERSION="7" """) distro, release, codename = parse_os_release(path) assert distro == 'centos' assert release == '7' assert codename == 'core' def test_handles_debian_stretch(self, tmpdir): path = str(tmpdir.join('os_release')) with open(path, 'w') as os_release: os_release.write(""" PRETTY_NAME="Debian GNU/Linux 9 (stretch)" NAME="Debian GNU/Linux" VERSION_ID="9" VERSION="9 (stretch)" ID=debian HOME_URL="https://www.debian.org/" SUPPORT_URL="https://www.debian.org/support" BUG_REPORT_URL="https://bugs.debian.org/" """) distro, release, codename = parse_os_release(path) assert distro == 'debian' assert release == '9' assert codename == 'stretch' def test_handles_fedora_26(self, tmpdir): path = str(tmpdir.join('os_release')) with open(path, 'w') as os_release: os_release.write(""" NAME=Fedora VERSION="26 (Twenty Six)" ID=fedora VERSION_ID=26 PRETTY_NAME="Fedora 26 (Twenty Six)" ANSI_COLOR="0;34" CPE_NAME="cpe:/o:fedoraproject:fedora:26" HOME_URL="https://fedoraproject.org/" BUG_REPORT_URL="https://bugzilla.redhat.com/" REDHAT_BUGZILLA_PRODUCT="Fedora" REDHAT_BUGZILLA_PRODUCT_VERSION=26 REDHAT_SUPPORT_PRODUCT="Fedora" REDHAT_SUPPORT_PRODUCT_VERSION=26 PRIVACY_POLICY_URL=https://fedoraproject.org/wiki/Legal:PrivacyPolicy """) distro, release, codename = parse_os_release(path) assert distro == 'fedora' assert release == '26' assert codename == 'twenty six' def test_handles_opensuse_leap_42_2(self, tmpdir): path = str(tmpdir.join('os_release')) with open(path, 'w') as os_release: os_release.write(""" NAME="openSUSE Leap" VERSION="42.2" ID=opensuse ID_LIKE="suse" VERSION_ID="42.2" PRETTY_NAME="openSUSE Leap 42.2" ANSI_COLOR="0;32" CPE_NAME="cpe:/o:opensuse:leap:42.2" BUG_REPORT_URL="https://bugs.opensuse.org" HOME_URL="https://www.opensuse.org/" """) distro, release, codename = parse_os_release(path) assert distro == 'opensuse' assert release == '42.2' assert codename == '42.2' def test_handles_opensuse_tumbleweed(self, tmpdir): path = str(tmpdir.join('os_release')) with open(path, 'w') as os_release: os_release.write(""" NAME="openSUSE Tumbleweed" # VERSION="20170502" ID=opensuse ID_LIKE="suse" VERSION_ID="20170502" PRETTY_NAME="openSUSE Tumbleweed" ANSI_COLOR="0;32" CPE_NAME="cpe:/o:opensuse:tumbleweed:20170502" BUG_REPORT_URL="https://bugs.opensuse.org" HOME_URL="https://www.opensuse.org/" """) distro, release, codename = parse_os_release(path) assert distro == 'opensuse' assert release == '20170502' assert codename == 'tumbleweed' def test_handles_sles_12_sp3(self, tmpdir): path = str(tmpdir.join('os_release')) with open(path, 'w') as os_release: os_release.write(""" NAME="SLES" VERSION="12-SP3" VERSION_ID="12.3" PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3" ID="sles" ANSI_COLOR="0;32" CPE_NAME="cpe:/o:suse:sles:12:sp3" """) distro, release, codename = parse_os_release(path) assert distro == 'sles' assert release == '12.3' assert codename == '12-SP3' def test_handles_ubuntu_xenial(self, tmpdir): path = str(tmpdir.join('os_release')) with open(path, 'w') as os_release: os_release.write(""" NAME="Ubuntu" VERSION="16.04 LTS (Xenial Xerus)" ID=ubuntu ID_LIKE=debian PRETTY_NAME="Ubuntu 16.04 LTS" VERSION_ID="16.04" HOME_URL="http://www.ubuntu.com/" SUPPORT_URL="http://help.ubuntu.com/" BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/" UBUNTU_CODENAME=xenial """) distro, release, codename = parse_os_release(path) assert distro == 'ubuntu' assert release == '16.04' 
assert codename == 'xenial'
ceph-deploy-2.0.1/ceph_deploy/tests/util.py0000644000076500000240000000143213010377266021453 0ustar alfredostaff00000000000000

def generate_ips(start_ip, end_ip):
    start = list(map(int, start_ip.split(".")))
    end = list(map(int, end_ip.split(".")))
    # note: `temp` aliases `start`, so incrementing start[3] below also
    # advances `temp` until it reaches `end`
    temp = start
    ip_range = []
    ip_range.append(start_ip)
    while temp != end:
        start[3] += 1
        for i in (3, 2, 1):
            if temp[i] == 256:
                temp[i] = 0
                temp[i-1] += 1
        ip_range.append(".".join(map(str, temp)))
    return ip_range


class Empty(object):
    """
    A bare class, with explicit behavior for key/value items to be set
    at instantiation.
    """
    def __init__(self, **kw):
        for k, v in kw.items():
            setattr(self, k, v)


def assert_too_few_arguments(err):
    assert ("error: too few arguments" in err or
            "error: the following argument" in err)
ceph-deploy-2.0.1/ceph_deploy/util/__init__.py0000644000076500000240000000000012620214647022035 0ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/util/arg_validators.py0000644000076500000240000000521212620214647023311 0ustar alfredostaff00000000000000import socket
import argparse
import re


class RegexMatch(object):
    """
    Performs a regular expression match on a value. If the regular
    expression pattern matches, it raises an argparse error using the
    given statement as the message.
    """

    def __init__(self, pattern, statement=None):
        self.string_pattern = pattern
        self.pattern = re.compile(pattern)
        self.statement = statement
        if not self.statement:
            self.statement = "must match pattern %s" % self.string_pattern

    def __call__(self, string):
        match = self.pattern.search(string)
        if match:
            raise argparse.ArgumentError(None, self.statement)
        return string


class Hostname(object):
    """
    Checks whether a given hostname is resolvable in DNS, otherwise
    raising an argparse error.
    """

    def __init__(self, _socket=None):
        self.socket = _socket or socket  # just used for testing

    def __call__(self, string):
        parts = string.split(':', 1)
        name = parts[0]
        host = parts[-1]
        try:
            self.socket.getaddrinfo(host, 0)
        except self.socket.gaierror:
            msg = "hostname: %s is not resolvable" % host
            raise argparse.ArgumentError(None, msg)
        try:
            self.socket.getaddrinfo(name, 0, 0, 0, 0, self.socket.AI_NUMERICHOST)
        except self.socket.gaierror:
            return string  # not an IP
        else:
            msg = '%s must be a hostname not an IP' % name
            raise argparse.ArgumentError(None, msg)
        return string


class Subnet(object):
    """
    A really dumb validator to ensure that we are receiving a subnet (or
    something that actually looks like a subnet). It doesn't enforce at all
    the constraints of proper validation as that has its own set of caveats
    that are difficult to implement given that ceph-deploy doesn't (should
    not) include third party dependencies.
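    For example, '10.0.0.0/24' would be accepted, while '10.0.0/24' (too few
    octets), '10.0.0.0' (no slash), or 'a.b.c.d/24' (non-numeric octets)
    would each raise an argparse error.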
""" def __call__(self, string): ip = string.split('/')[0] ip_parts = ip.split('.') if len(ip_parts) != 4: err = "subnet must have at least 4 numbers separated by dots like x.x.x.x/xx, but got: %s" % string raise argparse.ArgumentError(None, err) if [i for i in ip_parts[:4] if i.isalpha()]: # only numbers err = "subnet must have digits separated by dots like x.x.x.x/xx, but got: %s" % string raise argparse.ArgumentError(None, err) if len(string.split('/')) != 2: err = "subnet must contain a slash, like x.x.x.x/xx, but got: %s" % string raise argparse.ArgumentError(None, err) return string ceph-deploy-2.0.1/ceph_deploy/util/constants.py0000644000076500000240000000164013277045417022333 0ustar alfredostaff00000000000000from os.path import join from collections import namedtuple # Base Path for ceph base_path = '/var/lib/ceph' # Base run Path base_run_path = '/var/run/ceph' tmp_path = join(base_path, 'tmp') mon_path = join(base_path, 'mon') mgr_path = join(base_path, 'mgr') mds_path = join(base_path, 'mds') osd_path = join(base_path, 'osd') # Default package components to install _base_components = [ 'ceph', 'ceph-osd', 'ceph-mds', 'ceph-mon', ] default_components = namedtuple('DefaultComponents', ['rpm', 'deb', 'pkgtarxz']) # the difference here is because RPMs currently name the radosgw differently than DEBs. # TODO: This needs to get unified once the packaging naming gets consistent default_components.rpm = tuple(_base_components + ['ceph-radosgw']) default_components.deb = tuple(_base_components + ['radosgw']) default_components.pkgtarxz = tuple(['ceph']) gpg_key_base_url = "download.ceph.com/keys/" ceph-deploy-2.0.1/ceph_deploy/util/decorators.py0000644000076500000240000000645512672015015022462 0ustar alfredostaff00000000000000import logging import sys import traceback from functools import wraps def catches(catch=None, handler=None, exit=True, handle_all=False): """ Very simple decorator that tries any of the exception(s) passed in as a single exception class or tuple (containing multiple ones) returning the exception message and optionally handling the problem if it raises with the handler if it is provided. So instead of doing something like this:: def bar(): try: some_call() print "Success!" except TypeError, exc: print "Error while handling some call: %s" % exc sys.exit(1) You would need to decorate it like this to have the same effect:: @catches(TypeError) def bar(): some_call() print "Success!" If multiple exceptions need to be caught they need to be provided as a tuple:: @catches((TypeError, AttributeError)) def bar(): some_call() print "Success!" If adding a handler, it should accept a single argument, which would be the exception that was raised, it would look like:: def my_handler(exc): print 'Handling exception %s' % str(exc) raise SystemExit @catches(KeyboardInterrupt, handler=my_handler) def bar(): some_call() Note that the handler needs to raise its SystemExit if it wants to halt execution, otherwise the decorator would continue as a normal try/except block. :param catch: A tuple with one (or more) Exceptions to catch :param handler: Optional handler to have custom handling of exceptions :param exit: Raise a ``SystemExit`` after handling exceptions :param handle_all: Handle all other exceptions via logging. 
""" catch = catch or Exception logger = logging.getLogger('ceph_deploy') def decorate(f): @wraps(f) def newfunc(*a, **kw): exit_from_catch = False try: return f(*a, **kw) except catch as e: if handler: return handler(e) else: logger.error(make_exception_message(e)) if exit: exit_from_catch = True sys.exit(1) except Exception: # anything else, no need to save the exception as a variable if handle_all is False: # re-raise if we are not supposed to handle everything raise # Make sure we don't spit double tracebacks if we are raising # SystemExit from the `except catch` block if exit_from_catch: sys.exit(1) str_failure = traceback.format_exc() for line in str_failure.split('\n'): logger.error("%s" % line) sys.exit(1) return newfunc return decorate # # Decorator helpers # def make_exception_message(exc): """ An exception is passed in and this function returns the proper string depending on the result so it is readable enough. """ if str(exc): return '%s: %s\n' % (exc.__class__.__name__, exc) else: return '%s\n' % (exc.__class__.__name__) ceph-deploy-2.0.1/ceph_deploy/util/files.py0000644000076500000240000000011712620214647021411 0ustar alfredostaff00000000000000 def read_file(path): with open(path, 'rb') as f: return f.read() ceph-deploy-2.0.1/ceph_deploy/util/help_formatters.py0000644000076500000240000000262412620214647023512 0ustar alfredostaff00000000000000import argparse class ToggleRawTextHelpFormatter(argparse.HelpFormatter): """ArgParse help formatter that allows raw text in individual help strings Inspired by the SmartFormatter at https://bitbucket.org/ruamel/std.argparse Normally to include newlines in the help output of argparse, you have use argparse.RawDescriptionHelpFormatter. But this means raw text is enabled everywhere, and not just for specific help entries where you might need it. This help formatter allows for you to optional enable/toggle raw text on individual menu items by prefixing the help string with 'R|'. Example: parser.formatter_class = ToggleRawTextHelpFormatter parser.add_argument('--verbose', action=store_true, help='Enable verbose mode') #Above help is formatted just as default argparse.HelpFormatter parser.add_argument('--complex-arg', action=store_true, help=('R|This help description use ' 'newlines and tabs and they will be preserved in' 'the help output.\n\n' '\tHow cool is that?')) """ def _split_lines(self, text, width): if text.startswith('R|'): return text[2:].splitlines() return argparse.HelpFormatter._split_lines(self, text, width) ceph-deploy-2.0.1/ceph_deploy/util/log.py0000644000076500000240000000374113075726615021106 0ustar alfredostaff00000000000000import logging import sys BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8) COLORS = { 'WARNING': YELLOW, 'INFO': WHITE, 'DEBUG': BLUE, 'CRITICAL': RED, 'ERROR': RED, 'FATAL': RED, } RESET_SEQ = "\033[0m" COLOR_SEQ = "\033[1;%dm" BOLD_SEQ = "\033[1m" BASE_COLOR_FORMAT = "[$BOLD%(name)s$RESET][%(color_levelname)-17s] %(message)s" BASE_FORMAT = "[%(name)s][%(levelname)-6s] %(message)s" FILE_FORMAT = "[%(asctime)s]" + BASE_FORMAT def supports_color(): """ Returns True if the running system's terminal supports color, and False otherwise. """ unsupported_platform = (sys.platform in ('win32', 'Pocket PC')) # isatty is not always implemented, #6223. 
is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty() if unsupported_platform or not is_a_tty: return False return True def color_message(message): message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ) return message class ColoredFormatter(logging.Formatter): """ A very basic logging formatter that not only applies color to the levels of the ouput but will also truncate the level names so that they do not alter the visuals of logging when presented on the terminal. """ def __init__(self, msg): logging.Formatter.__init__(self, msg) def format(self, record): levelname = record.levelname truncated_level = record.levelname[:6] levelname_color = COLOR_SEQ % (30 + COLORS[levelname]) + truncated_level + RESET_SEQ record.color_levelname = levelname_color return logging.Formatter.format(self, record) def color_format(): """ Main entry point to get a colored formatter, it will use the BASE_FORMAT by default and fall back to no colors if the system does not support it """ str_format = BASE_COLOR_FORMAT if supports_color() else BASE_FORMAT color_format = color_message(str_format) return ColoredFormatter(color_format) ceph-deploy-2.0.1/ceph_deploy/util/net.py0000644000076500000240000003262513277045417021114 0ustar alfredostaff00000000000000try: from urllib.request import urlopen from urllib.error import HTTPError except ImportError: from urllib2 import urlopen, HTTPError from ceph_deploy import exc import logging import re import socket from ceph_deploy.lib import remoto LOG = logging.getLogger(__name__) # TODO: at some point, it might be way more accurate to do this in the actual # host where we need to get IPs from. SaltStack does this by calling `ip` and # parsing the output, which is probably the one true way of dealing with it. def get_nonlocal_ip(host, subnet=None): """ Search result of getaddrinfo() for a non-localhost-net address """ try: ailist = socket.getaddrinfo(host, None) except socket.gaierror: raise exc.UnableToResolveError(host) for ai in ailist: # an ai is a 5-tuple; the last element is (ip, port) ip = ai[4][0] if subnet and ip_in_subnet(ip, subnet): LOG.info('found ip (%s) for host (%s) to be in cluster subnet (%s)' % ( ip, host, subnet,) ) return ip if not ip.startswith('127.'): if subnet: LOG.warning('could not match ip (%s) for host (%s) for cluster subnet (%s)' % ( ip, host, subnet,) ) return ip raise exc.UnableToResolveError(host) def ip_in_subnet(ip, subnet): """Does IP exists in a given subnet utility. Returns a boolean""" ipaddr = int(''.join(['%02x' % int(x) for x in ip.split('.')]), 16) netstr, bits = subnet.split('/') netaddr = int(''.join(['%02x' % int(x) for x in netstr.split('.')]), 16) mask = (0xffffffff << (32 - int(bits))) & 0xffffffff return (ipaddr & mask) == (netaddr & mask) def in_subnet(cidr, addrs=None): """ Returns True if host is within specified subnet, otherwise False """ for address in addrs: if ip_in_subnet(address, cidr): return True return False def ip_addresses(conn, interface=None, include_loopback=False): """ Returns a list of IPv4/IPv6 addresses assigned to the host. 127.0.0.1/::1 is ignored, unless 'include_loopback=True' is indicated. If 'interface' is provided, then only IP addresses from that interface will be returned. 
Example output looks like:: >>> ip_addresses(conn) ['192.168.1.111', '10.0.1.12', '2001:db8::100'] """ ret = set() ifaces = linux_interfaces(conn) if interface is None: target_ifaces = ifaces else: target_ifaces = dict((k, v) for k, v in ifaces.items() if k == interface) if not target_ifaces: LOG.error('Interface {0} not found.'.format(interface)) for info in target_ifaces.values(): for ipv4 in info.get('inet', []): loopback = in_subnet('127.0.0.0/8', [ipv4.get('address')]) or ipv4.get('label') == 'lo' if not loopback or include_loopback: ret.add(ipv4['address']) for secondary in info.get('secondary', []): addr = secondary.get('address') if addr and secondary.get('type') == 'inet': if include_loopback or (not include_loopback and not in_subnet('127.0.0.0/8', [addr])): ret.add(addr) for ipv6 in info.get('inet6', []): # When switching to Python 3 the ipaddress module can do all this work for us if ipv6.get('address').startswith('fe80::'): continue if not include_loopback and '::1' == ipv6.get('address'): continue ret.add(ipv6['address']) if ret: conn.logger.debug('IP addresses found: %s' % str(list(ret))) return sorted(list(ret)) def linux_interfaces(conn): """ Obtain interface information for *NIX/BSD variants in remote servers. Example output from a remote node with a couple of interfaces:: {'eth0': {'hwaddr': '08:00:27:08:c2:e4', 'inet': [{'address': '10.0.2.15', 'broadcast': '10.0.2.255', 'label': 'eth0', 'netmask': '255.255.255.0'}], 'inet6': [{'address': 'fe80::a00:27ff:fe08:c2e4', 'prefixlen': '64'}], 'up': True}, 'eth1': {'hwaddr': '08:00:27:70:06:f1', 'inet': [{'address': '192.168.111.101', 'broadcast': '192.168.111.255', 'label': 'eth1', 'netmask': '255.255.255.0'}], 'inet6': [{'address': 'fe80::a00:27ff:fe70:6f1', 'prefixlen': '64'}], 'up': True}, 'lo': {'hwaddr': '00:00:00:00:00:00', 'inet': [{'address': '127.0.0.1', 'broadcast': None, 'label': 'lo', 'netmask': '255.0.0.0'}], 'inet6': [{'address': '::1', 'prefixlen': '128'}], 'up': True}} :param conn: A connection object to a remote node """ ifaces = dict() ip_path = conn.remote_module.which('ip') ifconfig_path = None if ip_path else conn.remote_module.which('ifconfig') if ip_path: cmd1, _, _ = remoto.process.check( conn, [ '{0}'.format(ip_path), 'link', 'show', ], ) cmd2, _, _ = remoto.process.check( conn, [ '{0}'.format(ip_path), 'addr', 'show', ], ) ifaces = _interfaces_ip(b'\n'.join(cmd1).decode('utf-8') + '\n' + b'\n'.join(cmd2).decode('utf-8')) elif ifconfig_path: cmd, _, _ = remoto.process.check( conn, [ '{0}'.format(ifconfig_path), '-a', ] ) ifaces = _interfaces_ifconfig('\n'.join(cmd)) return ifaces def _interfaces_ip(out): """ Uses ip to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) """ ret = dict() def parse_network(value, cols): """ Return a tuple of ip, netmask, broadcast based on the current set of cols """ brd = None if '/' in value: # we have a CIDR in this address ip, cidr = value.split('/') # pylint: disable=C0103 else: ip = value # pylint: disable=C0103 cidr = 32 if type_ == 'inet': mask = cidr_to_ipv4_netmask(int(cidr)) if 'brd' in cols: brd = cols[cols.index('brd') + 1] elif type_ == 'inet6': mask = cidr return (ip, mask, brd) groups = re.compile('\r?\n\\d').split(out) for group in groups: iface = None data = dict() for line in group.splitlines(): if ' ' not in line: continue match = re.match(r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>', line) if match: iface, parent, attrs = match.groups() if 'UP' in attrs.split(','): data['up']
= True else: data['up'] = False if parent: data['parent'] = parent continue cols = line.split() if len(cols) >= 2: type_, value = tuple(cols[0:2]) iflabel = cols[-1:][0] if type_ in ('inet', 'inet6'): if 'secondary' not in cols: ipaddr, netmask, broadcast = parse_network(value, cols) if type_ == 'inet': if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['netmask'] = netmask addr_obj['broadcast'] = broadcast addr_obj['label'] = iflabel data['inet'].append(addr_obj) elif type_ == 'inet6': if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['prefixlen'] = netmask data['inet6'].append(addr_obj) else: if 'secondary' not in data: data['secondary'] = list() ip_, mask, brd = parse_network(value, cols) data['secondary'].append({ 'type': type_, 'address': ip_, 'netmask': mask, 'broadcast': brd, 'label': iflabel, }) del ip_, mask, brd elif type_.startswith('link'): data['hwaddr'] = value if iface: ret[iface] = data del iface, data return ret def _interfaces_ifconfig(out): """ Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) """ ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?(?:HWaddr|ether|address:|lladdr) ([0-9a-fA-F:]+)') pip = re.compile(r'.*?(?:inet addr:|inet )(.*?)\s') pip6 = re.compile('.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)') pmask = re.compile(r'.*?(?:Mask:|netmask )(?:((?:0x)?[0-9a-fA-F]{8})|([\d\.]+))') pmask6 = re.compile(r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+)).*') pupdown = re.compile('UP') pbcast = re.compile(r'.*?(?:Bcast:|broadcast )([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = pmask.match(line) if mmask: if mmask.group(1): mmask = _number_of_set_bits_to_ipv4_netmask( int(mmask.group(1), 16)) else: mmask = mmask.group(2) addr_obj['netmask'] = mmask mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) or mip6.group(2) mmask6 = pmask6.match(line) if mmask6: addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2) data['inet6'].append(addr_obj) data['up'] = updown ret[iface] = data del data return ret def _number_of_set_bits_to_ipv4_netmask(set_bits): # pylint: disable=C0103 """ Returns an IPv4 netmask from the integer representation of that mask. Ex. 0xffffff00 -> '255.255.255.0' """ return cidr_to_ipv4_netmask(_number_of_set_bits(set_bits)) def _number_of_set_bits(x): """ Returns the number of bits that are set in a 32bit int """ # Taken from http://stackoverflow.com/a/4912729. Many thanks! 
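# The steps below are a SWAR ("SIMD within a register") popcount: adjacent # 1-bit fields are summed into 2-bit fields, those into 4-bit fields, then # bytes, and the final shifts/adds accumulate the byte sums; the closing mask # keeps the six low bits, which is enough for any count up to 32.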
x -= (x >> 1) & 0x55555555 x = ((x >> 2) & 0x33333333) + (x & 0x33333333) x = ((x >> 4) + x) & 0x0f0f0f0f x += x >> 8 x += x >> 16 return x & 0x0000003f def cidr_to_ipv4_netmask(cidr_bits): """ Returns an IPv4 netmask """ try: cidr_bits = int(cidr_bits) if not 1 <= cidr_bits <= 32: return '' except ValueError: return '' netmask = '' for idx in range(4): if idx: netmask += '.' if cidr_bits >= 8: netmask += '255' cidr_bits -= 8 else: netmask += '{0:d}'.format(256 - (2 ** (8 - cidr_bits))) cidr_bits = 0 return netmask def get_request(url): try: return urlopen(url) except HTTPError as err: LOG.error('repository might not be available yet') raise RuntimeError('%s, failed to fetch %s' % (err, url)) def get_chacra_repo(shaman_url): """ From a Shaman URL, get the chacra url for a repository, read the contents that point to the repo and return it as a string. """ shaman_response = get_request(shaman_url) chacra_url = shaman_response.geturl() chacra_response = get_request(chacra_url) return chacra_response.read() ceph-deploy-2.0.1/ceph_deploy/util/packages.py0000644000076500000240000000414512754333353022076 0ustar alfredostaff00000000000000from ceph_deploy.exc import ExecutableNotFound from ceph_deploy.util import system, versions from ceph_deploy.lib import remoto class Ceph(object): """ Determine different aspects of the Ceph package, like its ``version`` and the path to its ``executable``. It mostly provides a version object that helps with parsing and comparing. """ def __init__(self, conn, _check=None): self.conn = conn self._check = _check or remoto.process.check @property def installed(self): """ If the ``ceph`` executable exists, then Ceph is installed. Should probably be revisited if different components do not have the ``ceph`` executable (this is currently provided by ``ceph-common``). """ return bool(self.executable) @property def executable(self): try: return system.executable_path(self.conn, 'ceph') except ExecutableNotFound: return None def _get_version_output(self): """ Ignoring errors, call `ceph --version` and return only the version portion of the output. For example, output like:: ceph version 9.0.1-1234kjd (asdflkj2k3jh234jhg) Would return:: 9.0.1-1234kjd """ if not self.executable: return '' command = [self.executable, '--version'] out, _, _ = self._check(self.conn, command) try: return out.decode('utf-8').split()[2] except IndexError: return '' @property def version(self): """ Return a version object (see :mod:``ceph_deploy.util.versions.NormalizedVersion``) """ return versions.parse_version(self._get_version_output) # callback helpers def ceph_is_installed(module): """ A helper callback to be executed after the connection is made to ensure that Ceph is installed. """ ceph_package = Ceph(module.conn) if not ceph_package.installed: host = module.conn.hostname raise RuntimeError( 'ceph needs to be installed in remote host: %s' % host ) ceph-deploy-2.0.1/ceph_deploy/util/paths/0000755000076500000240000000000013312242253021046 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy/util/paths/__init__.py0000644000076500000240000000011312754333353023165 0ustar alfredostaff00000000000000from . import mon # noqa from . import osd # noqa from . 
import gpg # noqa ceph-deploy-2.0.1/ceph_deploy/util/paths/gpg.py0000644000076500000240000000034612620214647022207 0ustar alfredostaff00000000000000from ceph_deploy.util import constants def url(key_type, protocol="https"): return "{protocol}://{url}{key_type}.asc".format( protocol=protocol, url=constants.gpg_key_base_url, key_type=key_type ) ceph-deploy-2.0.1/ceph_deploy/util/paths/mon.py0000644000076500000240000000416012620214647022221 0ustar alfredostaff00000000000000""" Common paths for mon, based on the constant file paths defined in ``ceph_deploy.util.constants``. All functions return a string representation of the absolute path construction. """ from os.path import join from ceph_deploy.util import constants def base(cluster): cluster = "%s-" % cluster return join(constants.mon_path, cluster) def path(cluster, hostname): """ Example usage:: >>> from ceph_deploy.util.paths import mon >>> mon.path('mycluster', 'myhostname') /var/lib/ceph/mon/mycluster-myhostname """ return "%s%s" % (base(cluster), hostname) def done(cluster, hostname): """ Example usage:: >>> from ceph_deploy.util.paths import mon >>> mon.done('mycluster', 'myhostname') /var/lib/ceph/mon/mycluster-myhostname/done """ return join(path(cluster, hostname), 'done') def init(cluster, hostname, init): """ Example usage:: >>> from ceph_deploy.util.paths import mon >>> mon.init('mycluster', 'myhostname', 'init') /var/lib/ceph/mon/mycluster-myhostname/init """ return join(path(cluster, hostname), init) def keyring(cluster, hostname): """ Example usage:: >>> from ceph_deploy.util.paths import mon >>> mon.keyring('mycluster', 'myhostname') /var/lib/ceph/tmp/mycluster-myhostname.mon.keyring """ keyring_file = '%s-%s.mon.keyring' % (cluster, hostname) return join(constants.tmp_path, keyring_file) def asok(cluster, hostname): """ Example usage:: >>> from ceph_deploy.util.paths import mon >>> mon.asok('mycluster', 'myhostname') /var/run/ceph/mycluster-mon.myhostname.asok """ asok_file = '%s-mon.%s.asok' % (cluster, hostname) return join(constants.base_run_path, asok_file) def monmap(cluster, hostname): """ Example usage:: >>> from ceph_deploy.util.paths import mon >>> mon.monmap('mycluster', 'myhostname') /var/lib/ceph/tmp/mycluster.myhostname.monmap """ mon_map_file = '%s.%s.monmap' % (cluster, hostname) return join(constants.tmp_path, mon_map_file) ceph-deploy-2.0.1/ceph_deploy/util/paths/osd.py0000644000076500000240000000053212620214647022214 0ustar alfredostaff00000000000000""" Common paths for osd, based on the constant file paths defined in ``ceph_deploy.util.constants``. All functions return a string representation of the absolute path construction.
""" from os.path import join from ceph_deploy.util import constants def base(cluster): cluster = "%s-" % cluster return join(constants.osd_path, cluster) ceph-deploy-2.0.1/ceph_deploy/util/pkg_managers.py0000644000076500000240000002306313277045417022760 0ustar alfredostaff00000000000000import os try: from urllib.parse import urlparse except ImportError: from urlparse import urlparse from ceph_deploy.lib import remoto from ceph_deploy.util import templates class PackageManager(object): """ Base class for all Package Managers """ def __init__(self, remote_conn): self.remote_info = remote_conn self.remote_conn = remote_conn.conn def _run(self, cmd, **kw): return remoto.process.run( self.remote_conn, cmd, **kw ) def _check(self, cmd, **kw): return remoto.process.check( self.remote_conn, cmd, **kw ) def install(self, packages, **kw): """Install packages on remote node""" raise NotImplementedError() def remove(self, packages, **kw): """Uninstall packages on remote node""" raise NotImplementedError() def clean(self): """Clean metadata/cache""" raise NotImplementedError() def add_repo_gpg_key(self, url): """Add given GPG key for repo verification""" raise NotImplementedError() def add_repo(self, name, url, **kw): """Add/rewrite a repo file""" raise NotImplementedError() def remove_repo(self, name): """Remove a repo definition""" raise NotImplementedError() class RPMManagerBase(PackageManager): """ Base class to hold common pieces of Yum and DNF """ executable = None name = None def install(self, packages, **kw): if isinstance(packages, str): packages = [packages] extra_flags = kw.pop('extra_install_flags', None) cmd = [ self.executable, '-y', 'install', ] if extra_flags: if isinstance(extra_flags, str): extra_flags = [extra_flags] cmd.extend(extra_flags) cmd.extend(packages) return self._run(cmd) def remove(self, packages, **kw): if isinstance(packages, str): packages = [packages] extra_flags = kw.pop('extra_remove_flags', None) cmd = [ self.executable, '-y', '-q', 'remove', ] if extra_flags: if isinstance(extra_flags, str): extra_flags = [extra_flags] cmd.extend(extra_flags) cmd.extend(packages) return self._run(cmd) def clean(self, item=None): item = item or 'all' cmd = [ self.executable, 'clean', item, ] return self._run(cmd) def add_repo_gpg_key(self, url): cmd = ['rpm', '--import', url] self._run(cmd) def add_repo(self, name, url, **kw): gpg_url = kw.pop('gpg_url', None) if gpg_url: self.add_repo_gpg_key(gpg_url) gpgcheck=1 else: gpgcheck=0 # RPM repo defaults description = kw.pop('description', '%s repo' % name) enabled = kw.pop('enabled', 1) proxy = kw.pop('proxy', '') # will get ignored if empty _type = 'repo-md' baseurl = url.strip('/') # Remove trailing slashes ceph_repo_content = templates.custom_repo( reponame=name, name=description, baseurl=baseurl, enabled=enabled, gpgcheck=gpgcheck, _type=_type, gpgkey=gpg_url, proxy=proxy, **kw ) self.remote_conn.remote_module.write_yum_repo( ceph_repo_content, '%s.repo' % name ) def remove_repo(self, name): filename = os.path.join( '/etc/yum.repos.d', '%s.repo' % name ) self.remote_conn.remote_module.unlink(filename) class DNF(RPMManagerBase): """ The DNF Package manager """ executable = 'dnf' name = 'dnf' def install(self, packages, **kw): extra_install_flags = kw.pop('extra_install_flags', []) if '--best' not in extra_install_flags: extra_install_flags.append('--best') super(DNF, self).install( packages, extra_install_flags=extra_install_flags, **kw ) class Yum(RPMManagerBase): """ The Yum Package manager """ executable = 'yum' name = 'yum' 
class Apt(PackageManager): """ Apt package management """ executable = [ 'env', 'DEBIAN_FRONTEND=noninteractive', 'DEBIAN_PRIORITY=critical', 'apt-get', '--assume-yes', '-q', ] name = 'apt' def install(self, packages, **kw): if isinstance(packages, str): packages = [packages] extra_flags = kw.pop('extra_install_flags', None) cmd = self.executable + [ '--no-install-recommends', 'install' ] if extra_flags: if isinstance(extra_flags, str): extra_flags = [extra_flags] cmd.extend(extra_flags) cmd.extend(packages) return self._run(cmd) def remove(self, packages, **kw): if isinstance(packages, str): packages = [packages] extra_flags = kw.pop('extra_remove_flags', None) cmd = self.executable + [ '-f', '--force-yes', 'remove' ] if extra_flags: if isinstance(extra_flags, str): extra_flags = [extra_flags] cmd.extend(extra_flags) cmd.extend(packages) return self._run(cmd) def clean(self): cmd = self.executable + ['update'] return self._run(cmd) def add_repo_gpg_key(self, url): gpg_path = url.split('file://')[-1] if not url.startswith('file://'): cmd = ['wget', '-O', 'release.asc', url ] self._run(cmd, stop_on_nonzero=False) gpg_file = 'release.asc' if not url.startswith('file://') else gpg_path cmd = ['apt-key', 'add', gpg_file] self._run(cmd) def add_repo(self, name, url, **kw): gpg_url = kw.pop('gpg_url', None) if gpg_url: self.add_repo_gpg_key(gpg_url) safe_filename = '%s.list' % name.replace(' ', '-') mode = 0o644 if urlparse(url).password: mode = 0o600 self.remote_conn.logger.info( "Creating repo file with mode 0600 due to presence of password" ) self.remote_conn.remote_module.write_sources_list( url, self.remote_info.codename, safe_filename, mode ) # Add package pinning for this repo fqdn = urlparse(url).hostname self.remote_conn.remote_module.set_apt_priority(fqdn) def remove_repo(self, name): safe_filename = '%s.list' % name.replace(' ', '-') filename = os.path.join( '/etc/apt/sources.list.d', safe_filename ) self.remote_conn.remote_module.unlink(filename) class Zypper(PackageManager): """ Zypper package management """ executable = [ 'zypper', '--non-interactive', '--quiet' ] name = 'zypper' def install(self, packages, **kw): if isinstance(packages, str): packages = [packages] extra_flags = kw.pop('extra_install_flags', None) cmd = self.executable + ['install'] if extra_flags: if isinstance(extra_flags, str): extra_flags = [extra_flags] cmd.extend(extra_flags) cmd.extend(packages) return self._run(cmd) def remove(self, packages, **kw): if isinstance(packages, str): packages = [packages] extra_flags = kw.pop('extra_remove_flags', None) cmd = self.executable + ['--ignore-unknown', 'remove'] if extra_flags: if isinstance(extra_flags, str): extra_flags = [extra_flags] cmd.extend(extra_flags) cmd.extend(packages) stdout, stderr, exitrc = self._check( cmd, **kw ) # exitrc is 104 when package(s) not installed. 
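# (104 is zypper's ZYPPER_EXIT_INF_CAP_NOT_FOUND, returned when a requested # package/capability is not found or not installed; accepting it alongside 0 # makes remove() safe to call for packages that are already absent.)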
if exitrc not in [0, 104]: raise RuntimeError("Failed to execute command: %s" % " ".join(cmd)) return def clean(self): cmd = self.executable + ['refresh'] return self._run(cmd) class Pacman(PackageManager): """ Pacman package management """ executable = [ 'pacman', '--noconfirm', ] name = 'pacman' def install(self, packages, **kw): if isinstance(packages, str): packages = [packages] extra_flags = kw.pop('extra_install_flags', None) cmd = self.executable + [ '-Sy', ] if extra_flags: if isinstance(extra_flags, str): extra_flags = [extra_flags] cmd.extend(extra_flags) cmd.extend(packages) return self._run(cmd) def remove(self, packages, **kw): if isinstance(packages, str): packages = [packages] extra_flags = kw.pop('extra_remove_flags', None) cmd = self.executable + [ '-R' ] if extra_flags: if isinstance(extra_flags, str): extra_flags = [extra_flags] cmd.extend(extra_flags) cmd.extend(packages) return self._run(cmd) def clean(self): cmd = self.executable + ['-Syy'] return self._run(cmd) def add_repo_gpg_key(self, url): cmd = ['pacman-key', '-a', url] self._run(cmd) ceph-deploy-2.0.1/ceph_deploy/util/ssh.py0000644000076500000240000000233312754333353021112 0ustar alfredostaff00000000000000import logging from ceph_deploy.lib import remoto from ceph_deploy.connection import get_local_connection def can_connect_passwordless(hostname): """ Ensure that the current host can SSH to the remote host using the ``BatchMode`` option to prevent a password prompt. That attempt will error with an exit status of 255 and a ``Permission denied`` message or a ``Host key verification failed`` message. """ # Ensure we are not doing this for local hosts if not remoto.connection.needs_ssh(hostname): return True logger = logging.getLogger(hostname) with get_local_connection(logger) as conn: # Check to see if we can login, disabling password prompts command = ['ssh', '-CT', '-o', 'BatchMode=yes', hostname] out, err, retval = remoto.process.check(conn, command, stop_on_error=False) permission_denied_error = b'Permission denied ' host_key_verify_error = b'Host key verification failed.' has_key_error = False for line in err: if permission_denied_error in line or host_key_verify_error in line: has_key_error = True if retval == 255 and has_key_error: return False return True ceph-deploy-2.0.1/ceph_deploy/util/system.py0000644000076500000240000001164413277045417021644 0ustar alfredostaff00000000000000from ceph_deploy.exc import ExecutableNotFound from ceph_deploy.lib import remoto def executable_path(conn, executable): """ Remote validator that accepts a connection object to ensure that a certain executable is available, returning its full path if so. Otherwise an exception with thorough details will be raised, informing the user that the executable was not found. """ executable_path = conn.remote_module.which(executable) if not executable_path: raise ExecutableNotFound(executable, conn.hostname) return executable_path def is_systemd(conn): """ Attempt to detect if a remote system is a systemd one or not by looking into ``/proc`` just like the ceph init script does:: # detect systemd # SYSTEMD=0 grep -qs systemd /proc/1/comm && SYSTEMD=1 """ return conn.remote_module.grep( 'systemd', '/proc/1/comm' ) def is_upstart(conn): """ This helper should only be used as a fallback (last resort), as it is not guaranteed to be absolutely correct.
""" # it may be possible that we may be systemd and the caller never checked # before so lets do that if is_systemd(conn): return False # get the initctl executable, if it doesn't exist we can't proceed so we # are probably not upstart initctl = conn.remote_module.which('initctl') if not initctl: return False # finally, try and get output from initctl that might hint this is an upstart # system. On a Ubuntu 14.04.2 system this would look like: # $ initctl version # init (upstart 1.12.1) stdout, stderr, _ = remoto.process.check( conn, [initctl, 'version'], ) result_string = b' '.join(stdout) if b'upstart' in result_string: return True return False def enable_service(conn, service='ceph'): """ Enable a service on a remote host depending on the type of init system. Obviously, this should be done for RHEL/Fedora/CentOS systems. This function does not do any kind of detection. """ if is_systemd(conn): remoto.process.run( conn, [ 'systemctl', 'enable', '{service}'.format(service=service), ] ) else: remoto.process.run( conn, [ 'chkconfig', '{service}'.format(service=service), 'on', ] ) def disable_service(conn, service='ceph'): """ Disable a service on a remote host depending on the type of init system. Obviously, this should be done for RHEL/Fedora/CentOS systems. This function does not do any kind of detection. """ if is_systemd(conn): # Without the check, an error is raised trying to disable an # already disabled service if is_systemd_service_enabled(conn, service): remoto.process.run( conn, [ 'systemctl', 'disable', '{service}'.format(service=service), ] ) def stop_service(conn, service='ceph'): """ Stop a service on a remote host depending on the type of init system. Obviously, this should be done for RHEL/Fedora/CentOS systems. This function does not do any kind of detection. """ if is_systemd(conn): # Without the check, an error is raised trying to stop an # already stopped service if is_systemd_service_active(conn, service): remoto.process.run( conn, [ 'systemctl', 'stop', '{service}'.format(service=service), ] ) def start_service(conn, service='ceph'): """ Stop a service on a remote host depending on the type of init system. Obviously, this should be done for RHEL/Fedora/CentOS systems. This function does not do any kind of detection. """ if is_systemd(conn): remoto.process.run( conn, [ 'systemctl', 'start', '{service}'.format(service=service), ] ) def is_systemd_service_active(conn, service='ceph'): """ Detects if a systemd service is active or not. """ _, _, returncode = remoto.process.check( conn, [ 'systemctl', 'is-active', '--quiet', '{service}'.format(service=service), ] ) return returncode == 0 def is_systemd_service_enabled(conn, service='ceph'): """ Detects if a systemd service is enabled or not. 
""" _, _, returncode = remoto.process.check( conn, [ 'systemctl', 'is-enabled', '--quiet', '{service}'.format(service=service), ] ) return returncode == 0 ceph-deploy-2.0.1/ceph_deploy/util/templates.py0000644000076500000240000000421212754333353022311 0ustar alfredostaff00000000000000 ceph_repo = """[ceph] name=Ceph packages for $basearch baseurl={repo_url}/$basearch enabled=1 gpgcheck={gpgcheck} priority=1 type=rpm-md gpgkey={gpg_url} [ceph-noarch] name=Ceph noarch packages baseurl={repo_url}/noarch enabled=1 gpgcheck={gpgcheck} priority=1 type=rpm-md gpgkey={gpg_url} [ceph-source] name=Ceph source packages baseurl={repo_url}/SRPMS enabled=0 gpgcheck={gpgcheck} type=rpm-md gpgkey={gpg_url} """ zypper_repo = """[ceph] name=Ceph packages type=rpm-md baseurl={repo_url} gpgcheck={gpgcheck} gpgkey={gpg_url} enabled=1 """ def custom_repo(**kw): """ Repo files need special care in that a whole line should not be present if there is no value for it. Because we were using `format()` we could not conditionally add a line for a repo file. So the end result would contain a key with a missing value (say if we were passing `None`). For example, it could look like:: [ceph repo] name= ceph repo proxy= gpgcheck= Which breaks. This function allows us to conditionally add lines, preserving an order and be more careful. Previously, and for historical purposes, this is how the template used to look:: custom_repo = [{repo_name}] name={name} baseurl={baseurl} enabled={enabled} gpgcheck={gpgcheck} type={_type} gpgkey={gpgkey} proxy={proxy} """ lines = [] # by using tuples (vs a dict) we preserve the order of what we want to # return, like starting with a [repo name] tmpl = ( ('reponame', '[%s]'), ('name', 'name=%s'), ('baseurl', 'baseurl=%s'), ('enabled', 'enabled=%s'), ('gpgcheck', 'gpgcheck=%s'), ('_type', 'type=%s'), ('gpgkey', 'gpgkey=%s'), ('proxy', 'proxy=%s'), ('priority', 'priority=%s'), ) for line in tmpl: tmpl_key, tmpl_value = line # key values from tmpl # ensure that there is an actual value (not None nor empty string) if tmpl_key in kw and kw.get(tmpl_key) not in (None, ''): lines.append(tmpl_value % kw.get(tmpl_key)) return '\n'.join(lines) ceph-deploy-2.0.1/ceph_deploy/util/versions.py0000644000076500000240000000315412656121033022156 0ustar alfredostaff00000000000000 class NormalizedVersion(object): """ A class to provide a clean interface for setting/retrieving distinct version parts divided into major, minor, and patch (following convnetions from semver (see http://semver.org/) Since a lot of times version parts need to be compared, it provides for `int` representations of their string counterparts, with some sanitization processing. Defaults to '0' or 0 (int) values when values are not set or parsing fails. 
""" def __init__(self, raw_version): self.raw_version = raw_version.strip() self.major = '0' self.minor = '0' self.patch = '0' self.garbage = '' self.int_major = 0 self.int_minor = 0 self.int_patch = 0 self._version_map = {} self._set_versions() def _set_int_versions(self): version_map = dict( major=self.major, minor=self.minor, patch=self.patch, garbage=self.garbage) # safe int versions that remove non-numerical chars # for example 'rc1' in a version like '1-rc1 for name, value in version_map.items(): if '-' in value: # get rid of garbage like -dev1 or -rc1 value = value.split('-')[0] value = float(''.join(c for c in value if c.isdigit()) or 0) int_name = "int_%s" % name setattr(self, int_name, value) def _set_versions(self): split_version = (self.raw_version.split('.') + ["0"]*4)[:4] self.major, self.minor, self.patch, self.garbage = split_version self._set_int_versions() ceph-deploy-2.0.1/ceph_deploy/validate.py0000644000076500000240000000056612236715242021133 0ustar alfredostaff00000000000000import argparse import re ALPHANUMERIC_RE = re.compile(r'^[a-zA-Z][a-zA-Z0-9]*$') def alphanumeric(s): """ Enforces string to be alphanumeric with leading alpha. """ if not ALPHANUMERIC_RE.match(s): raise argparse.ArgumentTypeError( 'argument must start with a letter and contain only letters and numbers', ) return s ceph-deploy-2.0.1/ceph_deploy.egg-info/0000755000076500000240000000000013312242252020443 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/ceph_deploy.egg-info/dependency_links.txt0000644000076500000240000000000113312242252024511 0ustar alfredostaff00000000000000 ceph-deploy-2.0.1/ceph_deploy.egg-info/entry_points.txt0000644000076500000240000000124413312242252023742 0ustar alfredostaff00000000000000[ceph_deploy.cli] admin = ceph_deploy.admin:make calamari = ceph_deploy.calamari:make config = ceph_deploy.config:make disk = ceph_deploy.osd:make_disk forgetkeys = ceph_deploy.forgetkeys:make gatherkeys = ceph_deploy.gatherkeys:make install = ceph_deploy.install:make mds = ceph_deploy.mds:make mgr = ceph_deploy.mgr:make mon = ceph_deploy.mon:make new = ceph_deploy.new:make osd = ceph_deploy.osd:make pkg = ceph_deploy.pkg:make purge = ceph_deploy.install:make_purge purgedata = ceph_deploy.install:make_purge_data repo = ceph_deploy.repo:make rgw = ceph_deploy.rgw:make uninstall = ceph_deploy.install:make_uninstall [console_scripts] ceph-deploy = ceph_deploy.cli:main ceph-deploy-2.0.1/ceph_deploy.egg-info/PKG-INFO0000644000076500000240000003662613312242252021555 0ustar alfredostaff00000000000000Metadata-Version: 1.0 Name: ceph-deploy Version: 2.0.1 Summary: Deploy Ceph with minimal infrastructure Home-page: https://github.com/ceph/ceph-deploy Author: Inktank Author-email: ceph-devel@vger.kernel.org License: MIT Description: ======================================================== ceph-deploy -- Deploy Ceph with minimal infrastructure ======================================================== ``ceph-deploy`` is a way to deploy Ceph relying on just SSH access to the servers, ``sudo``, and some Python. It runs fully on your workstation, requiring no servers, databases, or anything like that. If you set up and tear down Ceph clusters a lot, and want minimal extra bureaucracy, this is for you. This ``README`` provides a brief overview of ceph-deploy, for thorough documentation please go to http://ceph.com/ceph-deploy/docs .. 
_what this tool is not: What this tool is not --------------------- It is not a generic deployment system, it is only for Ceph, and is designed for users who want to quickly get Ceph running with sensible initial settings without the overhead of installing Chef, Puppet or Juju. It does not handle client configuration beyond pushing the Ceph config file and users who want fine-control over security settings, partitions or directory locations should use a tool such as Chef or Puppet. Installation ============ Depending on what type of usage you are going to have with ``ceph-deploy`` you might want to look into the different ways to install it. For automation, you might want to ``bootstrap`` directly. Regular users of ``ceph-deploy`` would probably install from the OS packages or from the Python Package Index. Python Package Index -------------------- If you are familiar with Python install tools (like ``pip`` and ``easy_install``) you can easily install ``ceph-deploy`` like:: pip install ceph-deploy or:: easy_install ceph-deploy It should grab all the dependencies for you and install into the current user's environment. We highly recommend using ``virtualenv`` and installing dependencies in a contained way. DEB --- All new releases of ``ceph-deploy`` are pushed to all ``ceph`` DEB release repos. The DEB release repos are found at:: http://ceph.com/debian-{release} http://ceph.com/debian-testing This means, for example, that installing ``ceph-deploy`` from http://ceph.com/debian-giant will install the same version as from http://ceph.com/debian-firefly or http://ceph.com/debian-testing. RPM --- All new releases of ``ceph-deploy`` are pushed to all ``ceph`` RPM release repos. The RPM release repos are found at:: http://ceph.com/rpm-{release} http://ceph.com/rpm-testing Make sure you add the proper one for your distribution (i.e. el7 vs rhel7). This means, for example, that installing ``ceph-deploy`` from http://ceph.com/rpm-giant will install the same version as from http://ceph.com/rpm-firefly or http://ceph.com/rpm-testing. bootstrapping ------------- To get the source tree ready for use, run this once:: ./bootstrap You can symlink the ``ceph-deploy`` script in this somewhere convenient (like ``~/bin``), or add the current directory to ``PATH``, or just always type the full path to ``ceph-deploy``. SSH and Remote Connections ========================== ``ceph-deploy`` will attempt to connect via SSH to hosts when the hostnames do not match the current host's hostname. For example, if you are connecting to host ``node1`` it will attempt an SSH connection as long as the current host's hostname is *not* ``node1``. ceph-deploy at a minimum requires that the machine from which the script is being run can ssh as root without password into each Ceph node. To enable this generate a new ssh keypair for the root user with no passphrase and place the public key (``id_rsa.pub`` or ``id_dsa.pub``) in:: /root/.ssh/authorized_keys and ensure that the following lines are in the sshd config:: PermitRootLogin without-password PubkeyAuthentication yes The machine running ceph-deploy does not need to have the Ceph packages installed unless it needs to admin the cluster directly using the ``ceph`` command line tool. usernames --------- When not specified the connection will be done with the same username as the one executing ``ceph-deploy``. This is useful if the same username is shared in all the nodes but can be cumbersome if that is not the case. 
A way to avoid this is to define the correct usernames to connect with in the SSH config, but you can also use the ``--username`` flag:: ceph-deploy --username ceph install node1 ``ceph-deploy`` then in turn would use ``ceph@node1`` to connect to that host. This would be the same expectation for any action that warrants a connection to a remote host. Managing an existing cluster ============================ You can use ceph-deploy to provision nodes for an existing cluster. To grab a copy of the cluster configuration file (normally ``ceph.conf``):: ceph-deploy config pull HOST You will usually also want to gather the encryption keys used for that cluster:: ceph-deploy gatherkeys MONHOST At this point you can skip the steps below that create a new cluster (you already have one) and optionally skip installation and/or monitor creation, depending on what you are trying to accomplish. Creating a new cluster ====================== Creating a new configuration ---------------------------- To create a new configuration file and secret key, decide what hosts will run ``ceph-mon``, and run:: ceph-deploy new MON [MON..] listing the hostnames of the monitors. Each ``MON`` can be * a simple hostname. It must be DNS resolvable without the fully qualified domain name. * a fully qualified domain name. The hostname is assumed to be the leading component up to the first ``.``. * a ``HOST:FQDN`` pair, of both the hostname and a fully qualified domain name or IP address. For example, ``foo``, ``foo.example.com``, ``foo:something.example.com``, and ``foo:1.2.3.4`` are all valid. Note, however, that the hostname should match that configured on the host ``foo``. The above will create a ``ceph.conf`` and ``ceph.mon.keyring`` in your current directory. Edit initial cluster configuration ---------------------------------- You want to review the generated ``ceph.conf`` file and make sure that the ``mon_host`` setting contains the IP addresses you would like the monitors to bind to. These are the IPs that clients will initially contact to authenticate to the cluster, and they need to be reachable both by external client-facing hosts and internal cluster daemons. Installing packages =================== To install the Ceph software on the servers, run:: ceph-deploy install HOST [HOST..] This installs the current default *stable* release. You can choose a different release track with command line options, for example to use a release candidate:: ceph-deploy install --testing HOST Or to test a development branch:: ceph-deploy install --dev=wip-mds-now-works-no-kidding HOST [HOST..] Proxy or Firewall Installs -------------------------- If attempting to install behind a firewall or through a proxy you can use the ``--no-adjust-repos`` flag, which will tell ceph-deploy to skip any changes to the distro's repositories and go straight to package installation. That will allow an environment without internet access to point to *its own repositories*. This means that those repositories will need to be properly set up (and mirrored with all the necessary dependencies) before attempting an install.
Another alternative is to set the ``wget`` env variables to point to the right hosts, for example, put the following lines into ``/root/.wgetrc`` on each node (since ceph-deploy runs wget as root):: http_proxy=http://host:port ftp_proxy=http://host:port https_proxy=http://host:port Deploying monitors ================== To actually deploy ``ceph-mon`` to the hosts you chose, run:: ceph-deploy mon create HOST [HOST..] Without explicit hosts listed, hosts in ``mon_initial_members`` in the config file are deployed. That is, the hosts you passed to ``ceph-deploy new`` are the default value here. Gather keys =========== To gather authentication keys (for administering the cluster and bootstrapping new nodes) to the local directory, run:: ceph-deploy gatherkeys HOST [HOST...] where ``HOST`` is one of the monitor hosts. Once these keys are in the local directory, you can provision new OSDs etc. Deploying OSDs ============== To prepare a node for running OSDs, run:: ceph-deploy osd create HOST:DISK[:JOURNAL] [HOST:DISK[:JOURNAL] ...] After that, the hosts will be running OSDs for the given data disks. If you specify a raw disk (e.g., ``/dev/sdb``), partitions will be created and GPT labels will be used to mark and automatically activate OSD volumes. If an existing partition is specified, the partition table will not be modified. If you want to destroy the existing partition table on DISK first, you can include the ``--zap-disk`` option. If there is already a prepared disk or directory that is ready to become an OSD, you can also do:: ceph-deploy osd activate HOST:DIR[:JOURNAL] [...] This is useful when you are managing the mounting of volumes yourself. Admin hosts =========== To prepare a host with a ``ceph.conf`` and ``ceph.client.admin.keyring`` keyring so that it can administer the cluster, run:: ceph-deploy admin HOST [HOST ...] Forget keys =========== The ``new`` and ``gatherkeys`` commands put some Ceph authentication keys in keyrings in the local directory. If you are worried about them being there for security reasons, run:: ceph-deploy forgetkeys and they will be removed. If you need them again later to deploy additional nodes, simply re-run:: ceph-deploy gatherkeys HOST [HOST...] and they will be retrieved from an existing monitor node. Multiple clusters ================= All of the above commands take a ``--cluster=NAME`` option, allowing you to manage multiple clusters conveniently from one workstation. For example:: ceph-deploy --cluster=us-west new vi us-west.conf ceph-deploy --cluster=us-west mon FAQ === Before anything --------------- Make sure you have the latest version of ``ceph-deploy``. It is actively developed and releases are coming weekly (on average). The most recent versions of ``ceph-deploy`` will have a ``--version`` flag you can use, otherwise check with your package manager and update if there is anything new. Why is feature X not implemented? --------------------------------- Usually, features are added when/if it is sensible for someone who wants to get started with Ceph and said feature would make sense in that context. If you believe this is the case and you've read "`what this tool is not`_" and still think feature ``X`` should exist in ceph-deploy, open a feature request in the ceph tracker: http://tracker.ceph.com/projects/ceph-deploy/issues A command gave me an error, what is going on? --------------------------------------------- Most of the commands for ``ceph-deploy`` are meant to be run remotely on a host that you have configured when creating the initial config.
If a given command is not working as expected, try to run the command that failed on the remote host and assert the behavior there. If the behavior on the remote host is the same, then it is probably not something wrong with ``ceph-deploy`` per se. Make sure you capture both the ``ceph-deploy`` output and the output of the command on the remote host. Issues with monitors -------------------- If your monitors are not starting, make sure that the ``{hostname}`` you used when you ran ``ceph-deploy mon create {hostname}`` matches the actual ``hostname -s`` in the remote host. Newer versions of ``ceph-deploy`` should warn you if the results are different, as a mismatch might prevent the monitors from reaching quorum. Developing ceph-deploy ====================== Now that you have cut your teeth on Ceph, you might find that you want to contribute to ceph-deploy. Resources --------- Bug tracking: http://tracker.ceph.com/projects/ceph-deploy/issues Mailing list and IRC info is the same as for Ceph: http://ceph.com/resources/mailing-list-irc/ Submitting Patches ------------------ Please add test cases to cover any code you add. You can test your changes by running ``tox`` (you will also need ``mock`` and ``pytest``) from inside the git clone. When creating a commit message please use ``git commit -s`` or otherwise add ``Signed-off-by: Your Name `` to your commit message. Patches can then be submitted by a pull request on GitHub. Keywords: ceph deploy Platform: UNKNOWN ceph-deploy-2.0.1/ceph_deploy.egg-info/requires.txt0000644000076500000240000000001313312242252023035 0ustar alfredostaff00000000000000setuptools ceph-deploy-2.0.1/ceph_deploy.egg-info/SOURCES.txt0000644000076500000240000001235413312242252022334 0ustar alfredostaff00000000000000CONTRIBUTING.rst LICENSE MANIFEST.in README.rst setup.cfg setup.py tox.ini vendor.py ceph_deploy/__init__.py ceph_deploy/admin.py ceph_deploy/calamari.py ceph_deploy/cli.py ceph_deploy/cliutil.py ceph_deploy/config.py ceph_deploy/connection.py ceph_deploy/exc.py ceph_deploy/forgetkeys.py ceph_deploy/gatherkeys.py ceph_deploy/install.py ceph_deploy/mds.py ceph_deploy/mgr.py ceph_deploy/misc.py ceph_deploy/mon.py ceph_deploy/new.py ceph_deploy/osd.py ceph_deploy/pkg.py ceph_deploy/repo.py ceph_deploy/rgw.py ceph_deploy/validate.py ceph_deploy.egg-info/PKG-INFO ceph_deploy.egg-info/SOURCES.txt ceph_deploy.egg-info/dependency_links.txt ceph_deploy.egg-info/entry_points.txt ceph_deploy.egg-info/requires.txt ceph_deploy.egg-info/top_level.txt ceph_deploy/conf/__init__.py ceph_deploy/conf/ceph.py ceph_deploy/conf/cephdeploy.py ceph_deploy/hosts/__init__.py ceph_deploy/hosts/common.py ceph_deploy/hosts/remotes.py ceph_deploy/hosts/util.py ceph_deploy/hosts/arch/__init__.py ceph_deploy/hosts/arch/install.py ceph_deploy/hosts/arch/uninstall.py ceph_deploy/hosts/arch/mon/__init__.py ceph_deploy/hosts/centos/__init__.py ceph_deploy/hosts/centos/install.py ceph_deploy/hosts/centos/uninstall.py ceph_deploy/hosts/centos/mon/__init__.py ceph_deploy/hosts/debian/__init__.py ceph_deploy/hosts/debian/install.py ceph_deploy/hosts/debian/uninstall.py ceph_deploy/hosts/debian/mon/__init__.py ceph_deploy/hosts/fedora/__init__.py ceph_deploy/hosts/fedora/install.py ceph_deploy/hosts/fedora/uninstall.py ceph_deploy/hosts/fedora/mon/__init__.py ceph_deploy/hosts/rhel/__init__.py ceph_deploy/hosts/rhel/install.py ceph_deploy/hosts/rhel/uninstall.py ceph_deploy/hosts/rhel/mon/__init__.py ceph_deploy/hosts/suse/__init__.py ceph_deploy/hosts/suse/install.py
ceph_deploy/hosts/suse/uninstall.py ceph_deploy/hosts/suse/mon/__init__.py ceph_deploy/lib/__init__.py ceph_deploy/lib/vendor/__init__.py ceph_deploy/lib/vendor/remoto/__init__.py ceph_deploy/lib/vendor/remoto/connection.py ceph_deploy/lib/vendor/remoto/exc.py ceph_deploy/lib/vendor/remoto/file_sync.py ceph_deploy/lib/vendor/remoto/log.py ceph_deploy/lib/vendor/remoto/process.py ceph_deploy/lib/vendor/remoto/util.py ceph_deploy/lib/vendor/remoto/lib/__init__.py ceph_deploy/lib/vendor/remoto/lib/vendor/__init__.py ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/__init__.py ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/apipkg.py ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/deprecated.py ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/gateway.py ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/gateway_base.py ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/gateway_bootstrap.py ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/gateway_io.py ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/gateway_socket.py ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/multi.py ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/rsync.py ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/rsync_remote.py ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/xspec.py ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/script/__init__.py ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/script/loop_socketserver.py ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/script/quitserver.py ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/script/shell.py ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/script/socketserver.py ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/script/socketserverservice.py ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/script/xx.py ceph_deploy/tests/__init__.py ceph_deploy/tests/conftest.py ceph_deploy/tests/directory.py ceph_deploy/tests/fakes.py ceph_deploy/tests/test_cli_admin.py ceph_deploy/tests/test_cli_mon.py ceph_deploy/tests/test_cli_new.py ceph_deploy/tests/test_cli_rgw.py ceph_deploy/tests/test_conf.py ceph_deploy/tests/test_gather_keys.py ceph_deploy/tests/test_gather_keys_missing.py ceph_deploy/tests/test_gather_keys_with_mon.py ceph_deploy/tests/test_install.py ceph_deploy/tests/test_keys_equivalent.py ceph_deploy/tests/test_mon.py ceph_deploy/tests/test_remotes.py ceph_deploy/tests/util.py ceph_deploy/tests/parser/__init__.py ceph_deploy/tests/parser/test_admin.py ceph_deploy/tests/parser/test_calamari.py ceph_deploy/tests/parser/test_config.py ceph_deploy/tests/parser/test_disk.py ceph_deploy/tests/parser/test_gatherkeys.py ceph_deploy/tests/parser/test_install.py ceph_deploy/tests/parser/test_main.py ceph_deploy/tests/parser/test_mds.py ceph_deploy/tests/parser/test_mon.py ceph_deploy/tests/parser/test_new.py ceph_deploy/tests/parser/test_osd.py ceph_deploy/tests/parser/test_pkg.py ceph_deploy/tests/parser/test_purge.py ceph_deploy/tests/parser/test_purgedata.py ceph_deploy/tests/parser/test_repo.py ceph_deploy/tests/parser/test_rgw.py ceph_deploy/tests/parser/test_uninstall.py ceph_deploy/util/__init__.py ceph_deploy/util/arg_validators.py ceph_deploy/util/constants.py ceph_deploy/util/decorators.py ceph_deploy/util/files.py ceph_deploy/util/help_formatters.py ceph_deploy/util/log.py ceph_deploy/util/net.py ceph_deploy/util/packages.py ceph_deploy/util/pkg_managers.py ceph_deploy/util/ssh.py ceph_deploy/util/system.py ceph_deploy/util/templates.py ceph_deploy/util/versions.py ceph_deploy/util/paths/__init__.py ceph_deploy/util/paths/gpg.py ceph_deploy/util/paths/mon.py 
ceph_deploy/util/paths/osd.py scripts/ceph-deployceph-deploy-2.0.1/ceph_deploy.egg-info/top_level.txt0000644000076500000240000000001413312242252023170 0ustar alfredostaff00000000000000ceph_deploy ceph-deploy-2.0.1/CONTRIBUTING.rst0000644000076500000240000000305612656121033017126 0ustar alfredostaff00000000000000Contributing to ceph-deploy =========================== Before any contributions, a reference ticket *must* exist. The community issue tracker is hosted at tracker.ceph.com To open a new issue, requests can go to: http://tracker.ceph.com/projects/ceph-deploy/issues/new commits ------- Once a ticket exists, commits should be prefaced by the ticket ID. This makes it easier for maintainers to keep track of why a given line changed, mapping directly to work done on a ticket. For tickets coming from tracker.ceph.com, we expect the following format:: [RM-0000] this is a commit message for tracker.ceph.com ``RM`` stands for Redmine which is the software running tracker.ceph.com. Similarly, if a ticket was created in bugzilla.redhat.com, we expect the following format:: [BZ-0000] this is a commit message for bugzilla.redhat.com To automate this process, you can create a branch with the tracker identifier and id (replace "0000" with the ticket number):: git checkout -b RM-0000 And then use the follow prepare-commit-msg: https://gist.github.com/alfredodeza/6d62d99a95c9a7975fbe Copy that file to ``$GITREPOSITORY/.git/hooks/prepare-commit-msg`` and mark it executable. Your commit messages should then be automatically prefixed with the branch name based off of the issue tracker. tests and documentation ----------------------- Wherever it is feasible, tests must exist and documentation must be added or improved depending on the change. The build process not only runs tests but ensures that docs can be built from the proposed changes as well. ceph-deploy-2.0.1/LICENSE0000644000076500000240000000205112236715242015471 0ustar alfredostaff00000000000000Copyright (c) 2012 Inktank Storage, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
ceph-deploy-2.0.1/MANIFEST.in0000644000076500000240000000013412620214647016222 0ustar alfredostaff00000000000000include *.rst include LICENSE include scripts/ceph-deploy include vendor.py include tox.ini ceph-deploy-2.0.1/PKG-INFO0000644000076500000240000003662613312242253015571 0ustar alfredostaff00000000000000Metadata-Version: 1.0 Name: ceph-deploy Version: 2.0.1 Summary: Deploy Ceph with minimal infrastructure Home-page: https://github.com/ceph/ceph-deploy Author: Inktank Author-email: ceph-devel@vger.kernel.org License: MIT Description: ======================================================== ceph-deploy -- Deploy Ceph with minimal infrastructure ======================================================== ``ceph-deploy`` is a way to deploy Ceph relying on just SSH access to the servers, ``sudo``, and some Python. It runs fully on your workstation, requiring no servers, databases, or anything like that. If you set up and tear down Ceph clusters a lot, and want minimal extra bureaucracy, this is for you. This ``README`` provides a brief overview of ceph-deploy, for thorough documentation please go to http://ceph.com/ceph-deploy/docs .. _what this tool is not: What this tool is not --------------------- It is not a generic deployment system, it is only for Ceph, and is designed for users who want to quickly get Ceph running with sensible initial settings without the overhead of installing Chef, Puppet or Juju. It does not handle client configuration beyond pushing the Ceph config file and users who want fine-control over security settings, partitions or directory locations should use a tool such as Chef or Puppet. Installation ============ Depending on what type of usage you are going to have with ``ceph-deploy`` you might want to look into the different ways to install it. For automation, you might want to ``bootstrap`` directly. Regular users of ``ceph-deploy`` would probably install from the OS packages or from the Python Package Index. Python Package Index -------------------- If you are familiar with Python install tools (like ``pip`` and ``easy_install``) you can easily install ``ceph-deploy`` like:: pip install ceph-deploy or:: easy_install ceph-deploy It should grab all the dependencies for you and install into the current user's environment. We highly recommend using ``virtualenv`` and installing dependencies in a contained way. DEB --- All new releases of ``ceph-deploy`` are pushed to all ``ceph`` DEB release repos. The DEB release repos are found at:: http://ceph.com/debian-{release} http://ceph.com/debian-testing This means, for example, that installing ``ceph-deploy`` from http://ceph.com/debian-giant will install the same version as from http://ceph.com/debian-firefly or http://ceph.com/debian-testing. RPM --- All new releases of ``ceph-deploy`` are pushed to all ``ceph`` RPM release repos. The RPM release repos are found at:: http://ceph.com/rpm-{release} http://ceph.com/rpm-testing Make sure you add the proper one for your distribution (i.e. el7 vs rhel7). This means, for example, that installing ``ceph-deploy`` from http://ceph.com/rpm-giant will install the same version as from http://ceph.com/rpm-firefly or http://ceph.com/rpm-testing. bootstrapping ------------- To get the source tree ready for use, run this once:: ./bootstrap You can symlink the ``ceph-deploy`` script in this somewhere convenient (like ``~/bin``), or add the current directory to ``PATH``, or just always type the full path to ``ceph-deploy``. 
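For example, one possible way to do this (assuming the source tree was cloned to the current directory and ``~/bin`` is on your ``PATH``):: ln -s $(pwd)/ceph-deploy ~/bin/ceph-deploy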
SSH and Remote Connections ========================== ``ceph-deploy`` will attempt to connect via SSH to hosts when the hostnames do not match the current host's hostname. For example, if you are connecting to host ``node1`` it will attempt an SSH connection as long as the current host's hostname is *not* ``node1``. ceph-deploy at a minimum requires that the machine from which the script is being run can ssh as root without password into each Ceph node. To enable this generate a new ssh keypair for the root user with no passphrase and place the public key (``id_rsa.pub`` or ``id_dsa.pub``) in:: /root/.ssh/authorized_keys and ensure that the following lines are in the sshd config:: PermitRootLogin without-password PubkeyAuthentication yes The machine running ceph-deploy does not need to have the Ceph packages installed unless it needs to admin the cluster directly using the ``ceph`` command line tool. usernames --------- When not specified the connection will be done with the same username as the one executing ``ceph-deploy``. This is useful if the same username is shared in all the nodes but can be cumbersome if that is not the case. A way to avoid this is to define the correct usernames to connect with in the SSH config, but you can also use the ``--username`` flag as well:: ceph-deploy --username ceph install node1 ``ceph-deploy`` then in turn would use ``ceph@node1`` to connect to that host. This would be the same expectation for any action that warrants a connection to a remote host. Managing an existing cluster ============================ You can use ceph-deploy to provision nodes for an existing cluster. To grab a copy of the cluster configuration file (normally ``ceph.conf``):: ceph-deploy config pull HOST You will usually also want to gather the encryption keys used for that cluster:: ceph-deploy gatherkeys MONHOST At this point you can skip the steps below that create a new cluster (you already have one) and optionally skip installation and/or monitor creation, depending on what you are trying to accomplish. Creating a new cluster ====================== Creating a new configuration ---------------------------- To create a new configuration file and secret key, decide what hosts will run ``ceph-mon``, and run:: ceph-deploy new MON [MON..] listing the hostnames of the monitors. Each ``MON`` can be * a simple hostname. It must be DNS resolvable without the fully qualified domain name. * a fully qualified domain name. The hostname is assumed to be the leading component up to the first ``.``. * a ``HOST:FQDN`` pair, of both the hostname and a fully qualified domain name or IP address. For example, ``foo``, ``foo.example.com``, ``foo:something.example.com``, and ``foo:1.2.3.4`` are all valid. Note, however, that the hostname should match that configured on the host ``foo``. The above will create a ``ceph.conf`` and ``ceph.mon.keyring`` in your current directory. Edit initial cluster configuration ---------------------------------- You want to review the generated ``ceph.conf`` file and make sure that the ``mon_host`` setting contains the IP addresses you would like the monitors to bind to. These are the IPs that clients will initially contact to authenticate to the cluster, and they need to be reachable both by external client-facing hosts and internal cluster daemons. Installing packages =================== To install the Ceph software on the servers, run:: ceph-deploy install HOST [HOST..] This installs the current default *stable* release. 
Installing packages
===================

To install the Ceph software on the servers, run::

    ceph-deploy install HOST [HOST..]

This installs the current default *stable* release. You can choose a
different release track with command line options, for example to use a
release candidate::

    ceph-deploy install --testing HOST

Or to test a development branch::

    ceph-deploy install --dev=wip-mds-now-works-no-kidding HOST [HOST..]

Proxy or Firewall Installs
--------------------------

If attempting to install behind a firewall or through a proxy you can use the
``--no-adjust-repos`` flag, which tells ceph-deploy to skip any changes to
the distro's repositories and go straight to package installation. That
allows an environment without internet access to point to *its own
repositories*, which need to be properly set up (and mirrored with all the
necessary dependencies) before attempting an install.

Another alternative is to set the ``wget`` environment variables to point to
the right hosts; for example, put the following lines into ``/root/.wgetrc``
on each node (since ceph-deploy runs wget as root)::

    http_proxy=http://host:port
    ftp_proxy=http://host:port
    https_proxy=http://host:port

Deploying monitors
==================

To actually deploy ``ceph-mon`` to the hosts you chose, run::

    ceph-deploy mon create HOST [HOST..]

Without explicit hosts listed, the hosts in ``mon_initial_members`` in the
config file are deployed. That is, the hosts you passed to ``ceph-deploy
new`` are the default value here.

Gather keys
===========

To gather authentication keys (for administering the cluster and
bootstrapping new nodes) to the local directory, run::

    ceph-deploy gatherkeys HOST [HOST...]

where ``HOST`` is one of the monitor hosts. Once these keys are in the local
directory, you can provision new OSDs etc.

Deploying OSDs
==============

To prepare a node for running OSDs, run::

    ceph-deploy osd create HOST:DISK[:JOURNAL] [HOST:DISK[:JOURNAL] ...]

After that, the hosts will be running OSDs for the given data disks. If you
specify a raw disk (e.g., ``/dev/sdb``), partitions will be created and GPT
labels will be used to mark and automatically activate OSD volumes. If an
existing partition is specified, the partition table will not be modified.
If you want to destroy the existing partition table on DISK first, you can
include the ``--zap-disk`` option.

If there is already a prepared disk or directory that is ready to become an
OSD, you can also do::

    ceph-deploy osd activate HOST:DIR[:JOURNAL] [...]

This is useful when you are managing the mounting of volumes yourself.

Admin hosts
===========

To prepare a host with a ``ceph.conf`` and a ``ceph.client.admin.keyring``
so that it can administer the cluster, run::

    ceph-deploy admin HOST [HOST ...]

Forget keys
===========

The ``new`` and ``gatherkeys`` commands put some Ceph authentication keys in
keyrings in the local directory. If you are worried about them being there
for security reasons, run::

    ceph-deploy forgetkeys

and they will be removed. If you need them again later to deploy additional
nodes, simply re-run::

    ceph-deploy gatherkeys HOST [HOST...]

and they will be retrieved from an existing monitor node.

Multiple clusters
=================

All of the above commands take a ``--cluster=NAME`` option, allowing you to
manage multiple clusters conveniently from one workstation. For example::

    ceph-deploy --cluster=us-west new
    vi us-west.conf
    ceph-deploy --cluster=us-west mon create

FAQ
===

Before anything
---------------

Make sure you have the latest version of ``ceph-deploy``. It is actively
developed and releases come out weekly (on average).
The most recent versions of ``ceph-deploy`` will have a ``--version`` flag
you can use; otherwise, check with your package manager and update if there
is anything new.

Why is feature X not implemented?
---------------------------------

Usually, features are added when/if it is sensible for someone that wants to
get started with Ceph and said feature would make sense in that context. If
you believe this is the case, and you've read "`what this tool is not`_" and
still think feature ``X`` should exist in ceph-deploy, open a feature request
in the Ceph tracker: http://tracker.ceph.com/projects/ceph-deploy/issues

A command gave me an error, what is going on?
---------------------------------------------

Most of the commands for ``ceph-deploy`` are meant to be run remotely on a
host that you have configured when creating the initial config. If a given
command is not working as expected, try to run the command that failed on the
remote host and verify the behavior there. If the behavior on the remote host
is the same, then it is probably not something wrong with ``ceph-deploy``
per se. Make sure you capture both the ``ceph-deploy`` output and the output
of the command on the remote host.

Issues with monitors
--------------------

If your monitors are not starting, make sure that the ``{hostname}`` you used
when you ran ``ceph-deploy mon create {hostname}`` matches the actual
``hostname -s`` on the remote host. Newer versions of ``ceph-deploy`` should
warn you if the results differ; such a mismatch might prevent the monitors
from reaching quorum.
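If you want to verify this on a monitor host, a small illustrative Python
check (not part of ceph-deploy; ``node1`` is a placeholder) along these lines
compares the short hostname against the name you deployed with::

    import socket

    deployed_name = 'node1'  # the {hostname} passed to "ceph-deploy mon create"
    short_hostname = socket.gethostname().split('.')[0]  # roughly "hostname -s"

    if short_hostname != deployed_name:
        print('mismatch: deployed as %r, but this host reports %r'
              % (deployed_name, short_hostname))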
Developing ceph-deploy
======================

Now that you have cut your teeth on Ceph, you might find that you want to
contribute to ceph-deploy.

Resources
---------

Bug tracking: http://tracker.ceph.com/projects/ceph-deploy/issues

Mailing list and IRC info is the same as for Ceph:
http://ceph.com/resources/mailing-list-irc/

Submitting Patches
------------------

Please add test cases to cover any code you add. You can test your changes
by running ``tox`` (you will also need ``mock`` and ``pytest``) from inside
the git clone.

When creating a commit message please use ``git commit -s`` or otherwise add
``Signed-off-by: Your Name <email@address.com>`` to your commit message.

Patches can then be submitted through a pull request on GitHub.
Keywords: ceph deploy
Platform: UNKNOWN
ceph-deploy-2.0.1/README.rst0000644000076500000240000003033312620214647016157 0ustar alfredostaff00000000000000
========================================================
 ceph-deploy -- Deploy Ceph with minimal infrastructure
========================================================

``ceph-deploy`` is a way to deploy Ceph relying on just SSH access to the
servers, ``sudo``, and some Python. It runs fully on your workstation,
requiring no servers, databases, or anything like that.

If you set up and tear down Ceph clusters a lot, and want minimal extra
bureaucracy, this is for you.

This ``README`` provides a brief overview of ceph-deploy; for thorough
documentation, please go to http://ceph.com/ceph-deploy/docs

.. _what this tool is not:

What this tool is not
---------------------

It is not a generic deployment system, it is only for Ceph, and is designed
for users who want to quickly get Ceph running with sensible initial settings
without the overhead of installing Chef, Puppet, or Juju.

It does not handle client configuration beyond pushing the Ceph config file,
and users who want fine-grained control over security settings, partitions,
or directory locations should use a tool such as Chef or Puppet.

Installation
============

Depending on what type of usage you are going to have with ``ceph-deploy``,
you might want to look into the different ways to install it. For automation,
you might want to ``bootstrap`` directly. Regular users of ``ceph-deploy``
would probably install from the OS packages or from the Python Package Index.

Python Package Index
--------------------

If you are familiar with Python install tools (like ``pip`` and
``easy_install``) you can easily install ``ceph-deploy`` like::

    pip install ceph-deploy

or::

    easy_install ceph-deploy

It should grab all the dependencies for you and install them into the current
user's environment. We highly recommend using ``virtualenv`` and installing
dependencies in a contained way.

DEB
---

All new releases of ``ceph-deploy`` are pushed to all ``ceph`` DEB release
repos. The DEB release repos are found at::

    http://ceph.com/debian-{release}
    http://ceph.com/debian-testing

This means, for example, that installing ``ceph-deploy`` from
http://ceph.com/debian-giant will install the same version as from
http://ceph.com/debian-firefly or http://ceph.com/debian-testing.

RPM
---

All new releases of ``ceph-deploy`` are pushed to all ``ceph`` RPM release
repos. The RPM release repos are found at::

    http://ceph.com/rpm-{release}
    http://ceph.com/rpm-testing

Make sure you add the proper one for your distribution (e.g. el7 vs rhel7).

This means, for example, that installing ``ceph-deploy`` from
http://ceph.com/rpm-giant will install the same version as from
http://ceph.com/rpm-firefly or http://ceph.com/rpm-testing.

bootstrapping
-------------

To get the source tree ready for use, run this once::

    ./bootstrap

You can symlink the ``ceph-deploy`` script in this directory somewhere
convenient (like ``~/bin``), add the current directory to ``PATH``, or just
always type the full path to ``ceph-deploy``.

SSH and Remote Connections
==========================

``ceph-deploy`` will attempt to connect via SSH to hosts when the hostnames
do not match the current host's hostname. For example, if you are connecting
to host ``node1`` it will attempt an SSH connection as long as the current
host's hostname is *not* ``node1``.

At a minimum, ceph-deploy requires that the machine from which the script is
being run can SSH as root without a password into each Ceph node. To enable
this, generate a new SSH keypair for the root user with no passphrase and
place the public key (``id_rsa.pub`` or ``id_dsa.pub``) in::

    /root/.ssh/authorized_keys

and ensure that the following lines are in the sshd config::

    PermitRootLogin without-password
    PubkeyAuthentication yes

The machine running ceph-deploy does not need to have the Ceph packages
installed unless it needs to administer the cluster directly using the
``ceph`` command line tool.
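One way to set this up from the machine that will run ceph-deploy, with
``node1`` standing in for each Ceph node, is::

    ssh-keygen -t rsa -N '' -f /root/.ssh/id_rsa
    ssh-copy-id root@node1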
usernames
---------

When not specified, the connection will be made with the same username as the
one executing ``ceph-deploy``. This is useful if the same username is shared
by all the nodes, but can be cumbersome if that is not the case. One way to
avoid this is to define the correct usernames to connect with in the SSH
config, but you can also use the ``--username`` flag::

    ceph-deploy --username ceph install node1

``ceph-deploy`` would then use ``ceph@node1`` to connect to that host. The
same expectation holds for any action that warrants a connection to a remote
host.

Managing an existing cluster
============================

You can use ceph-deploy to provision nodes for an existing cluster.
To grab a copy of the cluster configuration file (normally ``ceph.conf``)::

    ceph-deploy config pull HOST

You will usually also want to gather the encryption keys used for that
cluster::

    ceph-deploy gatherkeys MONHOST

At this point you can skip the steps below that create a new cluster (you
already have one), and optionally skip installation and/or monitor creation,
depending on what you are trying to accomplish.

Creating a new cluster
======================

Creating a new configuration
----------------------------

To create a new configuration file and secret key, decide which hosts will
run ``ceph-mon``, and run::

    ceph-deploy new MON [MON..]

listing the hostnames of the monitors. Each ``MON`` can be

* a simple hostname. It must be DNS resolvable without the fully qualified
  domain name.
* a fully qualified domain name. The hostname is assumed to be the leading
  component up to the first ``.``.
* a ``HOST:FQDN`` pair, of both the hostname and a fully qualified domain
  name or IP address. For example, ``foo``, ``foo.example.com``,
  ``foo:something.example.com``, and ``foo:1.2.3.4`` are all valid. Note,
  however, that the hostname should match that configured on the host
  ``foo``. A small sketch of how these forms map to names is shown below.

The above will create a ``ceph.conf`` and ``ceph.mon.keyring`` in your
current directory.
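To make the three forms concrete, here is a small illustrative sketch (not
ceph-deploy's actual parsing code) of how a ``MON`` argument maps to a
hostname and an address or FQDN under the rules above::

    def split_mon_spec(spec):
        # 'foo'             -> ('foo', 'foo')
        # 'foo.example.com' -> ('foo', 'foo.example.com')
        # 'foo:1.2.3.4'     -> ('foo', '1.2.3.4')
        if ':' in spec:
            hostname, fqdn = spec.split(':', 1)
            return hostname, fqdn
        # the hostname is the leading component up to the first '.'
        return spec.split('.')[0], spec

    assert split_mon_spec('foo:1.2.3.4') == ('foo', '1.2.3.4')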
Edit initial cluster configuration
----------------------------------

You want to review the generated ``ceph.conf`` file and make sure that the
``mon_host`` setting contains the IP addresses you would like the monitors
to bind to. These are the IPs that clients will initially contact to
authenticate to the cluster, and they need to be reachable both by external
client-facing hosts and internal cluster daemons.

Installing packages
===================

To install the Ceph software on the servers, run::

    ceph-deploy install HOST [HOST..]

This installs the current default *stable* release. You can choose a
different release track with command line options, for example to use a
release candidate::

    ceph-deploy install --testing HOST

Or to test a development branch::

    ceph-deploy install --dev=wip-mds-now-works-no-kidding HOST [HOST..]

Proxy or Firewall Installs
--------------------------

If attempting to install behind a firewall or through a proxy you can use the
``--no-adjust-repos`` flag, which tells ceph-deploy to skip any changes to
the distro's repositories and go straight to package installation. That
allows an environment without internet access to point to *its own
repositories*, which need to be properly set up (and mirrored with all the
necessary dependencies) before attempting an install.

Another alternative is to set the ``wget`` environment variables to point to
the right hosts; for example, put the following lines into ``/root/.wgetrc``
on each node (since ceph-deploy runs wget as root)::

    http_proxy=http://host:port
    ftp_proxy=http://host:port
    https_proxy=http://host:port

Deploying monitors
==================

To actually deploy ``ceph-mon`` to the hosts you chose, run::

    ceph-deploy mon create HOST [HOST..]

Without explicit hosts listed, the hosts in ``mon_initial_members`` in the
config file are deployed. That is, the hosts you passed to ``ceph-deploy
new`` are the default value here.

Gather keys
===========

To gather authentication keys (for administering the cluster and
bootstrapping new nodes) to the local directory, run::

    ceph-deploy gatherkeys HOST [HOST...]

where ``HOST`` is one of the monitor hosts. Once these keys are in the local
directory, you can provision new OSDs etc.

Deploying OSDs
==============

To prepare a node for running OSDs, run::

    ceph-deploy osd create HOST:DISK[:JOURNAL] [HOST:DISK[:JOURNAL] ...]

After that, the hosts will be running OSDs for the given data disks. If you
specify a raw disk (e.g., ``/dev/sdb``), partitions will be created and GPT
labels will be used to mark and automatically activate OSD volumes. If an
existing partition is specified, the partition table will not be modified.
If you want to destroy the existing partition table on DISK first, you can
include the ``--zap-disk`` option.

If there is already a prepared disk or directory that is ready to become an
OSD, you can also do::

    ceph-deploy osd activate HOST:DIR[:JOURNAL] [...]

This is useful when you are managing the mounting of volumes yourself.

Admin hosts
===========

To prepare a host with a ``ceph.conf`` and a ``ceph.client.admin.keyring``
so that it can administer the cluster, run::

    ceph-deploy admin HOST [HOST ...]

Forget keys
===========

The ``new`` and ``gatherkeys`` commands put some Ceph authentication keys in
keyrings in the local directory. If you are worried about them being there
for security reasons, run::

    ceph-deploy forgetkeys

and they will be removed. If you need them again later to deploy additional
nodes, simply re-run::

    ceph-deploy gatherkeys HOST [HOST...]

and they will be retrieved from an existing monitor node.

Multiple clusters
=================

All of the above commands take a ``--cluster=NAME`` option, allowing you to
manage multiple clusters conveniently from one workstation. For example::

    ceph-deploy --cluster=us-west new
    vi us-west.conf
    ceph-deploy --cluster=us-west mon create

FAQ
===

Before anything
---------------

Make sure you have the latest version of ``ceph-deploy``. It is actively
developed and releases come out weekly (on average).
The most recent versions of ``ceph-deploy`` will have a ``--version`` flag
you can use; otherwise, check with your package manager and update if there
is anything new.

Why is feature X not implemented?
---------------------------------

Usually, features are added when/if it is sensible for someone that wants to
get started with Ceph and said feature would make sense in that context. If
you believe this is the case, and you've read "`what this tool is not`_" and
still think feature ``X`` should exist in ceph-deploy, open a feature request
in the Ceph tracker: http://tracker.ceph.com/projects/ceph-deploy/issues

A command gave me an error, what is going on?
---------------------------------------------

Most of the commands for ``ceph-deploy`` are meant to be run remotely on a
host that you have configured when creating the initial config. If a given
command is not working as expected, try to run the command that failed on the
remote host and verify the behavior there. If the behavior on the remote host
is the same, then it is probably not something wrong with ``ceph-deploy``
per se. Make sure you capture both the ``ceph-deploy`` output and the output
of the command on the remote host.

Issues with monitors
--------------------

If your monitors are not starting, make sure that the ``{hostname}`` you used
when you ran ``ceph-deploy mon create {hostname}`` matches the actual
``hostname -s`` on the remote host. Newer versions of ``ceph-deploy`` should
warn you if the results differ; such a mismatch might prevent the monitors
from reaching quorum.
Developing ceph-deploy
======================

Now that you have cut your teeth on Ceph, you might find that you want to
contribute to ceph-deploy.

Resources
---------

Bug tracking: http://tracker.ceph.com/projects/ceph-deploy/issues

Mailing list and IRC info is the same as for Ceph:
http://ceph.com/resources/mailing-list-irc/

Submitting Patches
------------------

Please add test cases to cover any code you add. You can test your changes
by running ``tox`` (you will also need ``mock`` and ``pytest``) from inside
the git clone.

When creating a commit message please use ``git commit -s`` or otherwise add
``Signed-off-by: Your Name <email@address.com>`` to your commit message.

Patches can then be submitted through a pull request on GitHub.
ceph-deploy-2.0.1/scripts/0000755000076500000240000000000013312242253016146 5ustar alfredostaff00000000000000ceph-deploy-2.0.1/scripts/ceph-deploy0000755000076500000240000000120312620214647020310 0ustar alfredostaff00000000000000
#!/usr/bin/env python
"""
ceph-deploy - admin tool for ceph
"""
import os
import sys

# Prefer a packaged copy of ceph_deploy when one exists in a
# distro-specific location; otherwise fall back to whatever is
# already importable on sys.path.
if os.path.exists('/usr/share/pyshared/ceph_deploy'):
    sys.path.insert(0, '/usr/share/pyshared/ceph_deploy')
elif os.path.exists('/usr/share/ceph-deploy'):
    sys.path.insert(0, '/usr/share/ceph-deploy')
elif os.path.exists('/usr/share/pyshared/ceph-deploy'):
    sys.path.insert(0, '/usr/share/pyshared/ceph-deploy')
elif os.path.exists('/usr/lib/python2.6/site-packages/ceph_deploy'):
    sys.path.insert(0, '/usr/lib/python2.6/site-packages/ceph_deploy')

from ceph_deploy.cli import main

if __name__ == '__main__':
    sys.exit(main())
ceph-deploy-2.0.1/setup.cfg0000644000076500000240000000012613312242253016277 0ustar alfredostaff00000000000000
[tool:pytest]
norecursedirs = .* _* virtualenv

[egg_info]
tag_build =
tag_date = 0
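With the pytest options above in place, a local test run equivalent to what
``tox.ini`` below drives might look like::

    pip install pytest mock
    py.test -v ceph_deploy/tests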
ceph-deploy-2.0.1/setup.py0000644000076500000240000000431213243310456016175 0ustar alfredostaff00000000000000
from setuptools import setup, find_packages
import os
import sys

import ceph_deploy
from vendor import vendorize, clean_vendor


def read(fname):
    path = os.path.join(os.path.dirname(__file__), fname)
    f = open(path)
    return f.read()


install_requires = []
pyversion = sys.version_info[:2]
if pyversion < (2, 7) or (3, 0) <= pyversion <= (3, 1):
    install_requires.append('argparse')

#
# Add libraries that are not part of install_requires but only if we really
# want to, specified by the environment flag
#
if os.environ.get('CEPH_DEPLOY_NO_VENDOR'):
    clean_vendor('remoto')
else:
    vendorize([
        ('remoto', '0.0.29', ['python', 'vendor.py']),
    ])

setup(
    name='ceph-deploy',
    version=ceph_deploy.__version__,
    packages=find_packages(),
    author='Inktank',
    author_email='ceph-devel@vger.kernel.org',
    description='Deploy Ceph with minimal infrastructure',
    long_description=read('README.rst'),
    license='MIT',
    keywords='ceph deploy',
    url="https://github.com/ceph/ceph-deploy",
    install_requires=[
        'setuptools',
    ] + install_requires,
    tests_require=[
        'pytest >=2.1.3',
        'mock >=1.0b1',
    ],
    entry_points={
        'console_scripts': [
            'ceph-deploy = ceph_deploy.cli:main',
        ],
        'ceph_deploy.cli': [
            'new = ceph_deploy.new:make',
            'install = ceph_deploy.install:make',
            'uninstall = ceph_deploy.install:make_uninstall',
            'purge = ceph_deploy.install:make_purge',
            'purgedata = ceph_deploy.install:make_purge_data',
            'mon = ceph_deploy.mon:make',
            'gatherkeys = ceph_deploy.gatherkeys:make',
            'osd = ceph_deploy.osd:make',
            'disk = ceph_deploy.osd:make_disk',
            'mds = ceph_deploy.mds:make',
            'mgr = ceph_deploy.mgr:make',
            'forgetkeys = ceph_deploy.forgetkeys:make',
            'config = ceph_deploy.config:make',
            'admin = ceph_deploy.admin:make',
            'pkg = ceph_deploy.pkg:make',
            'calamari = ceph_deploy.calamari:make',
            'rgw = ceph_deploy.rgw:make',
            'repo = ceph_deploy.repo:make',
        ],
    },
)
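The ``ceph_deploy.cli`` entry-point group above is how subcommands are wired
to their ``make`` callables. As an illustrative sketch (the package and
module names are hypothetical), a separate package could register an
additional subcommand through the same group::

    # setup.py of a hypothetical out-of-tree plugin
    from setuptools import setup

    setup(
        name='ceph-deploy-hello',
        version='0.0.1',
        py_modules=['ceph_deploy_hello'],
        entry_points={
            'ceph_deploy.cli': [
                'hello = ceph_deploy_hello:make',
            ],
        },
    )

    # ceph_deploy_hello.py
    def hello(args):
        print('hello from a plugin subcommand')

    def make(parser):
        """Say hello (demo subcommand)."""
        parser.set_defaults(func=hello)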
""" this_dir = path.dirname(path.abspath(__file__)) vendor_dest = path.join(this_dir, 'ceph_deploy/lib/vendor/%s' % name) run(['rm', '-rf', vendor_dest]) def vendorize(vendor_requirements): """ This is the main entry point for vendorizing requirements. It expects a list of tuples that should contain the name of the library and the version. For example, a library ``foo`` with version ``0.0.1`` would look like:: vendor_requirements = [ ('foo', '0.0.1'), ] """ for library in vendor_requirements: if len(library) == 2: name, version = library cmd = None elif len(library) == 3: # a possible cmd we need to run name, version, cmd = library vendor_library(name, version, cmd)