charm-tools-2.1.2/0000775000175000017500000000000012677251067014207 5ustar marcomarco00000000000000charm-tools-2.1.2/setup.py0000775000175000017500000000456212677250311015722 0ustar marcomarco00000000000000#!/usr/bin/env python # # Copyright 2012 Canonical Ltd. This software is licensed under the # GNU General Public License version 3 (see the file LICENSE). from setuptools import setup, find_packages setup( name='charm-tools', version="2.1.2", packages=find_packages( exclude=["*.tests", "*.tests.*", "tests.*", "tests"]), install_requires=['launchpadlib', 'argparse', 'cheetah', 'pyyaml', 'pycrypto', 'paramiko', 'requests', 'libcharmstore', 'blessings', 'ruamel.yaml', 'pathspec', 'otherstuf', 'path.py', 'pip', 'jujubundlelib', 'virtualenv', 'colander', 'jsonschema'], include_package_data=True, maintainer='Marco Ceppi', maintainer_email='marco@ceppi.net', description=('Tools for maintaining Juju charms'), license='GPL v3', url='https://github.com/juju/charm-tools', classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Programming Language :: Python", ], entry_points={ 'console_scripts': [ 'charm-add = charmtools.generate:main', 'charm-build = charmtools.build:main', 'charm-create = charmtools.create:main', 'charm-help = charmtools.cli:usage', 'charm-layers = charmtools.build:inspect', 'charm-proof = charmtools.proof:main', 'charm-pull-source = charmtools.pullsource:main', 'charm-test = charmtools.test:main', 'charm-version = charmtools.version:main', 'juju-test = charmtools.test:main', ], 'charmtools.templates': [ 'bash = charmtools.templates.bash:BashCharmTemplate', 'reactive-python = charmtools.templates.reactive_python:ReactivePythonCharmTemplate', 'reactive-bash = charmtools.templates.reactive_bash:ReactiveBashCharmTemplate', 'python-basic = charmtools.templates.python:PythonCharmTemplate', 'python = charmtools.templates.python_services' ':PythonServicesCharmTemplate', 'ansible = charmtools.templates.ansible:AnsibleCharmTemplate', 'chef = charmtools.templates.chef:ChefCharmTemplate', 'powershell = ' 'charmtools.templates.powershell:PowerShellCharmTemplate', ] }, ) charm-tools-2.1.2/doc/0000775000175000017500000000000012677251067014754 5ustar marcomarco00000000000000charm-tools-2.1.2/doc/source/0000775000175000017500000000000012677251067016254 5ustar marcomarco00000000000000charm-tools-2.1.2/doc/source/build.md0000664000175000017500000000750212650157641017673 0ustar marcomarco00000000000000Juju Charm Building =================== Status | *Beta* ------- ------- Today its very common to fork charms for minor changes or to have to use subordinate charms to take advantages of frameworks where you need to deploy a custom workload to an existing runtime. With charm build you should be able to include from a charm that provides the runtime (or just some well contained feature set) and maintain you're delta as a 'layer' that gets built with its base to produce a new charm. This process should be runnable repeatedly allowing charms to be regenerated. Today the system can be run as follows: charm build -o So you might use the included (very unrealistic) test case as like: charm build -o out -n foo tests/trusty/tester Running this should produce a charm in out/trusty/foo which is built according to the layer.yaml file in tests/trusty/tester. Theory ====== A built charm is composed of layers. The generator acts almost like a compiler taking the input from each layer and producing an output file in the resultant charm. 
The generator keeps track of which layer owns each file and allows layers to update files they own should the charm be refreshed later. The generated charm itself should be treated as immutable. The top layer that was used to generate it is where user level modifications should live. Setting Up your Repo ==================== This currently allows for two new ENV variables when run LAYER_PATH: a ':' separated list of JUJU_REPOSITORY that should be searched for includes INTERFACE_PATH: a ':' separated list of paths to resolve interface:_name_ includes from. JUJU_REPOSITORY entries take the usual format *series*/*charm* INTERFACE repos take the format of *interface_name*. Where interface_name is the name as it appears in the metadata.yaml Build Tactics ============= Each file in each layer gets matched by a single Tactic. Tactics implement how the data in a file moves from one layer to the next (and finally to the target charm). By default this will be a simple copy but in the cases of certain files (mostly known YAML files like metadata.yaml and config.yaml) each layer is combined with the previous layers before being written. Normally the default tactics are fine but you have the ability in the layer.yaml to list a set of Tactics objects that will be checked before the default and control how data moves from one layer to the next. layer.yaml ========== Each layer used to build a charm can have a layer.yaml file. The top layer (the one actually invoked from the command line) must. These tell the generator what do, ranging from which base layers to include, to which interfaces. They also allow for the inclusion of specialized directives for processing some types of files. Keys: includes: ["trusty/mysql", "interface:mysql"] tactics: [ dottedpath.toTacticClass, ] config: deletes: - key names metadata: deletes: - key names Includes is a list of one or more layers and interfaces that should be built Those layers may themselves have other includes and/or interfaces. Tactics is a list of Tactics to be loaded. See charmtools.build.tactics.Tactics for the default interface. You'll typically need to implement at least a trigger() method and a __call__() method. config and metadata take optional lists of keys to remove from config.yaml and metadata.yaml when generating their data. This allows for charms to, for example, narrow what they expose to clients. charm layers ============ If you've already generated a charm you can see which layers own which files by using the include **charm layers [charmdir]*** command. This should render a tree of the files in the color of each layer. Each layers assigned color is presented in a legend at the top of the output. charm-tools-2.1.2/README.md0000664000175000017500000000333712650157641015466 0ustar marcomarco00000000000000# Juju Charm Tools [![Build Status](https://travis-ci.org/juju/charm-tools.svg?branch=master)](https://travis-ci.org/juju/charm-tools) This is a collection of tools to make writing Juju charms easier. See Juju's home page for more information. https://jujucharms.com/ ## Quick Start ### Get Juju First, you'll need Juju. If its not available in your version of Ubuntu you can use the PPA: sudo add-apt-repository ppa:juju/stable sudo apt-get update sudo apt-get install juju-core ### Get Charm Tools Most people will want to install charm-tools from the Juju PPA. sudo apt-get install charm-tools Alternatively you can download the project and run the tip of the code. 
git clone http://github.com/juju/charm-tools cd charm-tools # Tools In order to use any of these tools you need to have Juju setup and working in your path ## create To generate a new charm from a Debian package available on your system juju charm create foo This should add a directory to charms with the name foo, and some of the metadata.yml and hooks filled in. It will create these in $CHARM_HOME or under the current working directory. ## proof To perform basic static analysis on a charm, run juju charm proof foo It will analyze the charm for any obvious mistakes. ## getall Retrieves all of the charms in the charm distribution via bzr. ## subscribers This is used to check the quality of maintainer<->bug subscriptions in launchpad since we do not have this relationship automatically setup. As a maintainer, if you would like to ensure that you are subscribed to all of your charms you can run this command: juju charm subscribers --fix-unsubscribed --maintainer you@youremail.com --repository path/to/your/charms charm-tools-2.1.2/MANIFEST.in0000664000175000017500000000016312650157641015737 0ustar marcomarco00000000000000include *.py README* include doc/source/build.md recursive-include charmtools * recursive-exclude charmtools *.pyc charm-tools-2.1.2/setup.cfg0000664000175000017500000000017712677251067016035 0ustar marcomarco00000000000000[nosetests] verbosity = 1 detailed-errors = 1 logging-level = INFO [egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 charm-tools-2.1.2/PKG-INFO0000664000175000017500000000061112677251067015302 0ustar marcomarco00000000000000Metadata-Version: 1.1 Name: charm-tools Version: 2.1.2 Summary: Tools for maintaining Juju charms Home-page: https://github.com/juju/charm-tools Author: Marco Ceppi Author-email: marco@ceppi.net License: GPL v3 Description: UNKNOWN Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: Programming Language :: Python charm-tools-2.1.2/charm_tools.egg-info/0000775000175000017500000000000012677251067020213 5ustar marcomarco00000000000000charm-tools-2.1.2/charm_tools.egg-info/requires.txt0000664000175000017500000000026012677251067022611 0ustar marcomarco00000000000000launchpadlib argparse cheetah pyyaml pycrypto paramiko requests libcharmstore blessings ruamel.yaml pathspec otherstuf path.py pip jujubundlelib virtualenv colander jsonschema charm-tools-2.1.2/charm_tools.egg-info/dependency_links.txt0000664000175000017500000000000112677251067024261 0ustar marcomarco00000000000000 charm-tools-2.1.2/charm_tools.egg-info/entry_points.txt0000664000175000017500000000166212677251067023516 0ustar marcomarco00000000000000[charmtools.templates] ansible = charmtools.templates.ansible:AnsibleCharmTemplate bash = charmtools.templates.bash:BashCharmTemplate chef = charmtools.templates.chef:ChefCharmTemplate powershell = charmtools.templates.powershell:PowerShellCharmTemplate python = charmtools.templates.python_services:PythonServicesCharmTemplate python-basic = charmtools.templates.python:PythonCharmTemplate reactive-bash = charmtools.templates.reactive_bash:ReactiveBashCharmTemplate reactive-python = charmtools.templates.reactive_python:ReactivePythonCharmTemplate [console_scripts] charm-add = charmtools.generate:main charm-build = charmtools.build:main charm-create = charmtools.create:main charm-help = charmtools.cli:usage charm-layers = charmtools.build:inspect charm-proof = charmtools.proof:main charm-pull-source = charmtools.pullsource:main charm-test = 
charmtools.test:main charm-version = charmtools.version:main juju-test = charmtools.test:main charm-tools-2.1.2/charm_tools.egg-info/top_level.txt0000664000175000017500000000001312677251067022737 0ustar marcomarco00000000000000charmtools charm-tools-2.1.2/charm_tools.egg-info/SOURCES.txt0000664000175000017500000001566012677251067022107 0ustar marcomarco00000000000000MANIFEST.in README.md setup.cfg setup.py charm_tools.egg-info/PKG-INFO charm_tools.egg-info/SOURCES.txt charm_tools.egg-info/dependency_links.txt charm_tools.egg-info/entry_points.txt charm_tools.egg-info/requires.txt charm_tools.egg-info/top_level.txt charmtools/__init__.py charmtools/bundles.py charmtools/charms.py charmtools/cli.py charmtools/create.py charmtools/diff_match_patch.py charmtools/fetchers.py charmtools/generate.py charmtools/linter.py charmtools/list.py charmtools/mr.py charmtools/proof.py charmtools/pullsource.py charmtools/repofinder.py charmtools/test.py charmtools/unpromulgate.py charmtools/utils.py charmtools/version.py charmtools/build/__init__.py charmtools/build/config.py charmtools/build/fetchers.py charmtools/build/inspector.py charmtools/build/tactics.py charmtools/generators/__init__.py charmtools/generators/generator.py charmtools/generators/prompt.py charmtools/generators/template.py charmtools/generators/utils.py charmtools/templates/__init__.py charmtools/templates/ansible/__init__.py charmtools/templates/ansible/template.py charmtools/templates/ansible/files/Makefile charmtools/templates/ansible/files/README.ex charmtools/templates/ansible/files/charm-helpers.yaml charmtools/templates/ansible/files/config.yaml charmtools/templates/ansible/files/icon.svg charmtools/templates/ansible/files/metadata.yaml charmtools/templates/ansible/files/revision charmtools/templates/ansible/files/hooks/hooks.py charmtools/templates/ansible/files/playbooks/site.yaml charmtools/templates/ansible/files/scripts/charm_helpers_sync.py charmtools/templates/ansible/files/unit_tests/test_hooks.py charmtools/templates/bash/__init__.py charmtools/templates/bash/template.py charmtools/templates/bash/files/README.ex charmtools/templates/bash/files/config.yaml charmtools/templates/bash/files/icon.svg charmtools/templates/bash/files/metadata.yaml charmtools/templates/bash/files/revision charmtools/templates/bash/files/hooks/config-changed charmtools/templates/bash/files/hooks/install charmtools/templates/bash/files/hooks/relation-name-relation-broken charmtools/templates/bash/files/hooks/relation-name-relation-changed charmtools/templates/bash/files/hooks/relation-name-relation-departed charmtools/templates/bash/files/hooks/relation-name-relation-joined charmtools/templates/bash/files/hooks/start charmtools/templates/bash/files/hooks/stop charmtools/templates/bash/files/hooks/upgrade-charm charmtools/templates/charm/README.ex charmtools/templates/charm/icon.svg charmtools/templates/chef/__init__.py charmtools/templates/chef/template.py charmtools/templates/chef/files/README.ex charmtools/templates/chef/files/config.yaml charmtools/templates/chef/files/icon.svg charmtools/templates/chef/files/metadata.yaml charmtools/templates/chef/files/cookbooks/Gemfile charmtools/templates/chef/files/cookbooks/Gemfile.lock charmtools/templates/chef/files/cookbooks/charm-name/metadata.rb charmtools/templates/chef/files/cookbooks/charm-name/recipes/config-changed.rb charmtools/templates/chef/files/cookbooks/charm-name/recipes/install.rb charmtools/templates/chef/files/cookbooks/charm-name/recipes/start.rb 
charmtools/templates/chef/files/cookbooks/charm-name/recipes/stop.rb charmtools/templates/chef/files/cookbooks/charm-name/recipes/upgrade-charm.rb charmtools/templates/chef/files/cookbooks/juju-helpers/metadata.rb charmtools/templates/chef/files/cookbooks/juju-helpers/definitions/juju_port.rb charmtools/templates/chef/files/cookbooks/juju-helpers/definitions/relation_set.rb charmtools/templates/chef/files/cookbooks/juju-helpers/libraries/juju.rb charmtools/templates/chef/files/cookbooks/juju-helpers/libraries/juju/juju_helpers.rb charmtools/templates/chef/files/cookbooks/juju-helpers/libraries/juju/juju_helpers_dev.rb charmtools/templates/chef/files/cookbooks/relation-name-relation/metadata.rb charmtools/templates/chef/files/cookbooks/relation-name-relation/recipes/broken.rb charmtools/templates/chef/files/cookbooks/relation-name-relation/recipes/changed.rb charmtools/templates/chef/files/cookbooks/relation-name-relation/recipes/departed.rb charmtools/templates/chef/files/cookbooks/relation-name-relation/recipes/joined.rb charmtools/templates/chef/files/hooks/bootstrap charmtools/templates/chef/files/hooks/config-changed charmtools/templates/chef/files/hooks/install charmtools/templates/chef/files/hooks/relation-name-relation-broken charmtools/templates/chef/files/hooks/relation-name-relation-changed charmtools/templates/chef/files/hooks/relation-name-relation-departed charmtools/templates/chef/files/hooks/relation-name-relation-joined charmtools/templates/chef/files/hooks/start charmtools/templates/chef/files/hooks/stop charmtools/templates/chef/files/hooks/stub charmtools/templates/chef/files/hooks/upgrade-charm charmtools/templates/chef/files/tests/00-setup charmtools/templates/chef/files/tests/99-autogen charmtools/templates/powershell/__init__.py charmtools/templates/powershell/template.py charmtools/templates/python/__init__.py charmtools/templates/python/template.py charmtools/templates/python/files/README.ex charmtools/templates/python/files/charm-helpers.yaml charmtools/templates/python/files/config.yaml charmtools/templates/python/files/icon.svg charmtools/templates/python/files/metadata.yaml charmtools/templates/python/files/revision charmtools/templates/python/files/hooks/config-changed charmtools/templates/python/files/hooks/install charmtools/templates/python/files/hooks/start charmtools/templates/python/files/hooks/stop charmtools/templates/python/files/hooks/upgrade-charm charmtools/templates/python/files/scripts/charm_helpers_sync.py charmtools/templates/python/files/tests/00-setup charmtools/templates/python/files/tests/10-deploy charmtools/templates/python_services/__init__.py charmtools/templates/python_services/config.yaml charmtools/templates/python_services/template.py charmtools/templates/python_services/files/README.example charmtools/templates/python_services/files/config.yaml charmtools/templates/python_services/files/icon.svg charmtools/templates/python_services/files/metadata.yaml charmtools/templates/python_services/files/hooks/actions.py charmtools/templates/python_services/files/hooks/config-changed charmtools/templates/python_services/files/hooks/install charmtools/templates/python_services/files/hooks/services.py charmtools/templates/python_services/files/hooks/setup.py charmtools/templates/python_services/files/hooks/start charmtools/templates/python_services/files/hooks/stop charmtools/templates/python_services/files/hooks/upgrade-charm charmtools/templates/python_services/files/templates/upstart.conf 
charmtools/templates/python_services/files/tests/00-setup charmtools/templates/python_services/files/tests/10-deploy charmtools/templates/python_services/files/unit_tests/test_actions.py charmtools/templates/reactive_bash/__init__.py charmtools/templates/reactive_bash/template.py charmtools/templates/reactive_python/__init__.py charmtools/templates/reactive_python/template.py charmtools/templates/tests/99-autogen.tpl doc/source/build.mdcharm-tools-2.1.2/charm_tools.egg-info/PKG-INFO0000664000175000017500000000061112677251067021306 0ustar marcomarco00000000000000Metadata-Version: 1.1 Name: charm-tools Version: 2.1.2 Summary: Tools for maintaining Juju charms Home-page: https://github.com/juju/charm-tools Author: Marco Ceppi Author-email: marco@ceppi.net License: GPL v3 Description: UNKNOWN Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: Programming Language :: Python charm-tools-2.1.2/charmtools/0000775000175000017500000000000012677251067016362 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/test.py0000775000175000017500000006277312676737527017746 0ustar marcomarco00000000000000#!/usr/bin/python # coding=utf-8 import argparse import glob import logging import os import re import signal import subprocess import sys import time import yaml from datetime import timedelta from collections import OrderedDict from contextlib import contextmanager TEST_PASS = '✔' TEST_FAIL = '✘' TEST_SKIP = '↷' TEST_TIMEOUT = '⌛' TEST_STATUS = {'pass': TEST_PASS, 'fail': TEST_FAIL, 'skip': TEST_SKIP, 'timeout': TEST_TIMEOUT} TEST_RESERVED_EXITS = {0: 'pass', 100: 'skip', 124: 'timeout'} LOG_LEVELS = [logging.INFO, logging.DEBUG] TEST_RESULT_LEVELV_NUM = 51 ENV_WHITELIST = ['PATH', 'SSH_AUTH_SOCK', 'SSH_AGENT_PID', 'PYTHONPATH', 'HOME'] class NoTests(Exception): pass class BootstrapError(Exception): pass class BootstrapUnreliable(Exception): pass class DestroyUnreliable(Exception): pass class SubstrateMismatch(Exception): pass class TimeoutError(Exception): def __init__(self, value="Timed Out"): self.value = value class TestingError(Exception): pass class OrchestraError(Exception): pass class Conductor(object): def __init__(self, arguments=None): self.args = arguments # Default home to what juju defaults to, can be overridden with "-p" self.env = {'JUJU_HOME': os.path.expanduser('~/.juju')} if arguments.preserve_environment_variables: for var in arguments.preserve_environment_variables.split(","): ENV_WHITELIST.append(var) for var in ENV_WHITELIST: if var in os.environ: self.env[var] = os.environ[var] self.log = logging.getLogger('juju-test.conductor') self.tests = self.find_tests() self.tests_requested = self.args.tests self.juju_version = None self.juju_env = self.args.juju_env self.errors = 0 self.fails = 0 self.passes = 0 if self.tests_requested: self.tests_requested = [os.path.basename(t) for t in self.tests_requested] if not self.tests: raise NoTests() def run(self): self.juju_version = get_juju_version() requested_tests = self.tests if self.tests_requested: for test in self.tests: if test not in self.tests_requested: del self.tests[test] for test in requested_tests.values(): try: self.bootstrap(self.juju_env, self.args.setup_timeout) except Exception, e: self.log.warn('Could not bootstrap %s, got %s. 
Skipping' % (self.juju_env, e)) self.errors += 1 continue try: t = Orchestra(self, test) t.perform() except: self.fails += 1 if self.args.set_e: self.log.info('Breaking here as requested by --set-e') return self.errors, self.fails, self.passes else: self.passes += 1 try: self.destroy(self.juju_env) except DestroyUnreliable: self.log.warn('Unable to destroy bootstrap, trying again') time.sleep(2) try: self.destroy(self.juju_env) except: continue return self.errors, self.fails, self.passes def find_tests(self): tests_dir = glob.glob(os.path.join('tests', '*')) if not tests_dir: return None # Filter out only the files in tests/ then get the test names. tests = [t for t in tests_dir if os.path.isfile(t)] # only include executables tests = [(os.path.basename(t), t) for t in tests if os.access(t, os.R_OK | os.X_OK)] result = OrderedDict() # keep sort order as well as indexed lookups for basename, test in sorted(tests): result[basename] = test return result def safe_test_name(self, test_name): return test_name def isolate_environment(self, juju_env): # Should probably do something other than NotImplementedError... raise NotImplementedError() def get_environment(self, juju_env): juju_home = self.env['JUJU_HOME'] try: env_yaml = self.load_environments_yaml(juju_home) except IOError: raise # Do something more clever here? if not juju_env in env_yaml['environments']: raise KeyError('%s does not exist in %s/environments.yaml' % (juju_env, juju_home)) return env_yaml['environments'][juju_env] def bootstrap(self, juju_env, wait_for=400): self.log.debug('Starting a bootstrap for %s, kill after %s' % (juju_env, wait_for)) cmd = ['juju', 'bootstrap'] if self.juju_version.major > 0: if self.args.upload_tools: cmd.append('--upload-tools') if self.args.constraints: cmd.extend(['--constraints', self.args.constraints]) cmd.extend(['-e', juju_env]) self.log.debug('Running the following: %s' % ' '.join(cmd)) try: subprocess.check_call(cmd, env=self.env) except subprocess.CalledProcessError: raise BootstrapError('Bootstrap returned with an exit > 0') self.log.debug('Waiting for bootstrap') try: with timeout(wait_for): self.wait_for_bootstrap(juju_env) except TimeoutError: try: self.destroy(self.juju_env) except: pass raise BootstrapUnreliable('Bootstrap timeout after %ss' % wait_for) def destroy(self, juju_env): self.log.debug('Tearing down %s juju environment' % juju_env) cmd = ['juju', 'destroy-environment'] if self.juju_version.major > 0: if self.juju_version.minor < 17: cmd.extend(['-y', '-e', juju_env]) else: cmd.extend(['-y', juju_env]) self.log.debug('Calling "%s"' % ' '.join(cmd)) try: subprocess.check_call(cmd, env=self.env) except subprocess.CalledProcessError: raise DestroyUnreliable('Unable to destroy %s' % juju_env) else: # Probably should use Popen instead of Shell=True. I'm just not # confident on properly mocking a Popen call just yet. 
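            # For 0.x juju the destroy confirmation prompt is answered by
            # piping "echo y" into the command built below, which is why it
            # is run through the shell.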
cmd.extend(['-e', juju_env]) pycmd = 'echo y | %s' % ' '.join(cmd) self.log.debug('Calling "%s"' % pycmd) try: subprocess.check_call(pycmd, shell=True, env=self.env) except subprocess.CalledProcessError: raise DestroyUnreliable('Unable to destroy %s' % juju_env) def status(self, juju_env): cmd = ['juju', 'status', '-e', juju_env] self.log.debug('Running the following: %s' % ' '.join(cmd)) try: output = subprocess.check_output(cmd, env=self.env) except: self.log.debug('Status command failed, returning nothing') return None return yaml.safe_load(output) def wait_for_bootstrap(self, juju_env): bootstrapped = False while not bootstrapped: self.log.debug('Still not bootstrapped') time.sleep(5) status = self.status(juju_env) if not status: continue if self.juju_version.major > 0: self.log.debug('State for %s: %s' % (self.juju_version, status['machines']['0']['agent-state'])) if status['machines']['0']['agent-state'] == 'started': bootstrapped = True else: self.log.debug('State for %s: %s' % (self.juju_version, status['machines'][0]['agent-state'])) if status['machines'][0]['agent-state'] == 'running': bootstrapped = True def load_environments_yaml(self, juju_home='~/.juju'): env_yaml_file = os.path.join(os.path.expanduser(juju_home), 'environments.yaml') if not os.path.exists(env_yaml_file): raise IOError("%s file does not exist" % env_yaml_file) with open(env_yaml_file, 'r') as y: return yaml.safe_load(y.read()) class Orchestra(object): def __init__(self, conductor, arrangement): self.conductor = conductor self.test = arrangement self.name = os.path.basename(self.test) self.safe_name = self.conductor.safe_test_name(self.name) self.log = logging.getLogger('juju-test.conductor.%s' % self.safe_name) self.archive = self.conductor.args.logdir self.env = self.conductor.env def perform(self): self.build_env() error = None self.log.debug('Running %s (%s)' % (self.name, self.test)) try: with timeout(self.conductor.args.timeout): output = subprocess.check_output(self.test, env=self.env) self.log.debug(output) except TimeoutError, e: self.log.debug('Killed by timeout after %s seconds', self.conductor.args.timeout) self.print_status(124) error = e if not self.is_passing_code(124) else e except subprocess.CalledProcessError, e: self.log.debug(e.output) self.log.debug('Got exit code: %s', e.returncode) self.print_status(e.returncode) error = TestingError(e.returncode) if not \ self.is_passing_code(e.returncode) else e except Exception, e: self.log.debug('Encountered unexpected error %s', e) self.print_status(9001) error = e else: self.print_status(0) if self.conductor.args.logdir: try: self.archive_logs() except OrchestraError, e: self.log.error(e) if error: raise error def archive_logs(self): logs = ['/var/./log/juju/*'] status = self.conductor.status(self.env["JUJU_ENV"]) logdir = self.conductor.args.logdir if not status: # Something is wrong, we need to throw up an archive error raise OrchestraError('Unable to query juju status') services = status['services'] # machines = status['machines'] if self.conductor.juju_version.major == 0: logs.append('/var/lib/juju/units/./*/charm.log') try: self.rsync(0, logs[0], os.path.join(logdir, 'bootstrap', '')) except: self.log.warn('Failed to fetch logs for bootstrap node') for service in services: for unit in services[service]['units']: machine = services[service]['units'][unit]['machine'] for log in logs: try: self.rsync(machine, log, os.path.join(logdir, service, '')) except: self.log.warn('Failed to grab logs for %s' % unit) def print_status(self, exit_code): 
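        # Log the result symbol for this test. If flags such as --on-timeout
        # or --fail-on-skip rewrite the computed status, both the computed
        # and the raw exit-code status symbols are shown.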
actual = self.map_status_code(exit_code) computed = self.determine_status(exit_code) if actual != computed: self.log.status('%s (%s)' % (TEST_STATUS[computed], TEST_STATUS[actual])) else: self.log.status(TEST_STATUS[actual]) # The next three methods need to be collapsed and consolidated def determine_status(self, exit_code): if exit_code == 124: timeout_status = self.conductor.args.on_timeout reversed_codes = {v: k for k, v in TEST_RESERVED_EXITS.items()} if timeout_status in reversed_codes.keys(): exit_code = reversed_codes[self.conductor.args.on_timeout] else: return timeout_status if not exit_code in TEST_RESERVED_EXITS.keys(): return 'fail' if exit_code == 100 and self.conductor.args.fail_on_skip: return 'fail' elif exit_code == 100: return 'skip' if exit_code == 0: return 'pass' def is_passing_code(self, exit_code): if exit_code == 124: timeout_status = self.conductor.args.on_timeout reversed_codes = {v: k for k, v in TEST_RESERVED_EXITS.items()} if timeout_status in reversed_codes.keys(): exit_code = reversed_codes[self.conductor.args.on_timeout] if exit_code == 0: return True if exit_code == 100 and not self.conductor.args.fail_on_skip: return True return False def map_status_code(self, exit_code): if exit_code in TEST_RESERVED_EXITS.keys(): return TEST_RESERVED_EXITS[exit_code] else: return 'fail' def build_env(self): self.env["JUJU_ENV"] = self.conductor.juju_env def rsync(self, machine, path, dest): if self.conductor.juju_version.major == 0: cmd = ['rsync', '-a', '-v', '-z', '-R', '-e', 'juju ssh -e %s' % self.env['JUJU_ENV'], '%s:%s' % (machine, path), dest] else: # http://pad.lv/1183159 status = self.conductor.status(self.env['JUJU_ENV']) dns_name = status['machines'][machine]['dns-name'] cmd = ['rsync', '-a', '-v', '-z', '-R', '-e', 'ssh', 'ubuntu@%s:%s' % (dns_name, path), dest] subprocess.check_call(cmd, env=self.env) # Build Juju class instead? Move bootstrap, wait_for_bootstrap, teardown? class JujuVersion(object): def __init__(self, major=0, minor=0, patch=0): self.mapping = ['major', 'minor', 'patch'] self.major = major self.minor = minor self.patch = patch def __str__(self): return '.'.join(str(v) for v in [self.major, self.minor, self.patch]) class TestCfg(object): _keys = ['timeout', 'set-e', 'on-timeout', 'fail-on-skip', 'tests'] def __init__(self, cfg): self.log = logging.getLogger('juju-test.testcfg') if isinstance(cfg, basestring): cfg = yaml.safe_load(cfg) if 'options' in cfg: for key, val in cfg['options'].iteritems(): if key in self._keys: setattr(self, key, val) if 'substrates' in cfg: self.substrates = cfg['substrates'] def update(self, **kw): for key, val in kw.iteritems(): self.log.debug('Overwriting %s to %s from cmd' % (key, val)) setattr(self, key, val) def get_juju_version(): jv = JujuVersion() cmd = ['juju', 'version'] try: version = subprocess.check_output(cmd) version = version.split('-')[0] except: cmd[1] = '--version' version = subprocess.check_output(cmd) version = version.split()[1] for i, ver in enumerate(version.split('.')): try: setattr(jv, jv.mapping[i], int(ver)) except: break # List out of range? Versions not semantic? Not my problem. 
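    # Whatever version components parsed successfully are kept; anything that
    # could not be parsed is left at the JujuVersion default of 0.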
return jv def setup_logging(level=0, quiet=False, logdir=None): logger = logging.getLogger('juju-test') ft = logging.Formatter('%(asctime)s %(name)s %(levelname)-8s: %(message)s') cmt = logging.Formatter('%(name)s %(levelname)-8s: %(message)s') logger.setLevel(1) if level >= len(LOG_LEVELS): level = len(LOG_LEVELS) - 1 if logdir: if not os.path.exists(logdir): os.makedirs(logdir) logfile = os.path.join(logdir, 'juju-test.%s.log' % int(time.time())) fh = logging.FileHandler(logfile) # Always at least log to INFO for file, unless DEBUG is requested fh.setLevel(LOG_LEVELS[level]) fh.setFormatter(ft) logger.addHandler(fh) if not quiet: ch = logging.StreamHandler() ch.setLevel(LOG_LEVELS[level]) ch.setFormatter(cmt) logger.addHandler(ch) return logger class SubstrateFilter(object): def __init__(self, spec): self.order = spec.get('order', ['include', 'skip']) self.include = spec.get('include', ['*']) self.skip = spec.get('skip', []) if isinstance(self.order, str): self.order = [s.strip() for s in self.order.split(',')] if self.order != ['include', 'skip'] and \ self.order != ['skip', 'include']: raise ValueError( 'order should be defined using only include and skip') if isinstance(self.include, str): self.include = [self.include] self.include = set(self.include) if isinstance(self.skip, str): self.skip = [self.skip] self.skip = set(self.skip) def filter(self, substrates): """ Filter a list of substrates relative to the rules generated on class creation. """ if isinstance(substrates, str): substrates = [s.strip() for s in re.split('[,\s]', substrates)] # Apply the rules to the list of substrates returning anything that # matches if self.order == ['include', 'skip']: result = self._filter_includes(substrates, True) result = self._filter_skips(result) else: result = self._filter_skips(substrates, True) result = self._filter_includes(result) return result def _filter_includes(self, inputList, priority=False): if priority and '*' in self.include: return inputList return sorted(list(set(inputList).intersection(self.include))) def _filter_skips(self, inputList, priority=False): if priority and '*' in self.skip: return list(self.include.intersection(inputList)) return sorted(list(set(inputList).difference(self.skip))) def parse_substrates(spec): """Return a :class:`SubstrateFilter` object parsed from ``spec``. :param spec: Can be a yaml string, a dict with a 'substrates' key, or an object with a 'substrates' attribute. The 'substrates' key or attribute should contain a dict with optional 'order', 'skip', and 'include' keys. """ if isinstance(spec, basestring): spec = yaml.safe_load(spec) elif not hasattr(spec, '__getitem__'): spec = vars(spec) if not spec or 'substrates' not in spec: raise ValueError( "Invalid data passed to parse_substrates: {}".format(spec)) specRules = SubstrateFilter(spec['substrates']) return specRules def allowed_substrates(spec, possible_substrates): return parse_substrates(spec).filter(possible_substrates) def setup_parser(): parser = argparse.ArgumentParser( prog='juju test', formatter_class=argparse.RawDescriptionHelpFormatter, description='execute charm functional tests', epilog="""\ `%(prog)s` should always be run from within a CHARM_DIR. examples: Given the following example charm layout: . ├── config.yaml ├── copyright ├── hooks │   └── ... 
├── icon.svg ├── metadata.yaml ├── README.md └── tests ├── 00-tool_setup ├── 01-standard ├── 02-at_scale └── 03-different_database Run all tests for current charm %(prog)s Run one or more tests %(prog)s 01-standard 03-different_database output: Each unit test will return an output in the form or either: RESULT : SYM or RESULT : SYM (SYM) Where SYM is a Symbol representing PASS: ✔, FAIL: ✘, SKIP: ↷, or TIMEOUT: ⌛ In the event a status is rewritten by either the --fail-on-skip flag or the --on-timeout flag the original status will be displayed in () next to the computed status. """) # Plugin specific parser.add_argument('--description', action="store_true", help="produces one-line description for juju-core") # Tester specific # Options ommited from jitsu: --archive-only, --no-bootstrap parser.add_argument('--timeout', default=600, action=StoreTimedelta, help="timeout per unit test. Examples: 10m, 300s") parser.add_argument('--isolate', metavar='JUJU_ENV', help="create unique environment cloned from JUJU_ENV") parser.add_argument('-o', '--logdir', help="directory to store service logs from each test") # New tester options parser.add_argument('--setup-timeout', default=300, action=StoreTimedelta, help="timeout to wait for an environment to be set up") parser.add_argument('--fail-on-skip', default=False, action="store_true", help="treat SKIP (100) status as a failure, will \ halt execution with --set-e") parser.add_argument('--on-timeout', default='skip', choices=['fail', 'skip', 'pass'], help="treat tests which timeout as (fail, skip, pass)") parser.add_argument('--set-e', default=False, action="store_true", help="stop testing execution on first failed test") parser.add_argument('-v', action='count', default=0, help="make test more verbose") parser.add_argument('-q', '--quiet', default=False, action="store_true", help="quiet all output") parser.add_argument('-p', '--preserve-environment-variables', help="Comma separated list of environment variables " "to preserve. 
This will be added to the default list " "of {}.".format(ENV_WHITELIST)) # These are bootstrap/juju specific parser.add_argument('-e', '--environment', metavar='JUJU_ENV', default=os.environ.get('JUJU_ENV'), dest='juju_env', #required=True, help="juju environment to operate in") parser.add_argument('--upload-tools', default=False, action="store_true", help="upload juju tools (available for juju > 1.x)") parser.add_argument('--constraints', help="juju environment constraints") # General options # TODO: Need to add some way to specify a particular test without # colliding with CHARM parser.add_argument('tests', metavar="TEST", nargs='*', default=None, help="tests to execute, relative to tests/ directory, \ default is all") return parser # http://stackoverflow.com/a/11784984/196832 def status(self, message, *args, **kws): self._log(TEST_RESULT_LEVELV_NUM, message, args, **kws) logging.addLevelName(TEST_RESULT_LEVELV_NUM, "RESULT") logging.Logger.status = status def main(): p = setup_parser() args = p.parse_args() test_cfg = None if args.description: print p.description sys.exit() logger = setup_logging(level=args.v, quiet=args.quiet, logdir=args.logdir) logger.info('Starting test run on %s using Juju %s' % (args.juju_env, get_juju_version())) logger.debug('Loading configuration options from testplan YAML') test_plans = glob.glob(os.path.join(os.getcwd(), 'tests', 'test_config.y*ml')) if test_plans: with open(test_plans[0]) as f: test_cfg = f.read() if test_cfg: cfg = TestCfg(test_cfg) cfg.update(**vars(args)) else: cfg = args logger.debug('Creating a new Conductor') try: tester = Conductor(args) env_yaml = tester.get_environment(cfg.juju_env) if getattr(cfg, 'substrates', None): rules = parse_substrates(cfg) allowed = rules.filter(env_yaml['type']) if env_yaml['type'] not in allowed: raise Exception('%s is not an allowed substrate: %s' % (env_yaml['type'], allowed.join(', '))) errors, failures, passes = tester.run() except NoTests: logger.critical('No tests were found') sys.exit(1) except Exception as e: logger.critical(str(e)) sys.exit(1) except: raise logger.info('Results: %s passed, %s failed, %s errored' % (passes, failures, errors)) if failures > 0: sys.exit(failures) elif errors > 0: sys.exit(124) # Nothing failed, but there were errors! sys.exit(0) # http://stackoverflow.com/a/601168/196832 @contextmanager def timeout(seconds): def signal_handler(signum, frame): raise TimeoutError() signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) class StoreTimedelta(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, convert_to_timedelta(values)) # http://bit.ly/13Mi5QV def convert_to_timedelta(time_val): if isinstance(time_val, int): return time_val if time_val.isdigit(): return int(time_val) num = int(time_val[:-1]) if time_val.endswith('s'): return timedelta(seconds=num).seconds elif time_val.endswith('m'): return timedelta(minutes=num).seconds elif time_val.endswith('h'): return timedelta(hours=num).seconds if __name__ == '__main__': main() charm-tools-2.1.2/charmtools/pullsource.py0000775000175000017500000001570012676737527021150 0ustar marcomarco00000000000000#!/usr/bin/python # # pull-source - Fetch source for charm, layers, and interfaces # # Copyright (C) 2016 Canonical Ltd. 
# Author: Tim Van Steenburgh # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . """download the source code for a charm, layer, or interface. The item to download can be specified using any of the following forms: - [cs:]charm - [cs:]series/charm - [cs:]~user/charm - [cs:]~user/series/charm - layer:layer-name - interface:interface-name If the item is a layered charm, and the top layer of the charm has a repo key in layer.yaml, the top layer repo will be cloned. Otherwise, the charm archive will be downloaded and extracted from the charm store. If a download directory is not specified, the following environment vars will be used to determine the download location: - For charms, $JUJU_REPOSITORY - For layers, $LAYER_PATH - For interfaces, $INTERFACE_PATH If a download location can not be determined from environment variables, the current working directory will be used. The download is aborted if the destination directory already exists. """ import argparse import atexit import logging import os import shutil import sys import tempfile import textwrap import yaml from . import utils from .build import fetchers from fetchers import ( CharmstoreDownloader, FETCHERS, get, ) log = logging.getLogger(__name__) LAYER_PREFIX = 'layer:' INTERFACE_PREFIX = 'interface:' CHARM_PREFIX = 'cs:' ERR_DIR_EXISTS = "Aborting, destination directory exists" class CharmstoreRepoDownloader(CharmstoreDownloader): """Clones a charm's bzr repo. If the a bzr repo is not set, falls back to :class:`fetchers.CharmstoreDownloader`. """ EXTRA_INFO_URL = CharmstoreDownloader.STORE_URL + '/meta/extra-info' def fetch(self, dir_): url = self.EXTRA_INFO_URL.format(self.entity) repo_url = get(url).json().get('bzr-url') if repo_url: try: fetcher = fetchers.get_fetcher(repo_url) except fetchers.FetchError: log.debug( "No fetcher for %s, downloading from charmstore", repo_url) return super(CharmstoreRepoDownloader, self).fetch(dir_) else: return fetcher.fetch(dir_) return super(CharmstoreRepoDownloader, self).fetch(dir_) FETCHERS.insert(0, CharmstoreRepoDownloader) class CharmstoreLayerDownloader(CharmstoreRepoDownloader): """Clones the repo containing the top layer of a charm. If the charm is not a layered charm, or the repo for the top layer can not be determined, falls back to using :class:`CharmstoreRepoDownloader`. 
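    The top-layer repo is discovered by fetching layer.yaml (or composer.yaml)
    from the charm store archive and reading its "repo" key.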
""" LAYER_CONFIGS = ['layer.yaml', 'composer.yaml'] def fetch(self, dir_): for cfg in self.LAYER_CONFIGS: url = '{}/{}'.format( self.ARCHIVE_URL.format(self.entity), cfg) result = get(url) if not result.ok: continue repo_url = yaml.safe_load(result.text).get('repo') if not repo_url: continue try: fetcher = fetchers.get_fetcher(repo_url) except fetchers.FetchError: log.debug( 'Charm %s has a repo set in %s, but no fetcher could ' 'be found for the repo (%s).', self.entity, cfg, repo_url) break else: return fetcher.fetch(dir_) return super(CharmstoreLayerDownloader, self).fetch(dir_) FETCHERS.insert(0, CharmstoreLayerDownloader) def download_item(item, dir_): series_dir = None if item.startswith(LAYER_PREFIX): dir_ = dir_ or os.environ.get('LAYER_PATH') name = item[len(LAYER_PREFIX):] elif item.startswith(INTERFACE_PREFIX): dir_ = dir_ or os.environ.get('INTERFACE_PATH') name = item[len(INTERFACE_PREFIX):] else: dir_ = dir_ or os.environ.get('JUJU_REPOSITORY') if not item.startswith(CHARM_PREFIX): item = CHARM_PREFIX + item url_parts = item[len(CHARM_PREFIX):].split('/') name = url_parts[-1] if len(url_parts) == 2 and not url_parts[0].startswith('~'): series_dir = url_parts[0] elif len(url_parts) == 3: series_dir = url_parts[1] dir_ = dir_ or os.getcwd() dir_ = os.path.abspath(os.path.expanduser(dir_)) # Create series dir if we need to if series_dir: series_path = os.path.join(dir_, series_dir) if not os.path.exists(series_path): os.mkdir(series_path) dir_ = series_path # Abort if destination dir already exists final_dest_dir = os.path.join(dir_, name) if os.path.exists(final_dest_dir): return "{}: {}".format(ERR_DIR_EXISTS, final_dest_dir) # Create tempdir for initial download tempdir = tempfile.mkdtemp() atexit.register(shutil.rmtree, tempdir) try: # Download the item fetcher = fetchers.get_fetcher(item) download_dir = fetcher.fetch(tempdir) except fetchers.FetchError: return "Can't find source for {}".format(item) # Copy download dir to final destination dir shutil.copytree(download_dir, final_dest_dir) print('Downloaded {} to {}'.format(item, final_dest_dir)) def setup_parser(): parser = argparse.ArgumentParser( prog='charm pull-source', description=textwrap.dedent(__doc__), formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( 'item', help='Name of the charm, layer, or interface to download.' ) parser.add_argument( 'dir', nargs='?', help='Directory in which to place the downloaded source.', ) parser.add_argument( '-v', '--verbose', help='Show verbose output', action='store_true', default=False, ) utils.add_plugin_description(parser) return parser def main(): parser = setup_parser() args = parser.parse_args() if args.verbose: logging.basicConfig( format='%(levelname)s %(filename)s: %(message)s', level=logging.DEBUG, ) else: logging.basicConfig( format='%(levelname)s: %(message)s', level=logging.WARN, ) return download_item(args.item, args.dir) if __name__ == "__main__": sys.exit(main()) charm-tools-2.1.2/charmtools/bundles.py0000664000175000017500000000574312676466472020410 0ustar marcomarco00000000000000import glob import os import re import yaml from linter import Linter import jujubundlelib.validation charm_url_includes_id = re.compile(r'-\d+$').search class BundleLinter(Linter): def validate(self, data): """Supplement jujubundlelib validation with some extra checks. 
""" if 'series' not in data and 'inherits' not in data: self.info("No series defined") if 'services' in data: for svc, sdata in data['services'].items(): if 'annotations' not in sdata: self.warn('%s: No annotations found, will render ' 'poorly in GUI' % svc) if ('charm' in sdata and not charm_url_includes_id(sdata['charm'] or '')): self.warn( '%s: charm URL should include a revision' % svc) else: if 'inherits' not in data: self.err("No services defined") def proof(self, bundle): data = bundle.bundle_file() if not bundle.is_v4(data): self.err( 'This bundle format is no longer supported. See ' 'https://jujucharms.com/docs/stable/charms-bundles ' 'for the supported format.') return readmes = glob.glob(os.path.join(bundle.bundle_path, 'README*')) if len(readmes) < 1: self.warn('No readme file found') errors = jujubundlelib.validation.validate(data) for error in errors: self.err(error) self.validate(data) class Bundle(object): def __init__(self, path, debug=False): self.bundle_path = os.path.abspath(path) self.supported_files = [ 'bundle.yaml', 'bundle.json', # v4 'bundles.yaml', 'bundles.json', # v3 ] self.debug = debug if not self.is_bundle(): raise Exception('Not a bundle') def is_bundle(self): for f in self.supported_files: if os.path.isfile(os.path.join(self.bundle_path, f)): break else: return False if os.path.isfile(os.path.join(self.bundle_path, 'metadata.yaml')): return False return True def is_v4(self, data=None): if data is None: data = self.bundle_file() v4_keys = {'services', 'relations', 'machines', 'series'} bundle_keys = set(data.keys()) return bool(v4_keys & bundle_keys) def bundle_file(self, parse=True): for f in self.supported_files: if os.path.isfile(os.path.join(self.bundle_path, f)): with open(os.path.join(self.bundle_path, f)) as d: return yaml.safe_load(d.read()) if parse else d.read() raise Exception('No bundle.json or bundle.yaml file found') def proof(self): lint = BundleLinter(self.debug) lint.proof(self) return lint.lint, lint.exit_code def promulgate(self): pass charm-tools-2.1.2/charmtools/mr.py0000664000175000017500000001224412650157641017347 0ustar marcomarco00000000000000# Copyright (C) 2013 Marco Ceppi . # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
import os import ConfigParser from bzrlib import trace from bzrlib.bzrdir import BzrDir from bzrlib.branch import Branch from bzrlib.plugin import load_plugins from bzrlib.repository import Repository load_plugins() trace.enable_default_logging() # Provide better error handling class Mr: def __init__(self, directory=None, config=None, mr_compat=True): self.directory = directory or os.getcwd() self.control_dir = os.path.join(self.directory, '.bzr') self.config_file = config or os.path.join(self.directory, '.mrconfig') self.mr_compat = mr_compat if mr_compat: if self.__is_repository(): self.config = self.__read_cfg() self.bzr_dir = Repository.open(self.directory) else: self.config = ConfigParser.RawConfigParser() r = BzrDir.create(self.directory) self.bzr_dir = r.create_repository(shared=True) else: self.config = ConfigParser.RawConfigParser() self.bzr_dir = None def add(self, name, repository='lp:charms', checkout=False): # This isn't a true conversion of Mr, as such it's highly specialized # for just Charm Tools. So when you "add" a charm, it's just going # to use the charm name to fill in a template. Repository is in there # just in case we later add personal branching. '''Add a respository to the mrconfig''' if not name: raise Exception('No name provided') if not self.config.has_section(name): self.config.add_section(name) self.config.set(name, 'checkout', "bzr branch %s %s" % (repository, name)) if checkout: self.checkout(name) def checkout(self, name=None): '''Checkout either one or all repositories from the mrconfig''' if not name: for name in self.config.sections(): charm_remote = self.__get_repository(name) self.__checkout(charm_remote, os.path.join(self.directory, name)) else: # Move this, and the charm_* stuff to _checkout? Makes sense if not self.config.has_section(name): raise Exception('No configuration for %s' % name) charm_remote = self.__get_repository(name) self.__checkout(charm_remote, os.path.join(self.directory, name)) def update(self, name=None, force=False): '''Update, or checkout, a charm in to directory''' if name: self.__update(name) else: for charm in self.config.sections(): self.__update(charm) def remove(self, name=None): '''Remove a repository from the mrconfig''' if not name: raise Exception('No name provided') self.config.remove_section(name) def list(self): '''Return all sections of the mr configuration''' return self.config.sections() def exists(self, name): '''Checks if the configuration already exists for this section''' return self.config.has_section(name) def save(self): '''Save the configuration file to disk''' with open(self.config_file, 'w') as mrcfg: self.config.write(mrcfg) __write_cfg = save def __read_cfg(self): cfg = ConfigParser.ConfigParser() if not self.config_file: raise Exception('No .mrconfig specified') if os.path.exists(self.config_file): cfg.read(self.config_file) return cfg def __checkout(self, src, to): remote = Branch.open(src) remote.bzrdir.sprout(to) # I wish there was a way to 'close' a RemoteBranch. 
Sadly, # I don't think there is def __update(self, name): if not os.path.exists(os.path.join(self.directory, name, '.bzr')): return self.checkout(name) charm_remote = self.__get_repository(name) local_branch = Branch.open(os.path.join(self.directory, name)) remote_branch = Branch.open(charm_remote) local_branch.pull(remote_branch) def __get_repository(self, name): if not self.config.has_section(name): raise Exception('No section "%s" configured' % name) return self.config.get(name, 'checkout').split(' ')[-2] def __is_repository(self): try: r = Repository.open(self.directory) except: return False return r.is_shared() charm-tools-2.1.2/charmtools/fetchers.py0000664000175000017500000002417512672606236020545 0ustar marcomarco00000000000000import errno import logging import os import re import shlex import shutil import subprocess import tempfile import requests import yaml log = logging.getLogger(__name__) REQUEST_TIMEOUT_SECS = 45 def get(*args, **kw): if 'timeout' not in kw: kw['timeout'] = REQUEST_TIMEOUT_SECS return requests.get(*args, **kw) def is_int(string): try: int(string) return True except ValueError: return False def rename(dir_): """If ``dir_`` is a charm directory, rename it to match the charm name, otherwise do nothing. :param dir_: directory path :return: the new directory name (possibly unchanged). """ dir_ = dir_.rstrip(os.sep) metadata = os.path.join(dir_, "metadata.yaml") if not os.path.exists(metadata): return dir_ metadata = yaml.safe_load(open(metadata)) if not metadata: return dir_ name = metadata.get("name") if not name: return dir_ new_dir = os.path.join(os.path.dirname(dir_), name) if not os.path.exists(new_dir): # This ignores existing repos # In truth we want control over management of existing # repos with per VCS branch selections # ex: switching the git branch marked in revision os.rename(dir_, new_dir) return new_dir def extract_archive(archive, dir_): """Extract zip archive at filesystem path ``archive`` into directory ``dir_`` and return the full path to the directory containing the extracted archive. """ tempdir = tempfile.mkdtemp(dir=dir_) log.debug("Extracting %s to %s", archive, tempdir) # Can't extract with python due to bug that drops file # permissions: http://bugs.python.org/issue15795 # In particular, it's important that executable test files in the # archive remain executable, otherwise the tests won't be run. # Instead we use a shell equivalent of the following: # archive = zipfile.ZipFile(archive, 'r') # archive.extractall(tempdir) check_call('unzip {} -d {}'.format(archive, tempdir)) return tempdir def download_file(url, dir_): """Download file at ``url`` into directory ``dir_`` and return the full path to the downloaded file. 
""" _, filename = tempfile.mkstemp(dir=dir_) log.debug("Downloading %s", url) r = get(url, stream=True) with open(filename, 'wb') as f: for chunk in r.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks f.write(chunk) f.flush() return filename class Fetcher(object): def __init__(self, url, **kw): self.revision = '' self.url = url for k, v in kw.items(): setattr(self, k, v) @classmethod def can_fetch(cls, url): match = cls.MATCH.search(url) return match.groupdict() if match else {} def get_revision(self, dir_): dirlist = os.listdir(dir_) if '.bzr' in dirlist: rev_info = check_output('bzr revision-info', cwd=dir_) return rev_info.split()[1] elif '.git' in dirlist: return check_output('git rev-parse HEAD', cwd=dir_) elif '.hg' in dirlist: return check_output( "hg log -l 1 --template '{node}\n' -r .", cwd=dir_) else: return self.revision class BzrFetcher(Fetcher): MATCH = re.compile(r""" ^(lp:|launchpad:|https?://((code|www)\.)?launchpad.net/|bzr\+ssh://[^/]+/) (?P[^@]*)(@(?P.*))?$ """, re.VERBOSE) @classmethod def can_fetch(cls, url): matchdict = super(BzrFetcher, cls).can_fetch(url) return matchdict if '/+merge/' not in matchdict.get('repo', '') else {} def fetch(self, dir_): dir_ = tempfile.mkdtemp(dir=dir_) url = 'lp:' + self.repo cmd = 'branch --use-existing-dir {} {}'.format(url, dir_) if self.revision: cmd = '{} -r {}'.format(cmd, self.revision) bzr(cmd) return rename(dir_) class BzrMergeProposalFetcher(BzrFetcher): @classmethod def can_fetch(cls, url): matchdict = super(BzrFetcher, cls).can_fetch(url) return matchdict if '/+merge/' in matchdict.get('repo', '') else {} def fetch(self, dir_): dir_ = tempfile.mkdtemp(dir=dir_) api_base = 'https://api.launchpad.net/devel/' url = api_base + self.repo merge_data = get(url).json() target = 'lp:' + merge_data['target_branch_link'][len(api_base):] source = 'lp:' + merge_data['source_branch_link'][len(api_base):] bzr('branch --use-existing-dir {} {}'.format(target, dir_)) bzr('merge {}'.format(source), cwd=dir_) bzr('commit --unchanged -m "Merge commit"', cwd=dir_) return rename(dir_) class LaunchpadGitFetcher(Fetcher): MATCH = re.compile(r""" ^(git:|https)?://git.launchpad.net/ (?P[^@]*)(@(?P.*))?$ """, re.VERBOSE) def fetch(self, dir_): dir_ = tempfile.mkdtemp(dir=dir_) url = 'https://git.launchpad.net/' + self.repo git('clone {} {}'.format(url, dir_)) if self.revision: git('checkout {}'.format(self.revision), cwd=dir_) return rename(dir_) class GithubFetcher(Fetcher): MATCH = re.compile(r""" ^(gh:|github:|https?://(www\.)?github.com/|git@github.com:) (?P[^@]*)(@(?P.*))?$ """, re.VERBOSE) def fetch(self, dir_): dir_ = tempfile.mkdtemp(dir=dir_) url = 'https://github.com/' + self.repo git('clone {} {}'.format(url, dir_)) if self.revision: git('checkout {}'.format(self.revision), cwd=dir_) return rename(dir_) class GitFetcher(Fetcher): """Generic git fetcher. Matches any url that starts with "git" or ends with ".git". 
""" MATCH = re.compile(r""" ^(?Pgit.*|.*\.git)?$ """, re.VERBOSE) def fetch(self, dir_): dir_ = tempfile.mkdtemp(dir=dir_) git('clone {} {}'.format(self.repo, dir_)) return rename(dir_) class BitbucketFetcher(Fetcher): MATCH = re.compile(r""" ^(bb:|bitbucket:|https?://(www\.)?bitbucket.org/) (?P[^@]*)(@(?P.*))?$ """, re.VERBOSE) def fetch(self, dir_): dir_ = tempfile.mkdtemp(dir=dir_) url = 'https://bitbucket.org/' + self.repo if url.endswith('.git'): return self._fetch_git(url, dir_) return self._fetch_hg(url, dir_) def _fetch_git(self, url, dir_): git('clone {} {}'.format(url, dir_)) if self.revision: git('checkout {}'.format(self.revision), cwd=dir_) return rename(dir_) def _fetch_hg(self, url, dir_): cmd = 'clone {} {}'.format(url, dir_) if self.revision: cmd = '{} -u {}'.format(cmd, self.revision) hg(cmd) return rename(dir_) class LocalFetcher(Fetcher): @classmethod def can_fetch(cls, url): src = os.path.abspath( os.path.join(os.getcwd(), os.path.expanduser(url))) if os.path.exists(src): return dict(path=src) return {} def fetch(self, dir_): dst = os.path.join(dir_, os.path.basename(self.path.rstrip(os.sep))) shutil.copytree(self.path, dst, symlinks=True) return dst class CharmstoreDownloader(Fetcher): """Downloads and extracts a charm archive from the charm store. """ MATCH = re.compile(r""" ^cs:(?P.*)$ """, re.VERBOSE) STORE_URL = 'https://api.jujucharms.com/charmstore/v4/{}' ARCHIVE_URL = STORE_URL + '/archive' REVISION_URL = STORE_URL + '/meta/id-revision' def __init__(self, *args, **kw): super(CharmstoreDownloader, self).__init__(*args, **kw) def fetch(self, dir_): url = self.ARCHIVE_URL.format(self.entity) archive = download_file(url, dir_) entity_dir = extract_archive(archive, dir_) return rename(entity_dir) def get_revision(self, dir_): url = self.REVISION_URL.format(self.entity) return get(url).json()['Revision'] class BundleDownloader(CharmstoreDownloader): MATCH = re.compile(r""" ^bundle:(?P.*)$ """, re.VERBOSE) def __init__(self, *args, **kw): super(BundleDownloader, self).__init__(*args, **kw) self.entity = normalize_bundle_name(self.entity) def normalize_bundle_name(bundle_name): """Convert old-style bundle name to new format. 
Example: ~charmers/mediawiki/6/single -> ~charmers/mediawiki-single-6 (for more examples see tests) """ owner, bundle = None, bundle_name if bundle.startswith('~'): owner, bundle = bundle.split('/', 1) bundle_parts = bundle.split('/') if len(bundle_parts) == 3 and is_int(bundle_parts[1]): bundle_parts = [ bundle_parts[0], bundle_parts[2], bundle_parts[1]] bundle = '-'.join(bundle_parts) if owner: bundle = '/'.join((owner, bundle)) return bundle def bzr(cmd, **kw): check_call('bzr ' + cmd, **kw) def git(cmd, **kw): check_call('git ' + cmd, **kw) def hg(cmd, **kw): check_call('hg ' + cmd, **kw) def check_call(cmd, **kw): return check_output(cmd, **kw) class FetchError(Exception): pass def check_output(cmd, **kw): args = shlex.split(cmd) try: p = subprocess.Popen( args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kw ) except OSError as e: msg = 'Unable to run "%s": %s' % (args[0], e.strerror) if e.errno == errno.ENOENT: msg += '\nPlease install "%s" and try again' % args[0] raise FetchError(msg) out, _ = p.communicate() if p.returncode != 0: raise FetchError(out) log.debug('%s: %s', cmd, out) return out FETCHERS = [ BzrFetcher, BzrMergeProposalFetcher, GithubFetcher, BitbucketFetcher, LocalFetcher, CharmstoreDownloader, BundleDownloader, LaunchpadGitFetcher, GitFetcher, ] def get_fetcher(url): for fetcher in FETCHERS: matchdict = fetcher.can_fetch(url) if matchdict: return fetcher(url, **matchdict) raise FetchError('No fetcher for url: %s' % url) charm-tools-2.1.2/charmtools/create.py0000775000175000017500000000610412676737527020214 0ustar marcomarco00000000000000#!/usr/bin/python # # create - generate Juju charm from template # # Copyright (C) 2011 Canonical Ltd. # Author: Clint Byrum # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . import logging import os import sys import argparse from charmtools.generators import ( CharmGenerator, CharmGeneratorException, get_installed_templates, ) from . import utils log = logging.getLogger(__name__) DEFAULT_TEMPLATE = 'reactive-python' def setup_parser(): parser = argparse.ArgumentParser( description='create a new charm') parser.add_argument( 'charmname', help='Name of charm to create.', ) parser.add_argument( 'charmhome', nargs='?', help='Dir to create charm in. Defaults to CHARM_HOME env var or PWD', ) parser.add_argument( '-t', '--template', default=None, help='Name of charm template to use; default is ' + DEFAULT_TEMPLATE + '. 
Installed templates: ' + ', '.join(get_installed_templates()), ) parser.add_argument( '-a', '--accept-defaults', help='Accept all template configuration defaults without prompting.', action='store_true', default=False, ) parser.add_argument( '-v', '--verbose', help='Print debug information', action='store_true', default=False, ) utils.add_plugin_description(parser) return parser def main(): parser = setup_parser() args = parser.parse_args() args.charmhome = args.charmhome or os.getenv('CHARM_HOME', '.') args.config = None if args.verbose: logging.basicConfig( format='%(levelname)s %(filename)s: %(message)s', level=logging.DEBUG, ) else: logging.basicConfig( format='%(levelname)s: %(message)s', level=logging.INFO, ) if not args.template: log.info( "Using default charm template (%s). To select a different " "template, use the -t option.", DEFAULT_TEMPLATE) args.template = DEFAULT_TEMPLATE elif args.template not in get_installed_templates(): raise Exception("No template available for '%s'. Available templates " "may be listed by running 'charm create --help'.") generator = CharmGenerator(args) try: generator.create_charm() except CharmGeneratorException as e: log.error(e) return 1 if __name__ == "__main__": sys.exit(main()) charm-tools-2.1.2/charmtools/version.py0000664000175000017500000000163412677250611020417 0ustar marcomarco00000000000000 import pkg_resources import argparse from cli import parser_defaults from charmtools import utils def get_args(args=None): parser = argparse.ArgumentParser( description='display tooling version information') utils.add_plugin_description(parser) parser = parser_defaults(parser) args = parser.parse_args(args) return args def charm_version(): try: from apt.cache import Cache charm_vers = Cache()['charm'].versions for v in charm_vers: if v.is_installed: charm_ver = v.version break except ImportError: charm_ver = 'unavailable' except: charm_ver = 'error' return charm_ver def main(): get_args() version = pkg_resources.get_distribution("charm-tools").version print "charm %s" % charm_version() print "charm-tools %s" % version if __name__ == '__main__': main() charm-tools-2.1.2/charmtools/generators/0000775000175000017500000000000012677251067020533 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/generators/template.py0000664000175000017500000000751212650157641022717 0ustar marcomarco00000000000000#!/usr/bin/python # Copyright (C) 2014 Canonical Ltd. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . from fnmatch import fnmatch import inspect import os import yaml from .prompt import PromptList class CharmTemplate(object): """Base plugin for creating a new charm.""" skip_parsing = ['README.ex', '*.pyc'] def skip_template(self, filename): """Return True if file should not be processed for template variable substitution. 
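        A short sketch of the default behaviour (file names illustrative):

            self.skip_template('README.ex')    # True, matches 'README.ex'
            self.skip_template('foo.pyc')      # True, matches '*.pyc'
            self.skip_template('config.yaml')  # False, still processed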
""" return any(fnmatch(filename, pat) for pat in self.skip_parsing) def prompts(self): """Return a list :class:`Prompt` objects that will be used for configuring the charm created by this plugin. """ return PromptList(self.config().get('prompts')) def config(self): """Return default configuration for this template, loaded from a config.yaml file. This is a sample config.yaml that configures one user prompt:: prompts: symlink: prompt: Symlink all hooks to one python source file? [yN] default: n type: boolean """ path = self.config_path() if os.path.exists(path): with open(path, 'r') as f: return yaml.safe_load(f.read()) return {} def config_path(self): """Return path to the config yaml file for this template. """ class_file = inspect.getfile(self.__class__) return os.path.join(os.path.dirname(class_file), 'config.yaml') def create_charm(self, config, output_path): """Create charm files :param config: dict of config values gathered interactively from the user, loaded from a config file, or as a result of accepting all configuration defaults. :param output_path: directory into which all charm files and dirs should be written. """ raise NotImplementedError def configure_prompt(self, prompt, config): """Reconfigure a prompt based on already-gathered config options Called right before ``prompt`` is rendered to the user or before the default value for the prompt is accepted. This gives the plugin a chance to reconfigure a prompt in any way necessary based on the results of prior prompts (contained in ``config``), including changing its :attr:`prompt` or :attr:`default`. Valid return values are the original prompt (modified or not), an entirely new :class:`Prompt` object, or None if this prompt should be skipped altogether. :param config: dict of all configuration values that have been set prior to this prompt being called. """ return prompt def validate_input(self, input_value, prompt, config): """Return the (possibly modified) validated input value, or raise ValueError with a message explaining why the value is invalid. :param input_value: str entered by user :param prompt: :class:`Prompt` object that elicited this input :param config: dict of all configuration values that have been set prior to this prompt being called. """ return prompt.validate(input_value) charm-tools-2.1.2/charmtools/generators/prompt.py0000664000175000017500000000350112650157641022417 0ustar marcomarco00000000000000#!/usr/bin/python # Copyright (C) 2014 Canonical Ltd. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
def get_validator(type_): return { 'str': str, 'string': str, 'int': int, 'integer': int, 'float': float, 'bool': boolean_validator, 'boolean': boolean_validator, }[type_] def boolean_validator(s): return s and s.lower() == 'true' or s.lower()[0] == 'y' class PromptList(list): def __init__(self, prompt_dicts=None): prompts = [] for k, v in (prompt_dicts or {}).items(): prompts.append(Prompt( k, v['prompt'], v['default'], v.get('type', 'string'), )) super(PromptList, self).__init__(prompts) class Prompt(object): def __init__(self, name, prompt, default, type_='string'): self.name = name self.prompt = prompt.strip() + ' ' self.default = default self.type_ = type_ def validate(self, value): """Return the (possibly modified) validated value, or raise an Exception with a message explaining why the value is invalid. """ return get_validator(self.type_)(value) charm-tools-2.1.2/charmtools/generators/utils.py0000664000175000017500000000455012650157641022243 0ustar marcomarco00000000000000#!/usr/bin/python # Copyright (C) 2014 Canonical Ltd. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . import logging import os import socket import textwrap import pkg_resources log = logging.getLogger(__name__) def get_installed_templates(): for ep in pkg_resources.iter_entry_points('charmtools.templates'): yield ep.name def apt_fill(package): v = {} try: import apt c = apt.Cache() c.open() p = c[package] log.info( "Found %s in apt cache; charm contents have been pre-populated " "from package metadata.", package) # summary and description attrs moved to Version # object in python-apt 0.7.9 if not hasattr(p, 'summary'): p = p.versions[0] v['summary'] = p.summary v['description'] = textwrap.fill(p.description, width=72, subsequent_indent=' ') except: log.info( "No %s in apt cache; creating an empty charm instead.", package) v['summary'] = '' v['description'] = '' return v def portable_get_maintainer(): """ Portable best effort to determine a maintainer """ if 'NAME' in os.environ: name = os.environ['NAME'] else: try: import pwd name = pwd.getpwuid(os.getuid()).pw_gecos.split(',')[0].strip() if not len(name): name = pwd.getpwuid(os.getuid())[0] except: name = 'Your Name' if not len(name): name = 'Your Name' email = os.environ.get('EMAIL', '%s@%s' % (name.replace(' ', '.'), socket.getfqdn())) return name, email charm-tools-2.1.2/charmtools/generators/__init__.py0000664000175000017500000000204012650157641022632 0ustar marcomarco00000000000000#!/usr/bin/python # Copyright (C) 2014 Canonical Ltd. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . from .generator import CharmGenerator # noqa from .generator import CharmGeneratorException # noqa from .template import CharmTemplate # noqa from .prompt import Prompt # noqa from .prompt import PromptList # noqa from .utils import get_installed_templates # noqa charm-tools-2.1.2/charmtools/generators/generator.py0000664000175000017500000000764012650157641023074 0ustar marcomarco00000000000000#!/usr/bin/python # Copyright (C) 2014 Canonical Ltd. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . import logging import os import shutil import tempfile import pkg_resources from .utils import apt_fill try: from ubuntutools.config import ubu_email as get_maintainer except ImportError: from .utils import portable_get_maintainer as get_maintainer # noqa log = logging.getLogger(__name__) class CharmGeneratorException(Exception): pass class CharmGenerator(object): """Generate a new Charm on the filesystem""" def __init__(self, cmdline_opts): self.opts = cmdline_opts self.plugin = self._load_plugin() def _load_plugin(self): """Instantiate and return the plugin defined by the ``template_name`` entry point. """ for ep in pkg_resources.iter_entry_points('charmtools.templates'): if ep.name == self.opts.template: return ep.load()() def create_charm(self): """Gather user configuration and hand it off to the template plugin to create the files and directories for the new charm. """ output_path = self._get_output_path() if os.path.exists(output_path): raise CharmGeneratorException( '{} exists. Please move it out of the way.'.format( output_path)) log.info('Generating charm for %s in %s', self.opts.charmname, output_path) metadata = self._get_metadata() user_config = self._get_user_config() user_config.update(metadata=metadata) tempdir = self._get_tempdir() try: self.plugin.create_charm(user_config, tempdir) shutil.copytree(tempdir, output_path, symlinks=True) finally: self._cleanup(tempdir) def _get_metadata(self): d = { 'package': self.opts.charmname, 'maintainer': '%s <%s>' % get_maintainer(), } d.update(apt_fill(self.opts.charmname)) return d def _get_user_config(self): """Get user configuration by prompting for it interactively or using predefined defaults. """ config = {} for prompt in self.plugin.prompts(): config[prompt.name] = self._prompt(prompt, config) return config def _prompt(self, prompt, config): """Prompt for and return user input, retrying until valid input received. If the 'accept_defaults' options is enabled, return the default value for the prompt rather than prompting the user. 
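        A sketch of the flow for an illustrative prompt whose default is 'n':

            Symlink all hooks to one python source file? [yN]  <Enter>
            -> returns prompt.validate('n')

        Invalid input prints the validation error and the prompt is retried.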
""" prompt = self.plugin.configure_prompt(prompt, config) if not prompt: return None if self.opts.accept_defaults: return prompt.validate(prompt.default) user_input = raw_input(prompt.prompt).strip() if not user_input: return prompt.validate(prompt.default) try: return self.plugin.validate_input(user_input, prompt, config) except Exception as e: print(str(e)) return self._prompt(prompt, config) def _get_output_path(self): return os.path.join(self.opts.charmhome, self.opts.charmname) def _get_tempdir(self): return tempfile.mkdtemp() def _cleanup(self, tempdir): shutil.rmtree(tempdir) charm-tools-2.1.2/charmtools/utils.py0000664000175000017500000003554012676466472020112 0ustar marcomarco00000000000000import argparse import copy import collections import hashlib import json import logging import os import re import subprocess import sys import tempfile import time from contextlib import contextmanager from .diff_match_patch import diff_match_patch import blessings import pathspec from path import path log = logging.getLogger('utils') @contextmanager def cd(directory, make=False): cwd = os.getcwd() if not os.path.exists(directory) and make: os.makedirs(directory) os.chdir(directory) try: yield finally: os.chdir(cwd) @contextmanager def tempdir(chdir=True): dirname = path(tempfile.mkdtemp()) if chdir: with cd(dirname): yield dirname else: yield dirname dirname.rmtree_p() def deepmerge(dest, src): """ Deep merge of two dicts. This is destructive (`dest` is modified), but values from `src` are passed through `copy.deepcopy`. """ for k, v in src.iteritems(): if dest.get(k) and isinstance(v, dict): deepmerge(dest[k], v) elif dest.get(k) and isinstance(v, list): if not v in dest.get(k): dest[k].extend(v) else: dest[k] = copy.deepcopy(v) return dest def delete_path(path, obj): """Delete a dotted path from object, assuming each level is a dict""" # TODO: Support lists parts = path.split('.') for p in parts[:-1]: obj = obj[p] if parts[-1] in obj: del obj[parts[-1]] class NestedDict(dict): def __init__(self, dict_or_iterable=None, **kwargs): if dict_or_iterable: if isinstance(dict_or_iterable, dict): self.update(dict_or_iterable) elif isinstance(dict_or_iterable, collections.Iterable): for k, v in dict_or_iterable: self[k] = v if kwargs: self.update(kwargs) def __setitem__(self, key, value): key = key.split('.') o = self for part in key[:-1]: o = o.setdefault(part, self.__class__()) dict.__setitem__(o, key[-1], value) def __getitem__(self, key): o = self if '.' 
in key: parts = key.split('.') key = parts[-1] for part in parts[:-1]: o = o[part] return dict.__getitem__(o, key) def __getattr__(self, key): try: return self[key] except KeyError: raise AttributeError(key) def get(self, key, default=None): try: return self[key] except KeyError: return default def update(self, other): deepmerge(self, other) class ProcessResult(object): def __init__(self, command, exit_code, stdout, stderr): self.command = command self.exit_code = exit_code self.stdout = stdout self.stderr = stderr def __repr__(self): return '' % (self.cmd, self.exit_code) @property def cmd(self): return ' '.join(self.command) @property def output(self): result = '' if self.stdout: result += self.stdout if self.stderr: result += self.stderr return result.strip() @property def json(self): if self.stdout: return json.loads(self.stdout) return None def __eq__(self, other): return self.exit_code == other def __bool__(self): return self.exit_code == 0 __nonzero__ = __bool__ def exit_on_error(self): if not bool(self): sys.stderr.write( '{}\n\nCommand failed: {}\n'.format(self.output, self.cmd)) sys.exit(self.exit_code) class Process(object): def __init__(self, command=None, exit=False, log=log, **kwargs): if isinstance(command, str): command = (command, ) self.command = command self._exit_on_error = exit self.log = log self._kw = kwargs def __repr__(self): return "" % (self.command, ) def exit_on_error(self, exit=True): self._exit_on_error = exit return self def __call__(self, *args, **kw): kwargs = dict(stdout=subprocess.PIPE, stderr=subprocess.STDOUT) if self._kw: kwargs.update(self._kw) kwargs.update(kw) if self.command: all_args = self.command + args else: all_args = args if 'env' not in kwargs: kwargs['env'] = os.environ p = subprocess.Popen(all_args, **kwargs) stdout, stderr = p.communicate() self.log.debug(stdout) stdout = stdout.strip() if stderr is not None: stderr = stderr.strip() self.log.debug(stderr) exit_code = p.poll() result = ProcessResult(all_args, exit_code, stdout, stderr) self.log.debug("process: %s (%d)", result.cmd, result.exit_code) if self._exit_on_error: result.exit_on_error() return result command = Process class Commander(object): def __init__(self, log=log): self.log = log def set_log(self, logger): self.log = logger def __getattr__(self, key): return command((key,), log=self.log) def check(self, *args, **kwargs): kwargs.update({'log': self.log}) return command(command=args, **kwargs).exit_on_error() def __call__(self, *args, **kwargs): kwargs.update({'log': self.log}) return command(command=args, shell=True, **kwargs) sh = Commander() dig = Process(('dig', '+short')) api_endpoints = Process(('juju', 'api-endpoints')) def wait_for(timeout, interval, *callbacks, **kwargs): """ Repeatedly try callbacks until all return True This will wait interval seconds between attempts and will error out after timeout has been exceeded. Callbacks will be called with the container as their argument. Setting timeout to zero will loop until cancelled, power runs outs, hardware fails, or the heat death of the universe. 
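    A minimal sketch (the callback below is hypothetical; note that, as
    implemented here, each callback is invoked with no arguments):

        wait_for(300, 5, lambda: path('/tmp/ready').exists())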
""" start = time.time() if timeout: end = start + timeout else: end = 0 bar = kwargs.get('bar', None) message = kwargs.get('message', None) once = 1 while True: passes = True if end > 0 and time.time() > end: raise OSError("Timeout exceeded in wait_for") if bar: bar.next(once, message=message) if once == 1: once = 0 if int(time.time()) % interval == 0: for callback in callbacks: result = callback() passes = passes & bool(result) if passes is False: break if passes is True: break time.sleep(1) def until(*callbacks, **kwargs): return wait_for(0, 20, *callbacks, **kwargs) def retry(attempts, *callbacks, **kwargs): """ Repeatedly try callbacks a fixed number of times or until all return True """ for attempt in xrange(attempts): if 'bar' in kwargs: kwargs['bar'].next(attempt == 0, message=kwargs.get('message')) for callback in callbacks: if not callback(): break else: break else: raise OSError("Retry attempts exceeded") return True def which(program): def is_exe(fpath): return os.path.isfile(fpath) and os.access(fpath, os.X_OK) fpath, fname = os.path.split(program) if fpath: if is_exe(program): return program else: for fpath in os.environ["PATH"].split(os.pathsep): fpath = fpath.strip('"') exe_file = os.path.join(fpath, program) if is_exe(exe_file): return exe_file return None def load_class(dpath, workingdir=None): # we expect the last element of the path if not workingdir: workingdir = os.getcwd() with cd(workingdir): modpath, classname = dpath.rsplit('.', 1) modpath = path(modpath.replace(".", "/")) if not modpath.exists(): modpath += ".py" if not modpath.exists(): raise OSError("Unable to load {} from {}".format( dpath, workingdir)) namespace = {} execfile(modpath, globals(), namespace) klass = namespace.get(classname) if klass is None: raise ImportError("Unable to load class {} at {}".format( classname, dpath)) return klass def walk(pathobj, fn, matcher=None, kind=None, **kwargs): """walk pathobj calling fn on each matched entry yielding each result. If kind is 'file' or 'dir' only that type ofd entry will be walked. matcher is an optional function returning bool indicating if the entry should be processed. 
""" p = path(pathobj) walker = p.walk if kind == "files": walker = p.walkfiles elif kind == "dir": walker = p.walkdir for entry in walker(): relpath = entry.relpath(pathobj) if matcher and not matcher(relpath): continue yield (entry, fn(entry, **kwargs)) def ignore_matcher(ignores=[]): spec = pathspec.PathSpec.from_lines(pathspec.GitIgnorePattern, ignores) def matcher(entity): return entity not in spec.match_files((entity,)) return matcher def sign(pathobj): p = path(pathobj) if not p.isfile(): return None return hashlib.sha256(p.bytes()).hexdigest() def delta_signatures(manifest_filename, ignorer=None): md = path(manifest_filename) repo = md.normpath().dirname() expected = json.load(md.open()) current = {} for rel, sig in walk(repo, sign): rel = rel.relpath(repo) current[rel] = sig add, change, delete = set(), set(), set() for p, s in current.items(): fp = repo / p if not fp.isfile(): continue if ignorer and not ignorer(p): continue if p not in expected["signatures"]: add.add(p) continue # layer, kind, sig # don't include items generated only for the last layer if expected["signatures"][p][0] == "build": continue if expected["signatures"][p][2] != s: change.add(p) for p, d in expected["signatures"].items(): if p not in current: delete.add(path(p)) return add, change, delete class ColoredFormatter(logging.Formatter): def __init__(self, terminal, *args, **kwargs): super(ColoredFormatter, self).__init__(*args, **kwargs) self._terminal = terminal def format(self, record): output = super(ColoredFormatter, self).format(record) if record.levelno >= logging.CRITICAL: line_color = self._terminal.bold_yellow_on_red elif record.levelno >= logging.ERROR: line_color = self._terminal.red elif record.levelno >= logging.WARNING: line_color = self._terminal.yellow elif record.levelno >= logging.INFO: line_color = self._terminal.green else: line_color = self._terminal.cyan return line_color(output) class TermWriter(object): def __init__(self, fp=None, term=None, force_styling=False): if fp is None: fp = sys.stdout self.fp = fp if term is None: term = blessings.Terminal(force_styling=force_styling) self.term = term def __getattr__(self, key): return getattr(self.term, key) def write(self, msg, *args, **kwargs): if 't' in kwargs: raise ValueError("Using reserved token 't' in TermWriter.write") kwargs['t'] = self.term self.fp.write(msg.format(*args, **kwargs)) class _O(dict): def __getattr__(self, k): return self[k] REACTIVE_PATTERNS = [ re.compile("\s*@when"), re.compile(".set_state\(") ] def delta_python(orig, dest, patterns=REACTIVE_PATTERNS, context=2): """Delta two python files looking for certain patterns""" if isinstance(orig, path): od = orig.text() elif hasattr(orig, 'read'): od = orig.read() else: raise TypeError("Expected path() or file(), got %s" % type(orig)) if isinstance(dest, path): dd = dest.text() elif hasattr(orig, 'read'): dd = dest.read() else: raise TypeError("Expected path() or file(), got %s" % type(dest)) differ = diff_match_patch() linect = 0 lastMatch = None for res in differ.diff_main(od, dd): if res[0] == diff_match_patch.DIFF_EQUAL: linect += res[1].count('\n') lastMatch = res[:] continue elif res[0] == diff_match_patch.DIFF_INSERT: linect += res[1].count('\n') else: linect -= res[1].count('\n') for p in patterns: if p.search(lastMatch[1]): yield [linect, lastMatch, res] break def delta_python_dump(orig, dest, patterns=REACTIVE_PATTERNS, context=2, term=None, from_name=None, to_name=None): if term is None: term = TermWriter() def norm_sources(orig, dest): if from_name: oname = 
from_name else: oname = orig if to_name: dname = to_name else: dname = dest return _O({'orig_name': oname, 'dest_name': dname}) def prefix_lines(lines, lineno): if isinstance(lines, str): lines = lines.splitlines() for i, l in enumerate(lines): lines[i] = "%-5d| %s" % (lineno + i, l) return "\n".join(lines) i = 0 for lineno, last, current in delta_python(orig, dest, patterns, context): # pull enough context if last: context_lines = last[1].splitlines()[-context:] message = norm_sources(orig, dest) message['context'] = prefix_lines(context_lines, lineno - context) message['lineno'] = lineno message['delta'] = current[1] s = {diff_match_patch.DIFF_EQUAL: term.normal, diff_match_patch.DIFF_INSERT: term.green, diff_match_patch.DIFF_DELETE: term.red}[current[0]] message['status_color'] = s # output message term.write("{t.bold}{m.orig_name}{t.normal} --> " "{t.bold}{m.dest_name}{t.normal}:\n", m=message) term.write("{m.context}{m.status_color}{m.delta}{t.normal}\n", m=message) i += 1 return i == 0 class Description(argparse._StoreTrueAction): """A argparse action that prints its parent parser's description and exits.""" def __call__(self, parser, namespace, values, option_string=None): print(parser.description.split('\n')[0].strip('. ')) raise SystemExit() def add_plugin_description(parser): parser.add_argument('--description', action=Description) charm-tools-2.1.2/charmtools/build/0000775000175000017500000000000012677251067017461 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/build/inspector.py0000664000175000017500000000544512650157641022043 0ustar marcomarco00000000000000# coding=utf-8 import json from ruamel import yaml from charmtools.build import config from charmtools import utils theme = { 0: "normal", 1: "green", 2: "cyan", 3: "magenta", 4: "yellow", 5: "red", } def scan_for(col, cur, depth): for e, (rel, d) in col[cur:]: if d and d == depth: return True return False def get_prefix(walk, cur, depth, next_depth): guide = [] for i in range(depth): # scan forward in walk from i seeing if a subsequent # entry happens at each depth if scan_for(walk, cur, i): guide.append(" │ ") else: guide.append(" ") if depth == next_depth: prefix = " ├─── " else: prefix = " └─── " return "{}{}".format("".join(guide), prefix) def inspect(charm, force_styling=False): tw = utils.TermWriter(force_styling=force_styling) manp = charm / ".build.manifest" comp = charm / "layer.yaml" if not manp.exists() or not comp.exists(): return manifest = json.loads(manp.text()) composer = yaml.load(comp.open()) a, c, d = utils.delta_signatures(manp) # ordered list of layers used for legend layers = list(manifest['layers']) def get_depth(e): rel = e.relpath(charm) depth = len(rel.splitall()) - 2 return rel, depth def get_suffix(rel): suffix = "" if rel in a: suffix = "+" elif rel in c: suffix = "*" return suffix def get_color(rel): # name of layer this belongs to color = tw.term.normal if rel in manifest['signatures']: layer = manifest['signatures'][rel][0] layer_key = layers.index(layer) color = getattr(tw, theme.get(layer_key, "normal")) else: if entry.isdir(): color = tw.blue return color tw.write("Inspect %s\n" % composer["is"]) for layer in layers: tw.write("# {color}{layer}{t.normal}\n", color=getattr(tw, theme.get( layers.index(layer), "normal")), layer=layer) tw.write("\n") tw.write("{t.blue}{target}{t.normal}\n", target=charm) ignorer = utils.ignore_matcher(config.DEFAULT_IGNORES) walk = sorted(utils.walk(charm, get_depth), key=lambda x: x[1][0]) for i in range(len(walk) - 1): entry, (rel, depth) = 
walk[i] nEnt, (nrel, ndepth) = walk[i + 1] if not ignorer(rel): continue tw.write("{prefix}{layerColor}{entry} " "{t.bold}{suffix}{t.normal}\n", prefix=get_prefix(walk, i, depth, ndepth), layerColor=get_color(rel), suffix=get_suffix(rel), entry=rel.name) charm-tools-2.1.2/charmtools/build/tactics.py0000664000175000017500000005360612676466472021506 0ustar marcomarco00000000000000import logging import json from ruamel import yaml import os import tempfile import jsonschema from path import path from charmtools import utils log = logging.getLogger(__name__) class Tactic(object): """ Tactics are first considered in the context of the config layer being called the config layer will attempt to (using its author provided info) create a tactic for a given file. That will later be intersected with any later layers to create a final single plan for each element of the output charm. Callable that will implement some portion of the charm composition Subclasses should implement __str__ and __call__ which should take whatever actions are needed. """ kind = "static" # used in signatures def __init__(self, entity, current, target, config): self.entity = entity self._current = current self._target = target self._raw_data = None self._config = config def __call__(self): raise NotImplementedError def __str__(self): return "{}: {} -> {}".format( self.__class__.__name__, self.entity, self.target_file) @property def current(self): """The file in the current layer under consideration""" return self._current @property def target(self): """The target (final) layer.""" return self._target @property def relpath(self): return self.entity.relpath(self.current.directory) @property def target_file(self): target = self.target.directory / self.relpath return target @property def layer_name(self): return self.current.directory.name @property def repo_path(self): return path("/".join(self.current.directory.splitall()[-2:])) @property def config(self): # Return the config of the layer *above* you # as that is the one that controls your compositing return self._config def combine(self, existing): """Produce a tactic informed by the last tactic for an entry. 
This is when a rule in a higher level charm overrode something in one of its bases for example.""" return self @classmethod def trigger(cls, relpath): """Should the rule trigger for a given path object""" return False def sign(self): """return sign in the form {relpath: (origin layer, SHA256)} """ target = self.target_file sig = {} if target.exists() and target.isfile(): sig[self.relpath] = (self.current.url, self.kind, utils.sign(self.target_file)) return sig def lint(self): return True def read(self): return None class ExactMatch(object): FILENAME = None @classmethod def trigger(cls, relpath): return cls.FILENAME == relpath class CopyTactic(Tactic): def __call__(self): if self.entity.isdir(): return should_ignore = utils.ignore_matcher(self.target.config.ignores) if not should_ignore(self.relpath): return target = self.target_file log.debug("Copying %s: %s", self.layer_name, target) # Ensure the path exists target.dirname().makedirs_p() if (self.entity != target) and not target.exists() \ or not self.entity.samefile(target): data = self.read() if data: target.write_bytes(data) self.entity.copymode(target) else: self.entity.copy2(target) def __str__(self): return "Copy {}".format(self.entity) @classmethod def trigger(cls, relpath): return True class InterfaceCopy(Tactic): def __init__(self, interface, relation_name, role, target, config): self.interface = interface self.relation_name = relation_name self.role = role self._target = target self._config = config @property def target(self): return self._target / "hooks/relations" / self.interface.name def __call__(self): # copy the entire tree into the # hooks/relations/ # directory log.debug("Copying Interface %s: %s", self.interface.name, self.target) ignorer = utils.ignore_matcher(self.config.ignores) for entity, _ in utils.walk(self.interface.directory, lambda x: True, matcher=ignorer, kind="files"): target = entity.relpath(self.interface.directory) target = (self.target / target).normpath() target.parent.makedirs_p() entity.copy2(target) init = self.target / "__init__.py" if not init.exists(): # ensure we can import from here directly init.touch() def __str__(self): return "Copy Interface {}".format(self.interface.name) def sign(self): """return sign in the form {relpath: (origin layer, SHA256)} """ sigs = {} for entry, sig in utils.walk(self.target, utils.sign, kind="files"): relpath = entry.relpath(self._target.directory) sigs[relpath] = (self.interface.url, "static", sig) return sigs def lint(self): impl = self.interface.directory / self.role + '.py' if not impl.exists(): log.error('Missing implementation for interface role: %s.py', self.role) return False valid = True for entry in self.interface.directory.walkfiles(): if entry.splitext()[1] != ".py": continue relpath = entry.relpath(self._target.directory) target = self._target.directory / relpath if not target.exists(): continue unchanged = utils.delta_python_dump(entry, target, from_name=relpath) if not unchanged: valid = False return valid class DynamicHookBind(Tactic): HOOKS = [] def __init__(self, name, owner, target, config, template_file): self.name = name self.owner = owner self._target = target self._template_file = template_file self.targets = [self._target / "hooks" / hook.format(name) for hook in self.HOOKS] def __call__(self): template = self._template_file.text() for target in self.targets: target.parent.makedirs_p() target.write_text(template.format(self.name)) target.chmod(0755) def sign(self): """return sign in the form {relpath: (origin layer, SHA256)} """ sigs = {} 
for target in self.targets: rel = target.relpath(self._target.directory) sigs[rel] = (self.owner, "dynamic", utils.sign(target)) return sigs def __str__(self): return "{}: {}".format(self.__class__.__name__, self.name) class InterfaceBind(DynamicHookBind): HOOKS = [ '{}-relation-joined', '{}-relation-changed', '{}-relation-broken', '{}-relation-departed' ] class StorageBind(DynamicHookBind): HOOKS = [ '{}-storage-attached', '{}-storage-detaching', ] class ManifestTactic(ExactMatch, Tactic): FILENAME = ".composer.manifest" def __call__(self): # Don't copy manifests, they are regenerated pass class SerializedTactic(ExactMatch, Tactic): kind = "dynamic" section = None prefix = None def __init__(self, *args, **kwargs): super(SerializedTactic, self).__init__(*args, **kwargs) self.data = {} self._read = False def load(self, fn): raise NotImplementedError('Must be implemented in subclass: load') def dump(self, data): raise NotImplementedError('Must be implemented in subclass: dump') def read(self): if not self._read: self.data = self.load(self.entity.open()) or {} self._read = True def combine(self, existing): # make sure both versions are read in existing.read() self.read() # merge them self.data = utils.deepmerge(existing.data, self.data) return self def apply_edits(self): # Apply any editing rules from config config = self.config if config: section = config.get(self.section) if section: dels = section.get('deletes', []) if self.prefix: namespace = self.data.get(self.prefix, {}) else: namespace = self.data for key in dels: # TODO: Chuck edit this thing utils.delete_path(key, namespace) if not self.target_file.parent.exists(): self.target_file.parent.makedirs_p() def process(self): self.read() self.apply_edits() return self.data def __call__(self): self.dump(self.process()) return self.data class YAMLTactic(SerializedTactic): """Rule Driven YAML generation""" prefix = None def load(self, fn): return yaml.load(fn, Loader=yaml.RoundTripLoader) def dump(self, data): with open(self.target_file, 'w') as fd: yaml.dump(data, fd, Dumper=yaml.RoundTripDumper, default_flow_style=False, default_style='"') class JSONTactic(SerializedTactic): """Rule Driven JSON generation""" prefix = None def load(self, fn): return json.load(fn) def dump(self, data): json.dump(data, self.target_file.open('w'), indent=2) class LayerYAML(YAMLTactic): """ Process the ``layer.yaml`` file from each layer, and generate the resulting ``layer.yaml`` for the built charm. The input ``layer.yaml`` files can contain the following sections: * ``includes`` This is the heart of layering. Layers and interface layers referenced in this list value are pulled in during charm build and combined with each other to produce the final layer. * ``config``, ``metadata``, ``dist``, or ``resources`` These objects can contain a ``deletes`` object to list keys that should be deleted from the resulting ``
.yaml``. * ``defines`` This object can contain a jsonschema used to defined and validate options passed to this layer from another layer. The options and schema will be namespaced by the current layer name. For example, layer "foo" defining ``bar: {type: string}`` will accept ``options: {foo: {bar: "foo"}}`` in the final ``layer.yaml``. * ``options`` This object can contain option name/value sections for other layers. For example, if the current layer includes the previously referenced "foo" layer, it could include ``foo: {bar: "foo"}`` in its ``options`` section. """ FILENAMES = ["layer.yaml", "composer.yaml"] def __init__(self, *args, **kwargs): super(LayerYAML, self).__init__(*args, **kwargs) self.schema = { 'type': 'object', 'properties': {} } @property def target_file(self): # force the non-deprecated name return self.target.directory / "layer.yaml" @classmethod def trigger(cls, relpath): return relpath in cls.FILENAMES def read(self): if not self._read: super(LayerYAML, self).read() self.data.setdefault('options', {}) self.schema['properties'] = { self.current.name: { 'type': 'object', 'properties': self.data.pop('defines', {}), 'default': {}, }, } def combine(self, existing): super(LayerYAML, self).combine(existing) self.schema = utils.deepmerge(existing.schema, self.schema) return self def lint(self): self.read() validator = extend_with_default(jsonschema.Draft4Validator)(self.schema) valid = True for error in validator.iter_errors(self.data['options']): log.error('Invalid value for option %s: %s', '.'.join(error.absolute_path), error.message) valid = False return valid def __call__(self): # rewrite includes to be the current source data = self.data if data is None: return # The split should result in the series/charm path only # XXX: there will be strange interactions with cs: vs local: if 'is' not in data: data['is'] = str(self.current.url) inc = data.get('includes', []) norm = [] for i in inc: if ":" in i: norm.append(i) else: # Attempt to normalize to a repository base norm.append("/".join(path(i).splitall()[-2:])) if norm: data['includes'] = norm if not self.target_file.parent.exists(): self.target_file.parent.makedirs_p() self.dump(data) return data def sign(self): """return sign in the form {relpath: (origin layer, SHA256)} """ target = self.target_file sig = {} if target.exists() and target.isfile(): sig["layer.yaml"] = (self.current.url, self.kind, utils.sign(self.target_file)) return sig class MetadataYAML(YAMLTactic): """Rule Driven metadata.yaml generation""" section = "metadata" FILENAME = "metadata.yaml" KEY_ORDER = ["name", "summary", "maintainer", "description", "tags", "requires", "provides", "peers"] def __init__(self, *args, **kwargs): super(MetadataYAML, self).__init__(*args, **kwargs) self.storage = {} def read(self): if not self._read: super(MetadataYAML, self).read() self.storage = {name: self.current.url for name in self.data.get('storage', {}).keys()} def combine(self, existing): super(MetadataYAML, self).combine(existing) self.storage.update(existing.storage) return self def apply_edits(self): super(MetadataYAML, self).apply_edits() if not self.config or not self.config.get(self.section): return for key in self.config[self.section].get('deletes', []): if not key.startswith('storage.'): continue _, name = key.split('.', 1) if '.' 
in name: continue self.storage.pop(name, None) def dump(self, data): final = yaml.comments.CommentedMap() # attempt keys in know order for k in self.KEY_ORDER: if k in data: final[k] = data[k] missing = set(data.keys()) - set(self.KEY_ORDER) for k in sorted(missing): final[k] = data[k] super(MetadataYAML, self).dump(final) class ConfigYAML(YAMLTactic): """Rule driven config.yaml generation""" section = "config" prefix = "options" FILENAME = "config.yaml" class ActionsYAML(YAMLTactic): """Rule driven actions.yaml generation""" section = "actions" FILENAME = "actions.yaml" class DistYAML(YAMLTactic): """Rule driven dist.yaml generation""" section = "dist" prefix = None FILENAME = "dist.yaml" class ResourcesYAML(YAMLTactic): """Rule driven resources.yaml generation""" section = "resources" prefix = None FILENAME = "resources.yaml" class InstallerTactic(Tactic): def __str__(self): return "Installing software to {}".format(self.relpath) @classmethod def trigger(cls, relpath): ext = relpath.splitext()[1] return ext in [".pypi", ] def __call__(self): # install package reference in trigger file # in place directory of target # XXX: Should this map multiline to "-r", self.entity spec = self.entity.text().strip() target = self.target_file.dirname() log.debug("pip installing {} as {}".format( spec, target)) with utils.tempdir(chdir=False) as temp_dir: # We do this dance so we don't have # to guess package and .egg file names # we move everything in the tempdir to the target # and track it for later use in sign() localenv = os.environ.copy() localenv['PYTHONUSERBASE'] = temp_dir utils.Process(("pip3", "install", "--user", "--ignore-installed", spec), env=localenv).exit_on_error()() self._tracked = [] # We now manage two classes of explicit mappings # When python packages are installed into a prefix # we know that bin/* should map to /bin/ # and lib/python*/site-packages/* should map to # /* src_paths = ["bin/*", "lib/python*/site-packages/*"] for p in src_paths: for d in temp_dir.glob(p): if not d.exists(): continue bp = d.relpath(temp_dir) if bp.startswith("bin/"): dst = self.target / bp elif bp.startswith("lib"): dst = target / d.name else: dst = target / bp if dst.exists(): if dst.isdir(): dst.rmtree_p() elif dst.isfile(): dst.remove() if not dst.parent.exists(): dst.parent.makedirs_p() log.debug("Installer moving {} to {}".format(d, dst)) d.move(dst) self._tracked.append(dst) def sign(self): """return sign in the form {relpath: (origin layer, SHA256)} """ sigs = {} for d in self._tracked: if d.isdir(): for entry, sig in utils.walk(d, utils.sign, kind="files"): relpath = entry.relpath(self.target.directory) sigs[relpath] = (self.current.url, "dynamic", sig) elif d.isfile(): relpath = d.relpath(self.target.directory) sigs[relpath] = ( self.current.url, "dynamic", utils.sign(d)) return sigs class WheelhouseTactic(ExactMatch, Tactic): kind = "dynamic" FILENAME = 'wheelhouse.txt' def __init__(self, *args, **kwargs): super(WheelhouseTactic, self).__init__(*args, **kwargs) self.tracked = [] self.previous = [] def __str__(self): return "Building wheelhouse in {}".format(self.target.directory / 'wheelhouse') def combine(self, existing): self.previous = existing.previous + [existing] return self def _add(self, pip, wheelhouse, *reqs): with utils.tempdir(chdir=False) as temp_dir: # put in a temp dir first to ensure we track all of the files utils.Process((pip, 'install', '--no-binary', ':all:', '-d', temp_dir) + reqs).exit_on_error()() for wheel in temp_dir.files(): dest = wheelhouse / wheel.basename() 
dest.remove_p() wheel.move(wheelhouse) self.tracked.append(dest) def __call__(self, venv=None): create_venv = venv is None venv = venv or path(tempfile.mkdtemp()) pip = venv / 'bin' / 'pip3' wheelhouse = self.target.directory / 'wheelhouse' wheelhouse.mkdir_p() if create_venv: utils.Process(('virtualenv', '--python', 'python3', venv)).exit_on_error()() utils.Process((pip, 'install', '-U', 'pip', 'wheel')).exit_on_error()() for tactic in self.previous: tactic(venv) self._add(pip, wheelhouse, '-r', self.entity) if create_venv: venv.rmtree_p() def sign(self): """return sign in the form {relpath: (origin layer, SHA256)} """ sigs = {} for tactic in self.previous: sigs.update(tactic.sign()) for d in self.tracked: relpath = d.relpath(self.target.directory) sigs[relpath] = ( self.current.url, "dynamic", utils.sign(d)) return sigs def load_tactic(dpath, basedir): """Load a tactic from the current layer using a dotted path. The last element in the path should be a Tactic subclass """ obj = utils.load_class(dpath, basedir) if not issubclass(obj, Tactic): raise ValueError("Expected to load a tactic for %s" % dpath) return obj def extend_with_default(validator_class): """ Extend a jsonschema validator to propagate default values prior to validating. """ validate_properties = validator_class.VALIDATORS["properties"] def set_defaults(validator, properties, instance, schema): for prop, subschema in properties.iteritems(): if "default" in subschema: instance.setdefault(prop, subschema["default"]) for error in validate_properties( validator, properties, instance, schema): yield error return jsonschema.validators.extend( validator_class, {"properties": set_defaults}, ) DEFAULT_TACTICS = [ ManifestTactic, WheelhouseTactic, InstallerTactic, DistYAML, ResourcesYAML, MetadataYAML, ConfigYAML, ActionsYAML, LayerYAML, CopyTactic ] charm-tools-2.1.2/charmtools/build/config.py0000664000175000017500000000665012650157641021301 0ustar marcomarco00000000000000from .tactics import DEFAULT_TACTICS, load_tactic import pathspec from ruamel import yaml import logging from path import path from otherstuf import chainstuf DEFAULT_IGNORES = [ ".bzr/", ".git/", "**/.ropeproject/", "*.pyc", "*~", ".tox/", "build/", ] class BuildConfig(chainstuf): """Defaults for controlling the generator, each layer in the inclusion graph can provide values, including things like overrides, or warnings if things are overridden that shouldn't be. 
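    A usage sketch (file paths are illustrative):

        config = BuildConfig.from_config('layer.yaml', allow_missing=True)
        config = config.add_config('deps/layer/basic/layer.yaml', True)
        config.ignores   # every layer's 'ignore' entries plus DEFAULT_IGNORES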
""" DEFAULT_FILE = "layer.yaml" OLD_CONFIG = "composer.yaml" def __init__(self, *args, **kwargs): super(BuildConfig, self).__init__(*args, **kwargs) self['_tactics'] = [] self.configured = False def __getattr__(self, key): return self[key] def rget(self, key): """Combine all the results from all the layers into a single iter""" result = [] for m in self.maps: r = m.get(key) if r: if isinstance(r, (list, tuple)): result.extend(r) else: result.append(r) return result def configure(self, config_file, allow_missing=False): config_file = path(config_file) data = None if not config_file.exists() and not allow_missing: raise OSError("Missing Config File {}".format(config_file)) try: if config_file.exists() and config_file.text().strip() != "": data = yaml.load(config_file.open()) self.configured = True except yaml.parser.ParserError: logging.critical("Malformed Config file: {}".format(config_file)) raise if data: self.update(data) # look at any possible imports and use them to build tactics tactics = self.get('tactics') basedir = config_file.dirname() if tactics: for name in tactics: tactic = load_tactic(name, basedir) self._tactics.append(tactic) return self @classmethod def from_config(cls, config_file, allow_missing=False): c = cls() c.configure(config_file, allow_missing) return c def add_config(self, config_file, allow_missing=False): c = self.new_child() c.configure(config_file, allow_missing) return c @property def name(self): return self.get('name') @property def ignores(self): return self.rget('ignore') + DEFAULT_IGNORES def tactics(self): # XXX: combine from config layer return self.rget('_tactics') + DEFAULT_TACTICS def tactic(self, entity, current, target, next_config): # Produce a tactic for the entity in question # These will be accumulate through the layers # and executed later bd = current.directory # Ignore handling if next_config: spec = pathspec.PathSpec.from_lines(pathspec.GitIgnorePattern, next_config.ignores) p = entity.relpath(bd) matches = spec.match_files((p,)) if p in matches: return None for tactic in self.tactics(): if tactic.trigger(entity.relpath(bd)): return tactic(target=target, entity=entity, current=current, config=next_config) return None charm-tools-2.1.2/charmtools/build/fetchers.py0000664000175000017500000001020512676466472021643 0ustar marcomarco00000000000000import os import requests from charmtools import fetchers from charmtools.fetchers import (git, # noqa Fetcher, get_fetcher, FetchError) from path import path class RepoFetcher(fetchers.LocalFetcher): @classmethod def can_fetch(cls, url): search_path = [os.getcwd(), os.environ.get("JUJU_REPOSITORY", ".")] cp = os.environ.get("LAYER_PATH") if cp: search_path.extend(cp.split(":")) for part in search_path: p = (path(part) / url).normpath() if p.exists(): return dict(path=p) return {} fetchers.FETCHERS.insert(0, RepoFetcher) class InterfaceFetcher(fetchers.LocalFetcher): # XXX: When hosted somewhere, fix this INTERFACE_DOMAIN = "http://interfaces.juju.solutions" NAMESPACE = "interface" ENVIRON = "INTERFACE_PATH" OPTIONAL_PREFIX = "juju-relation-" ENDPOINT = "/api/v1/interface" NO_LOCAL_LAYERS = False @classmethod def can_fetch(cls, url): # Search local path first, then the interface webservice if url.startswith("{}:".format(cls.NAMESPACE)): name = url[len(cls.NAMESPACE) + 1:] if not cls.NO_LOCAL_LAYERS: prefixed_name = '{}-{}'.format(cls.NAMESPACE, name) search_path = [os.environ.get("JUJU_REPOSITORY", ".")] cp = os.environ.get(cls.ENVIRON) if cp: search_path.extend(cp.split(os.pathsep)) for part in 
search_path: basepath = path(part) for dirname in (name, prefixed_name): p = (basepath / dirname).normpath() if p.exists(): return dict(path=p) choices = [name] if name.startswith(cls.OPTIONAL_PREFIX): choices.append(name[len(cls.OPTIONAL_PREFIX):]) for choice in choices: uri = "%s%s/%s/" % ( cls.INTERFACE_DOMAIN, cls.ENDPOINT, choice) try: result = requests.get(uri) except: result = None if result and result.ok: result = result.json() if result.get("repo"): return result return {} def target(self, dir_): """Return a :class:`path` of the directory where the downloaded item will be located. :param str dir_: Directory into which the item will be downloaded. :return: :class:`path` """ if hasattr(self, "path"): return self.path elif hasattr(self, "repo"): _, target = self._get_repo_fetcher_and_target(self.repo, dir_) return target def _get_repo_fetcher_and_target(self, repo, dir_): """Returns a :class:`Fetcher` for ``repo``, and the destination dir at which the downloaded repo will be created. :param str repo: The repo url. :param str dir_: Directory into which the repo will be downloaded. :return: 2-tuple of (:class:`Fetcher`, :class:`path`) """ u = self.url[len(self.NAMESPACE) + 1:] f = get_fetcher(repo) if hasattr(f, "repo"): basename = path(f.repo).name.splitext()[0] else: basename = u return f, path(dir_) / basename def fetch(self, dir_): if hasattr(self, "path"): return super(InterfaceFetcher, self).fetch(dir_) elif hasattr(self, "repo"): f, target = self._get_repo_fetcher_and_target(self.repo, dir_) res = f.fetch(dir_) if res != target: target.rmtree_p() path(res).rename(target) return target fetchers.FETCHERS.insert(0, InterfaceFetcher) class LayerFetcher(InterfaceFetcher): INTERFACE_DOMAIN = "http://interfaces.juju.solutions" NAMESPACE = "layer" ENVIRON = "LAYER_PATH" OPTIONAL_PREFIX = "juju-layer-" ENDPOINT = "/api/v1/layer" fetchers.FETCHERS.insert(0, LayerFetcher) charm-tools-2.1.2/charmtools/build/__init__.py0000775000175000017500000005667612676737527021632 0ustar marcomarco00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- import argparse import json import logging import os import sys import blessings from collections import OrderedDict from path import path import requests import yaml from charmtools.build import inspector import charmtools.build.tactics from charmtools.build.config import (BuildConfig, DEFAULT_IGNORES) from charmtools.build.fetchers import ( InterfaceFetcher, LayerFetcher, get_fetcher, FetchError, ) from charmtools import utils from .. 
import repofinder log = logging.getLogger("build") class BuildError(Exception): pass class Configable(object): CONFIG_FILE = None def __init__(self): self._config = BuildConfig() self.config_file = None @property def config(self): if self._config.configured: return self._config if self.config_file and self.config_file.exists(): self._config.configure(self.config_file) return self._config @property def configured(self): return bool(self.config is not None and self.config.configured) class Fetched(Configable): def __init__(self, url, target_repo, name=None): super(Fetched, self).__init__() self.url = url self.target_repo = target_repo / self.NAMESPACE self.directory = None self._name = name @property def name(self): if self._name: return self._name if self.url.startswith(self.NAMESPACE): return self.url[len(self.NAMESPACE)+1:] return self.url def __repr__(self): return "<{} {}:{}>".format(self.__class__.__name__, self.url, self.directory) def __div__(self, other): return self.directory / other def fetch(self): try: fetcher = get_fetcher(self.url) except FetchError: # We might be passing a local dir path directly # which fetchers don't currently support self.directory = path(self.url) else: if hasattr(fetcher, "path") and fetcher.path.exists(): self.directory = path(fetcher.path) else: if not self.target_repo.exists(): self.target_repo.makedirs_p() self.directory = path(fetcher.fetch(self.target_repo)) if not self.directory.exists(): raise BuildError( "Unable to locate {}. " "Do you need to set {}?".format( self.url, self.ENVIRON)) self.config_file = self.directory / self.CONFIG_FILE if not self.config_file.exists(): if self.OLD_CONFIG and (self.directory / self.OLD_CONFIG).exists(): self.config_file = (self.directory / self.OLD_CONFIG) self._name = self.config.name return self class Interface(Fetched): CONFIG_FILE = "interface.yaml" OLD_CONFIG = None NAMESPACE = "interface" ENVIRON = "INTERFACE_PATH" class Layer(Fetched): CONFIG_FILE = "layer.yaml" OLD_CONFIG = "composer.yaml" NAMESPACE = "layer" ENVIRON = "LAYER_PATH" class Builder(object): """ Handle the processing of overrides, implements the policy of BuildConfig """ PHASES = ['lint', 'read', 'call', 'sign', 'build'] HOOK_TEMPLATE_FILE = path('hooks/hook.template') DEFAULT_SERIES = 'trusty' def __init__(self): self.config = BuildConfig() self.force = False self._name = None self._charm = None self._top_layer = None self.hide_metrics = False @property def top_layer(self): if not self._top_layer: self._top_layer = Layer(self.charm, self.deps).fetch() return self._top_layer @property def charm(self): return self._charm @charm.setter def charm(self, value): self._charm = path(value) @property def charm_metadata(self): if not hasattr(self, '_charm_metadata'): md = path(self.charm) / "metadata.yaml" setattr( self, '_charm_metadata', yaml.load(md.open()) if md.exists() else None) return self._charm_metadata @property def name(self): if self._name: return self._name # optionally extract name from the top layer self._name = str(path(self.charm).abspath().name) # however if the current layer has a metadata.yaml we can # use its name if self.charm_metadata: name = self.charm_metadata.get("name") if name: self._name = name return self._name @name.setter def name(self, value): self._name = value @property def manifest(self): return self.target_dir / '.build.manifest' def check_series(self): """Make sure this is a either a multi-series charm, or we have a build series defined. If not, fall back to a default series. 
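        A sketch of the fallback (values are illustrative):

            builder.series = None     # e.g. nothing supplied on the command line
            builder.check_series()    # no-op for a multi-series charm; otherwise
                                      # builder.series becomes DEFAULT_SERIES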
""" if self.series: return if self.charm_metadata and self.charm_metadata.get('series'): return self.series = self.DEFAULT_SERIES def status(self): result = {} result.update(vars(self)) for e in ["LAYER_PATH", "INTERFACE_PATH", "JUJU_REPOSITORY"]: result[e] = os.environ.get(e) return result def create_repo(self): # Generated output will go into this directory base = path(self.output_dir) self.repo = (base / (self.series if self.series else 'builds')) # And anything it includes from will be placed here # outside the series self.deps = (base / "deps") self.target_dir = (self.repo / self.name) def find_or_create_repo(self, allow_create=True): # see if output dir is already in a repo, we can use that directly if self.output_dir == path(self.charm).abspath(): # we've indicated in the cmdline that we are doing an inplace # update if (self.series and self.output_dir.parent.basename() == self.series): # we're already in a repo self.repo = self.output_dir.parent.parent self.deps = (self.repo / "deps") self.target_dir = self.output_dir return if allow_create: self.create_repo() else: raise ValueError("%s doesn't seem valid", self.charm.directory) log.info("Destination charm directory: {}".format(self.target_dir)) @property def layers(self): layers = [] for i in self._layers: layers.append(i.url) for i in self._interfaces: layers.append(i.url) layers.append("build") return layers def fetch(self): self.target_dir.makedirs_p() if not self.top_layer.configured: log.warn("The top level layer expects a " "valid layer.yaml file") # Manually create a layer object for the output self.target = Layer(self.name, self.repo) self.target.directory = self.target_dir return self.fetch_deps(self.top_layer) def fetch_deps(self, layer): results = {"layers": [], "interfaces": []} self.fetch_dep(layer, results) # results should now be a bottom up list # of deps. Using the in order results traversal # we can build out our plan for each file in the # output layer results["layers"].append(layer) self._layers = results["layers"] self._interfaces = results["interfaces"] return results def fetch_dep(self, layer, results): # Recursively fetch and scan layers # This returns a plan for each file in the result baselayers = layer.config.get('includes', []) if not baselayers: # no deps, this is possible for any base # but questionable for the target return if isinstance(baselayers, str): baselayers = [baselayers] for base in baselayers: # The order of these commands is important. We only want to # fetch something if we haven't already fetched it. 
if base.startswith("interface:"): iface = Interface(base, self.deps) if iface.name in [i.name for i in results['interfaces']]: continue results["interfaces"].append(iface.fetch()) else: base_layer = Layer(base, self.deps) if base_layer.name in [i.name for i in results['layers']]: continue results["layers"].append(base_layer.fetch()) self.fetch_dep(base_layer, results) def build_tactics(self, entry, current, config, output_files): # Delegate to the config object, it's rules # will produce a tactic relname = entry.relpath(current.directory) current = current.config.tactic(entry, current, self.target, config) existing = output_files.get(relname) if existing is not None: tactic = current.combine(existing) else: tactic = current output_files[relname] = tactic def plan_layers(self, layers, output_files): config = BuildConfig() cfgfn = layers["layers"][0] / BuildConfig.DEFAULT_FILE if cfgfn.exists(): config = config.add_config( cfgfn, True) else: cfgfn = layers["layers"][0] / BuildConfig.OLD_CONFIG config = config.add_config( cfgfn, True) layers["layers"][-1].url = self.name for i, layer in enumerate(layers["layers"]): log.info("Processing layer: %s", layer.url) if i + 1 < len(layers["layers"]): next_layer = layers["layers"][i + 1] config = config.add_config( next_layer / BuildConfig.DEFAULT_FILE, True) list(e for e in utils.walk(layer.directory, self.build_tactics, current=layer, config=config, output_files=output_files)) plan = [t for t in output_files.values() if t] return plan def plan_interfaces(self, layers, output_files, plan): # Interface includes don't directly map to output files # as they are computed in combination with the metadata.yaml if not layers.get('interfaces'): return metadata_tactic = [tactic for tactic in plan if isinstance( tactic, charmtools.build.tactics.MetadataYAML)] if not metadata_tactic: raise BuildError('At least one layer must provide metadata.yaml') meta = metadata_tactic[0].process() if not meta and layers.get('interfaces'): raise BuildError( 'Includes interfaces but no metadata.yaml to bind them') elif self.HOOK_TEMPLATE_FILE not in output_files: raise BuildError('At least one layer must provide %s', self.HOOK_TEMPLATE_FILE) elif not meta: log.warn('Empty metadata.yaml') template_file = self.target / self.HOOK_TEMPLATE_FILE target_config = layers["layers"][-1].config specs = [] used_interfaces = set() for role in ("provides", "requires", "peers"): for k, v in meta.get(role, {}).items(): # ex: ["provides", "db", "mysql"] specs.append([role, k, v["interface"]]) used_interfaces.add(v["interface"]) for iface in layers["interfaces"]: if iface.name not in used_interfaces: # we shouldn't include something the charm doesn't use log.warn("layer.yaml includes {} which isn't " "used in metadata.yaml".format( iface.name)) continue for role, relation_name, interface_name in specs: if interface_name != iface.name: continue log.info("Processing interface: %s", interface_name) # COPY phase plan.append( charmtools.build.tactics.InterfaceCopy( iface, relation_name, role, self.target, target_config) ) # Link Phase plan.append( charmtools.build.tactics.InterfaceBind( relation_name, iface.url, self.target, target_config, template_file)) def plan_storage(self, layers, output_files, plan): # Storage hooks don't directly map to output files # as they are computed in combination with the metadata.yaml metadata_tactic = [tactic for tactic in plan if isinstance( tactic, charmtools.build.tactics.MetadataYAML)] if not metadata_tactic: raise BuildError('At least one layer must provide 
metadata.yaml') meta_tac = metadata_tactic[0] meta_tac.process() if not meta_tac.storage: return if self.HOOK_TEMPLATE_FILE not in output_files: raise BuildError('At least one layer must provide %s', self.HOOK_TEMPLATE_FILE) template_file = self.target / self.HOOK_TEMPLATE_FILE target_config = layers["layers"][-1].config for name, owner in meta_tac.storage.items(): plan.append( charmtools.build.tactics.StorageBind( name, owner, self.target, target_config, template_file)) def formulate_plan(self, layers): """Build out a plan for each file in the various layers, taking into account config at each layer""" output_files = OrderedDict() self.plan = self.plan_layers(layers, output_files) self.plan_interfaces(layers, output_files, self.plan) self.plan_storage(layers, output_files, self.plan) if self.hide_metrics is not True: self.post_metrics(layers) return self.plan def post_metrics(self, layers): url = "/".join((self.interface_service, "api/v1/metrics/")) data = {"kind": "build", "layers": [l.url for l in layers["layers"]], "interfaces": [i.url for i in layers["interfaces"]]} try: requests.post(url, json.dumps(data).encode('utf-8'), timeout=10) except requests.exceptions.RequestException: log.warning("Unable to post usage metrics") def exec_plan(self, plan=None, layers=None): signatures = {} cont = True for phase in self.PHASES: for tactic in plan: if phase == "lint": cont &= tactic.lint() if cont is False and self.force is not True: return elif phase == "read": # We use a read (into memory phase to make layer comps # simpler) tactic.read() elif phase == "call": tactic() elif phase == "sign": sig = tactic.sign() if sig: signatures.update(sig) new_repo = not self.manifest.exists() if new_repo: added, changed, removed = set(), set(), set() else: ignores = utils.ignore_matcher(DEFAULT_IGNORES) added, changed, _ = utils.delta_signatures(self.manifest, ignores) removed = self.clean_removed(signatures) # write out the sigs if "sign" in self.PHASES: self.write_signatures(signatures, layers) if self.report: self.write_report(new_repo, added, changed, removed) def write_signatures(self, signatures, layers): signatures['.build.manifest'] = ["build", 'dynamic', 'unchecked'] self.manifest.write_text(json.dumps(dict( signatures=signatures, layers=layers, ), indent=2)) def generate(self): layers = self.fetch() self.formulate_plan(layers) self.exec_plan(self.plan, self.layers) def validate(self): self._validate_charm_repo() if not self.manifest.exists(): return [], [], [] ignorer = utils.ignore_matcher(DEFAULT_IGNORES) a, c, d = utils.delta_signatures(self.manifest, ignorer) for f in a: log.warn( "Added unexpected file, should be in a base layer: %s", f) for f in c: log.warn( "Changed file owned by another layer: %s", f) for f in d: log.warn( "Deleted a file owned by another layer: %s", f) if a or c or d: if self.force is True: log.info( "Continuing with known changes to target layer. " "Changes will be overwritten") else: raise BuildError( "Unable to continue due to unexpected modifications " "(try --force)") return a, c, d def _validate_charm_repo(self): if 'repo' not in self.top_layer.config: msg = 'Please add a `repo` key to your {}'.format( self.top_layer.config_file.name) recommended_repo = repofinder.get_recommended_repo(self.charm) if recommended_repo: msg += ', e.g. repo: {}'.format(recommended_repo) else: msg += ', with a url from which your layer can be cloned.' 
log.warn(msg) def __call__(self): self.find_or_create_repo() log.debug(json.dumps( self.status(), indent=2, sort_keys=True, default=str)) self.validate() self.generate() def inspect(self): self.charm = path(self.charm).abspath() inspector.inspect(self.charm, force_styling=self.force_raw) def normalize_outputdir(self): od = path(self.charm).abspath() repo = os.environ.get('JUJU_REPOSITORY') if repo: repo = path(repo) if repo.exists(): od = repo elif ":" in od: od = od.basename log.info("Composing into {}".format(od)) self.output_dir = od def clean_removed(self, signatures): """ Clean up any files that were accounted for in the previous build manifest but which have been removed in the current set of sigs. """ old_sigs = json.loads(self.manifest.text())['signatures'] old_files = set(old_sigs.keys()) - {'.build.manifest'} new_files = set(signatures.keys()) removed = old_files - new_files for filename in removed: filepath = self.target_dir / filename filepath.remove() return removed def write_report(self, new_repo, added, changed, removed): """ Log messages indicating what changed with this (re)build. """ log.info('') log.info('---------------------------------------') log.info(' Build Report') log.info('---------------------------------------') if new_repo: log.info('New build; all files were modified.') return elif any([added, changed, removed]): sigils = ['+', ' ', '-'] for sigil, filenames in zip(sigils, [added, changed, removed]): for filename in filenames: log.info(' {} {}'.format(sigil, filename)) else: log.info('No new changes; no files were modified.') def configLogging(build): global log logging.captureWarnings(True) clifmt = utils.ColoredFormatter( blessings.Terminal(), '%(name)s: %(message)s') root_logger = logging.getLogger() clihandler = logging.StreamHandler(sys.stdout) clihandler.setFormatter(clifmt) if isinstance(build.log_level, str): build.log_level = build.log_level.upper() root_logger.setLevel(build.log_level) log.setLevel(build.log_level) root_logger.addHandler(clihandler) requests_logger = logging.getLogger("requests") requests_logger.setLevel(logging.WARN) urllib_logger = logging.getLogger("urllib3") urllib_logger.setLevel(logging.CRITICAL) def inspect(args=None): build = Builder() parser = argparse.ArgumentParser( description='inspect the layers of a built charm') parser.add_argument('-r', '--force-raw', action="store_true", help="Force raw output (color)") parser.add_argument('-l', '--log-level', default=logging.INFO) parser.add_argument('charm', nargs="?", default=".", type=path) utils.add_plugin_description(parser) # Namespace will set the options as attrs of build parser.parse_args(args, namespace=build) configLogging(build) build.inspect() def deprecated_main(): namemap = { 'compose': 'build', 'generate': 'build', 'refresh': 'build', 'inspect': 'layers', } cmd = sys.argv[0] if "-" in cmd: old = cmd.rsplit('-', 1)[-1] else: old = sys.argv[1] new = namemap[old] class MockBuild(object): log_level = 'INFO' configLogging(MockBuild) msg = "{} has been deprecated, please use {}".format(old, new) if '--description' in sys.argv: print(msg) else: log.critical(msg) def main(args=None): build = Builder() parser = argparse.ArgumentParser( description="build a charm from layers and interfaces", formatter_class=argparse.RawDescriptionHelpFormatter,) parser.add_argument('-l', '--log-level', default=logging.INFO) parser.add_argument('-f', '--force', action="store_true") parser.add_argument('-o', '--output-dir', type=path) parser.add_argument('-s', '--series', default=None) 
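    # Layer and interface names used in the build are posted to the
    # interface service as usage metrics unless --hide-metrics is given
    # (see Builder.post_metrics).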
parser.add_argument('--hide-metrics', dest="hide_metrics", default=False, action="store_true") parser.add_argument('--interface-service', default="http://interfaces.juju.solutions") parser.add_argument('--no-local-layers', action="store_true", help="Don't use local layers when building. " "Forces included layers to be downloaded " "from the interface service.") parser.add_argument('-n', '--name', help="Build a charm of 'name' from 'charm'") parser.add_argument('-r', '--report', action="store_true", help="Show post-build report of changes") parser.add_argument('charm', nargs="?", default=".", type=path) utils.add_plugin_description(parser) # Namespace will set the options as attrs of build parser.parse_args(args, namespace=build) if build.charm == "help": parser.print_help() raise SystemExit(0) # Monkey patch in the domain for the interface webservice InterfaceFetcher.INTERFACE_DOMAIN = build.interface_service LayerFetcher.INTERFACE_DOMAIN = build.interface_service InterfaceFetcher.NO_LOCAL_LAYERS = build.no_local_layers configLogging(build) if not build.output_dir: build.normalize_outputdir() if not build.series: build.check_series() try: build() except (BuildError, FetchError) as e: log.error(*e.args) raise SystemExit(1) if __name__ == '__main__': main() charm-tools-2.1.2/charmtools/__init__.py0000775000175000017500000000424712650157641020477 0ustar marcomarco00000000000000#!/usr/bin/env python # Copyright (C) 2013 Marco Ceppi . # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . import os import sys import subprocess from . import cli from . import version def charm(): if len(sys.argv) < 2: cli.usage(1) sub = sys.argv[1] opts = sys.argv[2:] if sub == '--description': sys.stdout.write("Tools for authoring and maintaining charms\n") sys.exit(0) if sub == '--help': cli.usage(0) if sub == '--version': version.main() sys.exit(0) if sub == '--list': print '\n'.join(cli.subcommands(os.path.realpath(__file__))) sys.exit(0) sub_exec = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), "charm-%s%s" % (sub, cli.ext())) if not os.path.exists(sub_exec): sys.stderr.write('Error: %s is not a valid subcommand\n\n' % sub) cli.usage(2) sys.exit(subprocess.call([sub_exec] + opts)) def bundle(): if len(sys.argv) < 2: cli.usage(0) sub = sys.argv[1] opts = sys.argv[2:] if sub == '--description': sys.stdout.write("Tools for managing bundles\n") sys.exit(0) if sub == '--help': cli.usage(0) sub_exec = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), "charm-%s%s" % (sub, cli.ext())) if not os.path.exists(sub_exec): sys.stderr.write('Error: %s is not a valid subcommand\n\n' % sub) cli.usage(2) sys.exit(subprocess.call([sub_exec, '--bundle'] + opts)) if __name__ == '__main__': charm() charm-tools-2.1.2/charmtools/repofinder.py0000664000175000017500000000511712676466472021104 0ustar marcomarco00000000000000import re import subprocess from collections import namedtuple from . 
import utils


def get_recommended_repo(path):
    """Given vcs directory ``path``, returns the url from which the repo
    can be cloned.

    For git, an 'upstream' remote will be preferred over 'origin'.
    For bzr, the :parent: branch will be preferred.
    For hg, the 'default' alias will be preferred.

    Returns None if the directory is not a repo, or a remote url can not
    be determined.

    :param path: A :class:`path.path` to a directory
    :return: A url string, or None
    """
    Command = namedtuple("Command", "args parse")
    cmds = [
        Command(['git', 'remote', '-v'], _parse_git),
        Command(['bzr', 'info'], _parse_bzr),
        Command(['hg', 'paths'], _parse_hg),
    ]
    if not path.exists():
        return None
    with utils.cd(str(path)):
        for cmd in cmds:
            try:
                output = subprocess.check_output(cmd.args)
                if output:
                    repo = cmd.parse(output)
                    if repo:
                        return repo
            except (subprocess.CalledProcessError, OSError):
                continue


def _parse_git(txt):
    pat = re.compile(
        r'(?P<name>\S+)\s+(?P<url>\S+)\s+\((?P<type>[^\)]+)\)')
    urls = {}
    for line in txt.split('\n'):
        match = pat.search(line)
        if match:
            d = match.groupdict()
            if d['name'] == 'upstream' and d['type'] == 'fetch':
                return d['url'].strip()
            elif d['type'] == 'fetch':
                urls[d['name']] = d['url'].strip()
    if 'origin' in urls:
        return urls['origin']
    for url in urls.values():
        return url


def _parse_bzr(txt):
    branch_types = ['parent', 'push', 'submit']
    pat = re.compile(
        r'(?P<branch_type>({})) branch: (?P<url>.*)'.format(
            '|'.join(branch_types)))
    matches = {}
    for line in txt.split('\n'):
        match = pat.search(line)
        if match:
            d = match.groupdict()
            matches[d['branch_type']] = d['url'].strip()
    if not matches:
        return
    for typ in branch_types:
        url = matches.get(typ)
        if url:
            return url


def _parse_hg(txt):
    pat = re.compile(r'(?P<name>[^\s]+) = (?P<url>.*)')
    urls = []
    for line in txt.split('\n'):
        match = pat.search(line)
        if match:
            d = match.groupdict()
            if d['name'] == 'default':
                return d['url'].strip()
            else:
                urls.append(d['url'].strip())
    return urls[0] if urls else None
charm-tools-2.1.2/charmtools/charms.py0000664000175000017500000006601212676755463020222 0ustar marcomarco00000000000000import os
import re
import hashlib
import email.utils
import colander
import yaml

from stat import ST_MODE
from stat import S_IXUSR

from linter import Linter
from launchpadlib.launchpad import Launchpad

KNOWN_METADATA_KEYS = [
    'name',
    'summary',
    'maintainer',
    'maintainers',
    'min-juju-version',
    'description',
    'categories',
    'subordinate',
    'provides',
    'requires',
    'format',
    'peers',
    'tags',
    'series',
    'storage',
    'extra-bindings',
]

KNOWN_RELATION_KEYS = ['interface', 'scope', 'limit', 'optional']

KNOWN_SCOPES = ['global', 'container']

TEMPLATE_PATH = os.path.abspath(os.path.dirname(__file__))

TEMPLATE_README = os.path.join(
    TEMPLATE_PATH, 'templates', 'bash', 'files', 'README.ex')

TEMPLATE_ICON = os.path.join(
    TEMPLATE_PATH, 'templates', 'bash', 'files', 'icon.svg')

KNOWN_OPTION_KEYS = set(('description', 'type', 'default'))

KNOWN_OPTION_TYPES = {
    'string': basestring,
    'int': int,
    'float': float,
    'boolean': bool,
}

ALLOW_NONE_DEFAULT = (basestring, int, float)


class RelationError(Exception):
    pass


class CharmLinter(Linter):

    # _WINDOWS_HOOKS_EXTS is the list of possible extensions for hooks
    # on Windows. File extensions must be present in Windows and thus
    # we must specially check for them when linting the hooks.
    _WINDOWS_HOOKS_EXTS = [".ps1", ".cmd", ".bat", ".exe"]

    def check_hook(self, hook, hooks_path, recommended=False):
        hook_path = os.path.join(hooks_path, hook)
        ispscharm = False  # flag to indicate whether PowerShell charm or not.
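        # e.g. for the "install" hook this also considers install.ps1,
        # install.cmd, install.bat and install.exe (_WINDOWS_HOOKS_EXTS).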
# iterate through the possible hook-extension # combinations and find the right one: for path in [hook_path + ext for ext in self._WINDOWS_HOOKS_EXTS]: if os.path.isfile(path): hook_path = path ispscharm = True break try: mode = os.stat(hook_path)[ST_MODE] # NOTE: hooks on Windows are judged as executable depending on # their extension; not their mode. if (not mode & S_IXUSR) and not ispscharm: self.info(hook + " not executable") with open(hook_path, 'r') as hook_file: count = 0 for line in hook_file: count += 1 hook_warnings = [ {'re': re.compile("http://169\.254\.169\.254/"), 'msg': "hook accesses EC2 metadata service directly"}] for warning in hook_warnings: if warning['re'].search(line): self.warn( "(%s:%d) - %s" % (hook, count, warning['msg'])) return True except OSError: if recommended: self.info("missing recommended hook " + hook) return False def check_relation_hooks(self, relations, subordinate, hooks_path): template_interfaces = ('interface-name') template_relations = ('relation-name') for r in relations.items(): if r[0].startswith('juju-'): self.info('juju-* is a reserved relation name') if type(r[1]) != dict: self.err("relation %s is not a map" % (r[0])) else: if 'scope' in r[1]: scope = r[1]['scope'] if scope not in KNOWN_SCOPES: self.err("Unknown scope found in relation %s - (%s)" % (r[0], scope)) if 'interface' in r[1]: interface = r[1]['interface'] if interface in template_interfaces: self.err("template interface names should be " "changed: " + interface) else: self.err("relation missing interface") for key in r[1].keys(): if key not in KNOWN_RELATION_KEYS: self.err( "Unknown relation field in relation %s - (%s)" % (r[0], key)) r = r[0] if r in template_relations: self.err("template relations should be renamed to fit " "charm: " + r) has_one = False has_one = has_one or self.check_hook( r + '-relation-changed', hooks_path) has_one = has_one or self.check_hook( r + '-relation-departed', hooks_path) has_one = has_one or self.check_hook( r + '-relation-joined', hooks_path) has_one = has_one or self.check_hook( r + '-relation-broken', hooks_path) if not has_one and not subordinate: self.info("relation " + r + " has no hooks") def check_config_file(self, charm_path): config_path = os.path.join(charm_path, 'config.yaml') if not os.path.isfile(config_path): self.info('File config.yaml not found.') return try: with open(config_path) as config_file: config = yaml.safe_load(config_file.read()) except Exception as error: self.err('Cannot parse config.yaml: %s' % error) return if not isinstance(config, dict): self.err('config.yaml not parsed into a dictionary.') return if 'options' not in config: self.err('config.yaml must have an "options" key.') return if len(config) > 1: wrong_keys = sorted(config) wrong_keys.pop(wrong_keys.index('options')) self.warn('Ignored keys in config.yaml: %s' % wrong_keys) options = config['options'] if not isinstance(options, dict): self.err( 'config.yaml: options section is not parsed as a dictionary') return for option_name, option_value in options.items(): if not re.match('^[a-z0-9]+[\w-]+[a-z0-9]+$', option_name, flags=re.IGNORECASE): self.err('config.yaml: %s does not conform to naming pattern' % option_name) if not isinstance(option_value, dict): self.err( 'config.yaml: data for option %s is not a dict' % option_name) continue existing_keys = set(option_value) missing_keys = KNOWN_OPTION_KEYS - existing_keys if missing_keys: self.warn( 'config.yaml: option %s does not have the keys: %s' % ( option_name, ', '.join(sorted(missing_keys)))) invalid_keys 
= existing_keys - KNOWN_OPTION_KEYS if invalid_keys: invalid_keys = [str(key) for key in sorted(invalid_keys)] self.warn( 'config.yaml: option %s has unknown keys: %s' % ( option_name, ', '.join(invalid_keys))) if 'description' in existing_keys: if not isinstance(option_value['description'], basestring) or \ option_value['description'].strip() == '': self.warn( 'config.yaml: description of option %s should be a ' 'non-empty string' % option_name) option_type = option_value.get('type', 'string') if option_type not in KNOWN_OPTION_TYPES: self.warn('config.yaml: option %s has an invalid type (%s)' % (option_name, option_type)) elif 'default' in option_value: expected_type = KNOWN_OPTION_TYPES[option_type] actual_value = option_value['default'] if actual_value is None: notify = (self.info if expected_type in ALLOW_NONE_DEFAULT else self.warn) notify( 'config.yaml: option %s has no default value' % option_name) elif not isinstance(actual_value, expected_type): self.err( 'config.yaml: type of option %s is specified as ' '%s, but the type of the default value is %s' % (option_name, option_type, type(actual_value).__name__)) else: # Nothing to do: the option type is valid but no default # value exists. pass class Charm(object): def __init__(self, path): self.charm_path = path if not self.is_charm(): raise Exception('Not a Charm') def is_charm(self): return os.path.isfile(os.path.join(self.charm_path, 'metadata.yaml')) def proof(self): lint = CharmLinter() charm_name = self.charm_path if os.path.isdir(charm_name): charm_path = charm_name else: charm_home = os.getenv('CHARM_HOME', '.') charm_path = os.path.join(charm_home, charm_name) if not os.path.isdir(charm_path): lint.crit("%s is not a directory, Aborting" % charm_path) return lint.lint, lint.exit_code hooks_path = os.path.join(charm_path, 'hooks') actions_path = os.path.join(charm_path, 'actions') yaml_path = os.path.join(charm_path, 'metadata.yaml') actions_yaml_file = os.path.join(charm_path, 'actions.yaml') try: yamlfile = open(yaml_path, 'r') try: charm = yaml.safe_load(yamlfile) except Exception as e: lint.crit('cannot parse ' + yaml_path + ":" + str(e)) return lint.lint, lint.exit_code yamlfile.close() for key in charm.keys(): if key not in KNOWN_METADATA_KEYS: lint.err("Unknown root metadata field (%s)" % key) charm_basename = os.path.basename(charm_path) if charm['name'] != charm_basename: msg = ( "metadata name (%s) must match directory name (%s)" " exactly for local deployment.") % (charm['name'], charm_basename) lint.info(msg) # summary should be short if len(charm['summary']) > 72: lint.warn('summary should be less than 72') validate_maintainer(charm, lint) validate_categories_and_tags(charm, lint) validate_storage(charm, lint) validate_series(charm, lint) validate_min_juju_version(charm, lint) validate_extra_bindings(charm, lint) validate_payloads(charm, lint) if not os.path.exists(os.path.join(charm_path, 'icon.svg')): lint.info("No icon.svg file.") else: # should have an icon.svg template_sha1 = hashlib.sha1() icon_sha1 = hashlib.sha1() try: with open(TEMPLATE_ICON) as ti: template_sha1.update(ti.read()) with open(os.path.join(charm_path, 'icon.svg')) as ci: icon_sha1.update(ci.read()) if template_sha1.hexdigest() == icon_sha1.hexdigest(): lint.info("Includes template icon.svg file.") except IOError as e: lint.info( "Error while opening %s (%s)" % (e.filename, e.strerror)) # Must have a hooks dir if not os.path.exists(hooks_path): lint.info("no hooks directory") # Must have a copyright file if not 
os.path.exists(os.path.join(charm_path, 'copyright')): lint.warn("no copyright file") # should have a readme root_files = os.listdir(charm_path) found_readmes = set() for filename in root_files: if filename.upper().find('README') != -1: found_readmes.add(filename) if len(found_readmes): if 'README.ex' in found_readmes: lint.warn("Includes template README.ex file") try: with open(TEMPLATE_README) as tr: bad_lines = [] for line in tr: if len(line) >= 40: bad_lines.append(line.strip()) for readme in found_readmes: readme_path = os.path.join(charm_path, readme) with open(readme_path) as r: readme_content = r.read() lc = 0 for l in bad_lines: if not len(l): continue lc += 1 if l in readme_content: err_msg = ('%s includes boilerplate: ' '%s') lint.warn(err_msg % (readme, l)) except IOError as e: lint.warn( "Error while opening %s (%s)" % (e.filename, e.strerror)) else: lint.warn("no README file") subordinate = charm.get('subordinate', False) if type(subordinate) != bool: lint.err("subordinate must be a boolean value") # All charms should provide at least one thing provides = charm.get('provides') if provides is not None: lint.check_relation_hooks(provides, subordinate, hooks_path) else: if not subordinate: lint.info("all charms should provide at least one thing") if subordinate: try: requires = charm.get('requires') if requires is not None: found_scope_container = False for rel_name, rel in requires.iteritems(): if 'scope' in rel: if rel['scope'] == 'container': found_scope_container = True break if not found_scope_container: raise RelationError else: raise RelationError except RelationError: lint.err("subordinates must have at least one scope: " "container relation") else: requires = charm.get('requires') if requires is not None: lint.check_relation_hooks(requires, subordinate, hooks_path) peers = charm.get('peers') if peers is not None: lint.check_relation_hooks(peers, subordinate, hooks_path) if 'revision' in charm: lint.warn("Revision should not be stored in metadata.yaml " "anymore. 
Move it to the revision file") # revision must be an integer try: x = int(charm['revision']) if x < 0: raise ValueError except (TypeError, ValueError): lint.warn("revision should be a positive integer") lint.check_hook('install', hooks_path, recommended=True) lint.check_hook('start', hooks_path, recommended=True) lint.check_hook('stop', hooks_path, recommended=True) if os.path.exists(os.path.join(charm_path, 'config.yaml')): lint.check_hook('config-changed', hooks_path, recommended=True) else: lint.check_hook('config-changed', hooks_path) if os.path.exists(actions_yaml_file): with open(actions_yaml_file) as f: try: actions = yaml.safe_load(f.read()) except Exception as e: lint.crit('cannot parse ' + actions_yaml_file + ":" + str(e)) validate_actions(actions, actions_path, lint) except IOError: lint.err("could not find metadata file for " + charm_name) lint.exit_code = -1 # Should not have autogen test if os.path.exists(os.path.join(charm_path, 'tests', '00-autogen')): lint.warn('Includes template test file, tests/00-autogen') rev_path = os.path.join(charm_path, 'revision') if os.path.exists(rev_path): with open(rev_path, 'r') as rev_file: content = rev_file.read().rstrip() try: int(content) except ValueError: lint.err("revision file contains non-numeric data") lint.check_config_file(charm_path) return lint.lint, lint.exit_code def metadata(self): metadata = None with open(os.path.join(self.charm_path, 'metadata.yaml')) as f: metadata = yaml.safe_load(f.read()) return metadata def promulgate(self): pass class Boolean(object): def deserialize(self, node, cstruct): if cstruct is colander.null: return colander.null if isinstance(cstruct, bool): cstruct = str(cstruct).lower() if cstruct not in ('true', 'false'): raise colander.Invalid( node, '"%s" is not one of true, false' % cstruct) class StorageItem(colander.MappingSchema): def schema_type(self, **kw): return colander.Mapping(unknown='raise') type_ = colander.SchemaNode( colander.String(), validator=colander.OneOf(['filesystem', 'block']), name='type', ) description = colander.SchemaNode( colander.String(), missing='', ) shared = colander.SchemaNode( Boolean(), missing=False, ) read_only = colander.SchemaNode( Boolean(), missing=False, name='read-only', ) minimum_size = colander.SchemaNode( colander.String(), validator=colander.Regex( r'^\d+[MGTP]?$', msg='must be a number followed by an optional ' 'M/G/T/P, e.g. 100M' ), missing='', name='minimum-size', ) location = colander.SchemaNode( colander.String(), missing='', ) @colander.instantiate(missing={}) class multiple(colander.MappingSchema): def schema_type(self, **kw): return colander.Mapping(unknown='raise') range_ = colander.SchemaNode( colander.String(), validator=colander.Regex( r'^\d+-?(\d+)?$', msg='supported formats are: m (a fixed number), ' 'm-n (an explicit range), and m- (a minimum number)' ), name='range', ) class PayloadItem(colander.MappingSchema): def schema_type(self, **kw): return colander.Mapping(unknown='raise') type_ = colander.SchemaNode( colander.String(), validator=colander.OneOf(['kvm', 'docker']), name='type', ) def validate_extra_bindings(charm, linter): """Validate extra-bindings in charm metadata. 
:param charm: dict of charm metadata parsed from metadata.yaml :param linter: :class:`CharmLinter` object to which info/warning/error messages will be written """ if 'extra-bindings' not in charm: return if not isinstance(charm['extra-bindings'], dict): linter.err('extra-bindings: must be a dictionary') def validate_min_juju_version(charm, linter): """Validate min-juju-version in charm metadata. Must match the regex and be 2.0.0 or greater. :param charm: dict of charm metadata parsed from metadata.yaml :param linter: :class:`CharmLinter` object to which info/warning/error messages will be written """ if 'min-juju-version' not in charm: return pattern = r'^(\d{1,9})\.(\d{1,9})(\.|-(\w+))(\d{1,9})(\.\d{1,9})?$' match = re.match(pattern, charm['min-juju-version']) if not match: linter.err('min-juju-version: invalid format, try X.Y.Z') return if int(match.group(1)) < 2: linter.err( 'min-juju-version: invalid version, must be 2.0.0 or greater') def validate_series(charm, linter): """Validate supported series list in charm metadata. We don't validate the actual series names because: 1. `charm push` does that anyway 2. our list of valid series would be constantly falling out-of-date :param charm: dict of charm metadata parsed from metadata.yaml :param linter: :class:`CharmLinter` object to which info/warning/error messages will be written """ if 'series' not in charm: return if not isinstance(charm['series'], list): linter.err('series: must be a list of series names') def validate_storage(charm, linter): """Validate storage configuration in charm metadata. :param charm: dict of charm metadata parsed from metadata.yaml :param linter: :class:`CharmLinter` object to which info/warning/error messages will be written """ if 'storage' not in charm: return if (not isinstance(charm['storage'], dict) or not charm['storage']): linter.err('storage: must be a dictionary of storage definitions') return schema = colander.SchemaNode(colander.Mapping()) for storage_def in charm['storage']: schema.add(StorageItem(name=storage_def)) try: schema.deserialize(charm['storage']) except colander.Invalid as e: for k, v in e.asdict().items(): linter.err('storage.{}: {}'.format(k, v)) def validate_payloads(charm, linter): """Validate paylaod configuration in charm metadata. :param charm: dict of charm metadata parsed from metadata.yaml :param linter: :class:`CharmLinter` object to which info/warning/error messages will be written """ if 'payloads' not in charm: return if (not isinstance(charm['payloads'], dict) or not charm['payloads']): linter.err('payloads: must be a dictionary of payload definitions') return schema = colander.SchemaNode(colander.Mapping()) for payload_def in charm['payloads']: schema.add(PayloadItem(name=payload_def)) try: schema.deserialize(charm['payloads']) except colander.Invalid as e: for k, v in e.asdict().items(): linter.err('payloads.{}: {}'.format(k, v)) def validate_actions(actions, action_hooks, linter): """Validate actions in a charm. 
:param actions: dict of charm actions parsed from actions.yaml
    :param action_hooks: path of charm's /actions/ directory
    :param linter: :class:`CharmLinter` object to which info/warning/error
        messages will be written
    """
    if not actions:
        return
    if not isinstance(actions, dict):
        linter.err('actions: must be a dictionary of json schemas')
        return
    # TODO: Schema validation
    for k in actions:
        if k.startswith('juju'):
            linter.err('actions.{}: juju is a reserved namespace'.format(k))
            continue
        h = os.path.join(action_hooks, k)
        if not os.path.isfile(h):
            linter.warn('actions.{0}: actions/{0} does not exist'.format(k))
        elif not os.access(h, os.X_OK):
            linter.err('actions.{0}: actions/{0} is not executable'.format(k))


def validate_maintainer(charm, linter):
    """Validate maintainer info in charm metadata.

    :param charm: dict of charm metadata parsed from metadata.yaml
    :param linter: :class:`CharmLinter` object to which info/warning/error
        messages will be written
    """
    if 'maintainer' in charm and 'maintainers' in charm:
        linter.err(
            'Charm must not have both maintainer and maintainers fields')
        return

    if 'maintainer' not in charm and 'maintainers' not in charm:
        linter.err(
            'Charm must have either a maintainer or maintainers field')
        return

    maintainers = []
    if 'maintainer' in charm:
        if isinstance(charm['maintainer'], list):
            linter.err('Maintainer field must not be a list')
            return
        maintainers = [charm['maintainer']]
    elif 'maintainers' in charm:
        if not isinstance(charm['maintainers'], list):
            linter.err('Maintainers field must be a list')
            return
        maintainers = charm['maintainers']

    for maintainer in maintainers:
        (name, address) = email.utils.parseaddr(maintainer)
        formatted = email.utils.formataddr((name, address))
        if formatted.replace('"', '') != maintainer:
            linter.warn(
                'Maintainer format should be "Name <email>", '
                'not "%s"' % formatted)


def validate_categories_and_tags(charm, linter):
    if 'categories' not in charm and 'tags' not in charm:
        linter.warn('Metadata missing required field "tags"')
        return

    if 'tags' in charm:
        tags = charm['tags']
        if type(tags) != list or tags == []:
            linter.warn('Metadata field "tags" must be a non-empty list')

    if 'categories' in charm:
        categories = charm['categories']
        if type(categories) != list or categories == []:
            # The category names are not validated because they may
            # change.
            linter.warn(
                'Categories metadata must be a list of one or more of: '
                'applications, app-servers, databases, file-servers, '
                'cache-proxy, misc')
        linter.warn(
            'Categories are being deprecated in favor of tags. '
            'Please rename the "categories" field to "tags".'
        )


def remote():
    lp = Launchpad.login_anonymously('charm-tools', 'production',
                                     version='devel')
    charm = lp.distributions['charms']
    current_series = str(charm.current_series).split('/').pop()
    branches = charm.getBranchTips()
    charms = []

    for branch in branches:
        try:
            branch_series = str(branch[2][0]).split('/')[0]
            charm_name = str(branch[0]).split('/')[3]
        except IndexError:
            branch_series = ''
        if branch_series == current_series:
            charms.append("lp:charms/%s" % charm_name)
        else:
            charms.append("lp:%s" % branch[0])

    return charms


def local(directory):
    '''Show charms that actually exist locally.

    Different than Mr.list'''
    local_charms = []
    for charm in os.listdir(directory):
        if os.path.exists(os.path.join(directory, charm, '.bzr')):
            local_charms.append(charm)

    return local_charms
charm-tools-2.1.2/charmtools/linter.py0000664000175000017500000000123512650157641020224 0ustar marcomarco00000000000000
class Linter(object):

    def __init__(self, debug=False):
        self.lint = []
        self.exit_code = 0
        self.debug = debug

    def crit(self, msg):
        """Called when checking cannot continue."""
        self.err("FATAL: " + msg)

    def err(self, msg):
        self.lint.append("E: " + msg)
        if self.exit_code < 200:
            self.exit_code = 200

    def info(self, msg):
        """Ignorable but sometimes useful."""
        self.lint.append("I: " + msg)

    def warn(self, msg):
        self.lint.append("W: " + msg)
        if self.exit_code < 100:
            self.exit_code = 100
charm-tools-2.1.2/charmtools/unpromulgate.py0000664000175000017500000000162412650157641021453 0ustar marcomarco00000000000000#!/usr/bin/env python
# Copyright (C) 2013 Marco Ceppi <marco@ceppi.net>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import sys

from .promulgate import parse_options, main_


def main():
    options, args = parse_options(unprom_opt=False)
    return main_(options, args)


if __name__ == '__main__':
    sys.exit(main())
charm-tools-2.1.2/charmtools/diff_match_patch.py0000664000175000017500000020453612650157641022203 0ustar marcomarco00000000000000#!/usr/bin/python2.4

from __future__ import division

"""Diff Match and Patch

Copyright 2006 Google Inc.
http://code.google.com/p/google-diff-match-patch/

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

"""Functions for diff, match and patch.

Computes the difference between two texts to create a patch.
Applies the patch onto another text, allowing for errors.
"""

__author__ = 'fraser@google.com (Neil Fraser)'

import math
import re
import sys
import time
import urllib

class diff_match_patch:
  """Class containing the diff, match and patch methods.

  Also contains the behaviour settings.
  """

  def __init__(self):
    """Inits a diff_match_patch object with default settings.
    Redefine these in your program to override the defaults.
    """

    # Number of seconds to map a diff before giving up (0 for infinity).
    self.Diff_Timeout = 1.0
    # Cost of an empty edit operation in terms of edit characters.
    self.Diff_EditCost = 4
    # At what point is no match declared (0.0 = perfection, 1.0 = very loose).
    self.Match_Threshold = 0.5
    # How far to search for a match (0 = exact location, 1000+ = broad match).
# A match this many characters away from the expected location will add # 1.0 to the score (0.0 is a perfect match). self.Match_Distance = 1000 # When deleting a large block of text (over ~64 characters), how close do # the contents have to be to match the expected contents. (0.0 = perfection, # 1.0 = very loose). Note that Match_Threshold controls how closely the # end points of a delete need to match. self.Patch_DeleteThreshold = 0.5 # Chunk size for context length. self.Patch_Margin = 4 # The number of bits in an int. # Python has no maximum, thus to disable patch splitting set to 0. # However to avoid long patches in certain pathological cases, use 32. # Multiple short patches (using native ints) are much faster than long ones. self.Match_MaxBits = 32 # DIFF FUNCTIONS # The data structure representing a diff is an array of tuples: # [(DIFF_DELETE, "Hello"), (DIFF_INSERT, "Goodbye"), (DIFF_EQUAL, " world.")] # which means: delete "Hello", add "Goodbye" and keep " world." DIFF_DELETE = -1 DIFF_INSERT = 1 DIFF_EQUAL = 0 def diff_main(self, text1, text2, checklines=True, deadline=None): """Find the differences between two texts. Simplifies the problem by stripping any common prefix or suffix off the texts before diffing. Args: text1: Old string to be diffed. text2: New string to be diffed. checklines: Optional speedup flag. If present and false, then don't run a line-level diff first to identify the changed areas. Defaults to true, which does a faster, slightly less optimal diff. deadline: Optional time when the diff should be complete by. Used internally for recursive calls. Users should set DiffTimeout instead. Returns: Array of changes. """ # Set a deadline by which time the diff must be complete. if deadline == None: # Unlike in most languages, Python counts time in seconds. if self.Diff_Timeout <= 0: deadline = sys.maxint else: deadline = time.time() + self.Diff_Timeout # Check for null inputs. if text1 == None or text2 == None: raise ValueError("Null inputs. (diff_main)") # Check for equality (speedup). if text1 == text2: if text1: return [(self.DIFF_EQUAL, text1)] return [] # Trim off common prefix (speedup). commonlength = self.diff_commonPrefix(text1, text2) commonprefix = text1[:commonlength] text1 = text1[commonlength:] text2 = text2[commonlength:] # Trim off common suffix (speedup). commonlength = self.diff_commonSuffix(text1, text2) if commonlength == 0: commonsuffix = '' else: commonsuffix = text1[-commonlength:] text1 = text1[:-commonlength] text2 = text2[:-commonlength] # Compute the diff on the middle block. diffs = self.diff_compute(text1, text2, checklines, deadline) # Restore the prefix and suffix. if commonprefix: diffs[:0] = [(self.DIFF_EQUAL, commonprefix)] if commonsuffix: diffs.append((self.DIFF_EQUAL, commonsuffix)) self.diff_cleanupMerge(diffs) return diffs def diff_compute(self, text1, text2, checklines, deadline): """Find the differences between two texts. Assumes that the texts do not have any common prefix or suffix. Args: text1: Old string to be diffed. text2: New string to be diffed. checklines: Speedup flag. If false, then don't run a line-level diff first to identify the changed areas. If true, then run a faster, slightly less optimal diff. deadline: Time when the diff should be complete by. Returns: Array of changes. """ if not text1: # Just add some text (speedup). return [(self.DIFF_INSERT, text2)] if not text2: # Just delete some text (speedup). 
return [(self.DIFF_DELETE, text1)] if len(text1) > len(text2): (longtext, shorttext) = (text1, text2) else: (shorttext, longtext) = (text1, text2) i = longtext.find(shorttext) if i != -1: # Shorter text is inside the longer text (speedup). diffs = [(self.DIFF_INSERT, longtext[:i]), (self.DIFF_EQUAL, shorttext), (self.DIFF_INSERT, longtext[i + len(shorttext):])] # Swap insertions for deletions if diff is reversed. if len(text1) > len(text2): diffs[0] = (self.DIFF_DELETE, diffs[0][1]) diffs[2] = (self.DIFF_DELETE, diffs[2][1]) return diffs if len(shorttext) == 1: # Single character string. # After the previous speedup, the character can't be an equality. return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)] # Check to see if the problem can be split in two. hm = self.diff_halfMatch(text1, text2) if hm: # A half-match was found, sort out the return data. (text1_a, text1_b, text2_a, text2_b, mid_common) = hm # Send both pairs off for separate processing. diffs_a = self.diff_main(text1_a, text2_a, checklines, deadline) diffs_b = self.diff_main(text1_b, text2_b, checklines, deadline) # Merge the results. return diffs_a + [(self.DIFF_EQUAL, mid_common)] + diffs_b if checklines and len(text1) > 100 and len(text2) > 100: return self.diff_lineMode(text1, text2, deadline) return self.diff_bisect(text1, text2, deadline) def diff_lineMode(self, text1, text2, deadline): """Do a quick line-level diff on both strings, then rediff the parts for greater accuracy. This speedup can produce non-minimal diffs. Args: text1: Old string to be diffed. text2: New string to be diffed. deadline: Time when the diff should be complete by. Returns: Array of changes. """ # Scan the text on a line-by-line basis first. (text1, text2, linearray) = self.diff_linesToChars(text1, text2) diffs = self.diff_main(text1, text2, False, deadline) # Convert the diff back to original text. self.diff_charsToLines(diffs, linearray) # Eliminate freak matches (e.g. blank lines) self.diff_cleanupSemantic(diffs) # Rediff any replacement blocks, this time character-by-character. # Add a dummy entry at the end. diffs.append((self.DIFF_EQUAL, '')) pointer = 0 count_delete = 0 count_insert = 0 text_delete = '' text_insert = '' while pointer < len(diffs): if diffs[pointer][0] == self.DIFF_INSERT: count_insert += 1 text_insert += diffs[pointer][1] elif diffs[pointer][0] == self.DIFF_DELETE: count_delete += 1 text_delete += diffs[pointer][1] elif diffs[pointer][0] == self.DIFF_EQUAL: # Upon reaching an equality, check for prior redundancies. if count_delete >= 1 and count_insert >= 1: # Delete the offending records and add the merged ones. a = self.diff_main(text_delete, text_insert, False, deadline) diffs[pointer - count_delete - count_insert : pointer] = a pointer = pointer - count_delete - count_insert + len(a) count_insert = 0 count_delete = 0 text_delete = '' text_insert = '' pointer += 1 diffs.pop() # Remove the dummy entry at the end. return diffs def diff_bisect(self, text1, text2, deadline): """Find the 'middle snake' of a diff, split the problem in two and return the recursively constructed diff. See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations. Args: text1: Old string to be diffed. text2: New string to be diffed. deadline: Time at which to bail if not yet complete. Returns: Array of diff tuples. """ # Cache the text lengths to prevent multiple calls. 
text1_length = len(text1) text2_length = len(text2) max_d = (text1_length + text2_length + 1) // 2 v_offset = max_d v_length = 2 * max_d v1 = [-1] * v_length v1[v_offset + 1] = 0 v2 = v1[:] delta = text1_length - text2_length # If the total number of characters is odd, then the front path will # collide with the reverse path. front = (delta % 2 != 0) # Offsets for start and end of k loop. # Prevents mapping of space beyond the grid. k1start = 0 k1end = 0 k2start = 0 k2end = 0 for d in xrange(max_d): # Bail out if deadline is reached. if time.time() > deadline: break # Walk the front path one step. for k1 in xrange(-d + k1start, d + 1 - k1end, 2): k1_offset = v_offset + k1 if k1 == -d or (k1 != d and v1[k1_offset - 1] < v1[k1_offset + 1]): x1 = v1[k1_offset + 1] else: x1 = v1[k1_offset - 1] + 1 y1 = x1 - k1 while (x1 < text1_length and y1 < text2_length and text1[x1] == text2[y1]): x1 += 1 y1 += 1 v1[k1_offset] = x1 if x1 > text1_length: # Ran off the right of the graph. k1end += 2 elif y1 > text2_length: # Ran off the bottom of the graph. k1start += 2 elif front: k2_offset = v_offset + delta - k1 if k2_offset >= 0 and k2_offset < v_length and v2[k2_offset] != -1: # Mirror x2 onto top-left coordinate system. x2 = text1_length - v2[k2_offset] if x1 >= x2: # Overlap detected. return self.diff_bisectSplit(text1, text2, x1, y1, deadline) # Walk the reverse path one step. for k2 in xrange(-d + k2start, d + 1 - k2end, 2): k2_offset = v_offset + k2 if k2 == -d or (k2 != d and v2[k2_offset - 1] < v2[k2_offset + 1]): x2 = v2[k2_offset + 1] else: x2 = v2[k2_offset - 1] + 1 y2 = x2 - k2 while (x2 < text1_length and y2 < text2_length and text1[-x2 - 1] == text2[-y2 - 1]): x2 += 1 y2 += 1 v2[k2_offset] = x2 if x2 > text1_length: # Ran off the left of the graph. k2end += 2 elif y2 > text2_length: # Ran off the top of the graph. k2start += 2 elif not front: k1_offset = v_offset + delta - k2 if k1_offset >= 0 and k1_offset < v_length and v1[k1_offset] != -1: x1 = v1[k1_offset] y1 = v_offset + x1 - k1_offset # Mirror x2 onto top-left coordinate system. x2 = text1_length - x2 if x1 >= x2: # Overlap detected. return self.diff_bisectSplit(text1, text2, x1, y1, deadline) # Diff took too long and hit the deadline or # number of diffs equals number of characters, no commonality at all. return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)] def diff_bisectSplit(self, text1, text2, x, y, deadline): """Given the location of the 'middle snake', split the diff in two parts and recurse. Args: text1: Old string to be diffed. text2: New string to be diffed. x: Index of split point in text1. y: Index of split point in text2. deadline: Time at which to bail if not yet complete. Returns: Array of diff tuples. """ text1a = text1[:x] text2a = text2[:y] text1b = text1[x:] text2b = text2[y:] # Compute both diffs serially. diffs = self.diff_main(text1a, text2a, False, deadline) diffsb = self.diff_main(text1b, text2b, False, deadline) return diffs + diffsb def diff_linesToChars(self, text1, text2): """Split two texts into an array of strings. Reduce the texts to a string of hashes where each Unicode character represents one line. Args: text1: First string. text2: Second string. Returns: Three element tuple, containing the encoded text1, the encoded text2 and the array of unique strings. The zeroth element of the array of unique strings is intentionally blank. """ lineArray = [] # e.g. lineArray[4] == "Hello\n" lineHash = {} # e.g. lineHash["Hello\n"] == 4 # "\x00" is a valid character, but various debuggers don't like it. 
# So we'll insert a junk entry to avoid generating a null character. lineArray.append('') def diff_linesToCharsMunge(text): """Split a text into an array of strings. Reduce the texts to a string of hashes where each Unicode character represents one line. Modifies linearray and linehash through being a closure. Args: text: String to encode. Returns: Encoded string. """ chars = [] # Walk the text, pulling out a substring for each line. # text.split('\n') would would temporarily double our memory footprint. # Modifying text would create many large strings to garbage collect. lineStart = 0 lineEnd = -1 while lineEnd < len(text) - 1: lineEnd = text.find('\n', lineStart) if lineEnd == -1: lineEnd = len(text) - 1 line = text[lineStart:lineEnd + 1] lineStart = lineEnd + 1 if line in lineHash: chars.append(unichr(lineHash[line])) else: lineArray.append(line) lineHash[line] = len(lineArray) - 1 chars.append(unichr(len(lineArray) - 1)) return "".join(chars) chars1 = diff_linesToCharsMunge(text1) chars2 = diff_linesToCharsMunge(text2) return (chars1, chars2, lineArray) def diff_charsToLines(self, diffs, lineArray): """Rehydrate the text in a diff from a string of line hashes to real lines of text. Args: diffs: Array of diff tuples. lineArray: Array of unique strings. """ for x in xrange(len(diffs)): text = [] for char in diffs[x][1]: text.append(lineArray[ord(char)]) diffs[x] = (diffs[x][0], "".join(text)) def diff_commonPrefix(self, text1, text2): """Determine the common prefix of two strings. Args: text1: First string. text2: Second string. Returns: The number of characters common to the start of each string. """ # Quick check for common null cases. if not text1 or not text2 or text1[0] != text2[0]: return 0 # Binary search. # Performance analysis: http://neil.fraser.name/news/2007/10/09/ pointermin = 0 pointermax = min(len(text1), len(text2)) pointermid = pointermax pointerstart = 0 while pointermin < pointermid: if text1[pointerstart:pointermid] == text2[pointerstart:pointermid]: pointermin = pointermid pointerstart = pointermin else: pointermax = pointermid pointermid = (pointermax - pointermin) // 2 + pointermin return pointermid def diff_commonSuffix(self, text1, text2): """Determine the common suffix of two strings. Args: text1: First string. text2: Second string. Returns: The number of characters common to the end of each string. """ # Quick check for common null cases. if not text1 or not text2 or text1[-1] != text2[-1]: return 0 # Binary search. # Performance analysis: http://neil.fraser.name/news/2007/10/09/ pointermin = 0 pointermax = min(len(text1), len(text2)) pointermid = pointermax pointerend = 0 while pointermin < pointermid: if (text1[-pointermid:len(text1) - pointerend] == text2[-pointermid:len(text2) - pointerend]): pointermin = pointermid pointerend = pointermin else: pointermax = pointermid pointermid = (pointermax - pointermin) // 2 + pointermin return pointermid def diff_commonOverlap(self, text1, text2): """Determine if the suffix of one string is the prefix of another. Args: text1 First string. text2 Second string. Returns: The number of characters common to the end of the first string and the start of the second string. """ # Cache the text lengths to prevent multiple calls. text1_length = len(text1) text2_length = len(text2) # Eliminate the null case. if text1_length == 0 or text2_length == 0: return 0 # Truncate the longer string. 
if text1_length > text2_length: text1 = text1[-text2_length:] elif text1_length < text2_length: text2 = text2[:text1_length] text_length = min(text1_length, text2_length) # Quick check for the worst case. if text1 == text2: return text_length # Start by looking for a single character match # and increase length until no match is found. # Performance analysis: http://neil.fraser.name/news/2010/11/04/ best = 0 length = 1 while True: pattern = text1[-length:] found = text2.find(pattern) if found == -1: return best length += found if found == 0 or text1[-length:] == text2[:length]: best = length length += 1 def diff_halfMatch(self, text1, text2): """Do the two texts share a substring which is at least half the length of the longer text? This speedup can produce non-minimal diffs. Args: text1: First string. text2: Second string. Returns: Five element Array, containing the prefix of text1, the suffix of text1, the prefix of text2, the suffix of text2 and the common middle. Or None if there was no match. """ if self.Diff_Timeout <= 0: # Don't risk returning a non-optimal diff if we have unlimited time. return None if len(text1) > len(text2): (longtext, shorttext) = (text1, text2) else: (shorttext, longtext) = (text1, text2) if len(longtext) < 4 or len(shorttext) * 2 < len(longtext): return None # Pointless. def diff_halfMatchI(longtext, shorttext, i): """Does a substring of shorttext exist within longtext such that the substring is at least half the length of longtext? Closure, but does not reference any external variables. Args: longtext: Longer string. shorttext: Shorter string. i: Start index of quarter length substring within longtext. Returns: Five element Array, containing the prefix of longtext, the suffix of longtext, the prefix of shorttext, the suffix of shorttext and the common middle. Or None if there was no match. """ seed = longtext[i:i + len(longtext) // 4] best_common = '' j = shorttext.find(seed) while j != -1: prefixLength = self.diff_commonPrefix(longtext[i:], shorttext[j:]) suffixLength = self.diff_commonSuffix(longtext[:i], shorttext[:j]) if len(best_common) < suffixLength + prefixLength: best_common = (shorttext[j - suffixLength:j] + shorttext[j:j + prefixLength]) best_longtext_a = longtext[:i - suffixLength] best_longtext_b = longtext[i + prefixLength:] best_shorttext_a = shorttext[:j - suffixLength] best_shorttext_b = shorttext[j + prefixLength:] j = shorttext.find(seed, j + 1) if len(best_common) * 2 >= len(longtext): return (best_longtext_a, best_longtext_b, best_shorttext_a, best_shorttext_b, best_common) else: return None # First check if the second quarter is the seed for a half-match. hm1 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 3) // 4) # Check again based on the third quarter. hm2 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 1) // 2) if not hm1 and not hm2: return None elif not hm2: hm = hm1 elif not hm1: hm = hm2 else: # Both matched. Select the longest. if len(hm1[4]) > len(hm2[4]): hm = hm1 else: hm = hm2 # A half-match was found, sort out the return data. if len(text1) > len(text2): (text1_a, text1_b, text2_a, text2_b, mid_common) = hm else: (text2_a, text2_b, text1_a, text1_b, mid_common) = hm return (text1_a, text1_b, text2_a, text2_b, mid_common) def diff_cleanupSemantic(self, diffs): """Reduce the number of edits by eliminating semantically trivial equalities. Args: diffs: Array of diff tuples. """ changes = False equalities = [] # Stack of indices where equalities are found. 
lastequality = None # Always equal to diffs[equalities[-1]][1] pointer = 0 # Index of current position. # Number of chars that changed prior to the equality. length_insertions1, length_deletions1 = 0, 0 # Number of chars that changed after the equality. length_insertions2, length_deletions2 = 0, 0 while pointer < len(diffs): if diffs[pointer][0] == self.DIFF_EQUAL: # Equality found. equalities.append(pointer) length_insertions1, length_insertions2 = length_insertions2, 0 length_deletions1, length_deletions2 = length_deletions2, 0 lastequality = diffs[pointer][1] else: # An insertion or deletion. if diffs[pointer][0] == self.DIFF_INSERT: length_insertions2 += len(diffs[pointer][1]) else: length_deletions2 += len(diffs[pointer][1]) # Eliminate an equality that is smaller or equal to the edits on both # sides of it. if (lastequality and (len(lastequality) <= max(length_insertions1, length_deletions1)) and (len(lastequality) <= max(length_insertions2, length_deletions2))): # Duplicate record. diffs.insert(equalities[-1], (self.DIFF_DELETE, lastequality)) # Change second copy to insert. diffs[equalities[-1] + 1] = (self.DIFF_INSERT, diffs[equalities[-1] + 1][1]) # Throw away the equality we just deleted. equalities.pop() # Throw away the previous equality (it needs to be reevaluated). if len(equalities): equalities.pop() if len(equalities): pointer = equalities[-1] else: pointer = -1 # Reset the counters. length_insertions1, length_deletions1 = 0, 0 length_insertions2, length_deletions2 = 0, 0 lastequality = None changes = True pointer += 1 # Normalize the diff. if changes: self.diff_cleanupMerge(diffs) self.diff_cleanupSemanticLossless(diffs) # Find any overlaps between deletions and insertions. # e.g: abcxxxxxxdef # -> abcxxxdef # e.g: xxxabcdefxxx # -> defxxxabc # Only extract an overlap if it is as big as the edit ahead or behind it. pointer = 1 while pointer < len(diffs): if (diffs[pointer - 1][0] == self.DIFF_DELETE and diffs[pointer][0] == self.DIFF_INSERT): deletion = diffs[pointer - 1][1] insertion = diffs[pointer][1] overlap_length1 = self.diff_commonOverlap(deletion, insertion) overlap_length2 = self.diff_commonOverlap(insertion, deletion) if overlap_length1 >= overlap_length2: if (overlap_length1 >= len(deletion) / 2.0 or overlap_length1 >= len(insertion) / 2.0): # Overlap found. Insert an equality and trim the surrounding edits. diffs.insert(pointer, (self.DIFF_EQUAL, insertion[:overlap_length1])) diffs[pointer - 1] = (self.DIFF_DELETE, deletion[:len(deletion) - overlap_length1]) diffs[pointer + 1] = (self.DIFF_INSERT, insertion[overlap_length1:]) pointer += 1 else: if (overlap_length2 >= len(deletion) / 2.0 or overlap_length2 >= len(insertion) / 2.0): # Reverse overlap found. # Insert an equality and swap and trim the surrounding edits. diffs.insert(pointer, (self.DIFF_EQUAL, deletion[:overlap_length2])) diffs[pointer - 1] = (self.DIFF_INSERT, insertion[:len(insertion) - overlap_length2]) diffs[pointer + 1] = (self.DIFF_DELETE, deletion[overlap_length2:]) pointer += 1 pointer += 1 pointer += 1 def diff_cleanupSemanticLossless(self, diffs): """Look for single edits surrounded on both sides by equalities which can be shifted sideways to align the edit to a word boundary. e.g: The cat came. -> The cat came. Args: diffs: Array of diff tuples. """ def diff_cleanupSemanticScore(one, two): """Given two strings, compute a score representing whether the internal boundary falls on logical boundaries. Scores range from 6 (best) to 0 (worst). 
Closure, but does not reference any external variables. Args: one: First string. two: Second string. Returns: The score. """ if not one or not two: # Edges are the best. return 6 # Each port of this function behaves slightly differently due to # subtle differences in each language's definition of things like # 'whitespace'. Since this function's purpose is largely cosmetic, # the choice has been made to use each language's native features # rather than force total conformity. char1 = one[-1] char2 = two[0] nonAlphaNumeric1 = not char1.isalnum() nonAlphaNumeric2 = not char2.isalnum() whitespace1 = nonAlphaNumeric1 and char1.isspace() whitespace2 = nonAlphaNumeric2 and char2.isspace() lineBreak1 = whitespace1 and (char1 == "\r" or char1 == "\n") lineBreak2 = whitespace2 and (char2 == "\r" or char2 == "\n") blankLine1 = lineBreak1 and self.BLANKLINEEND.search(one) blankLine2 = lineBreak2 and self.BLANKLINESTART.match(two) if blankLine1 or blankLine2: # Five points for blank lines. return 5 elif lineBreak1 or lineBreak2: # Four points for line breaks. return 4 elif nonAlphaNumeric1 and not whitespace1 and whitespace2: # Three points for end of sentences. return 3 elif whitespace1 or whitespace2: # Two points for whitespace. return 2 elif nonAlphaNumeric1 or nonAlphaNumeric2: # One point for non-alphanumeric. return 1 return 0 pointer = 1 # Intentionally ignore the first and last element (don't need checking). while pointer < len(diffs) - 1: if (diffs[pointer - 1][0] == self.DIFF_EQUAL and diffs[pointer + 1][0] == self.DIFF_EQUAL): # This is a single edit surrounded by equalities. equality1 = diffs[pointer - 1][1] edit = diffs[pointer][1] equality2 = diffs[pointer + 1][1] # First, shift the edit as far left as possible. commonOffset = self.diff_commonSuffix(equality1, edit) if commonOffset: commonString = edit[-commonOffset:] equality1 = equality1[:-commonOffset] edit = commonString + edit[:-commonOffset] equality2 = commonString + equality2 # Second, step character by character right, looking for the best fit. bestEquality1 = equality1 bestEdit = edit bestEquality2 = equality2 bestScore = (diff_cleanupSemanticScore(equality1, edit) + diff_cleanupSemanticScore(edit, equality2)) while edit and equality2 and edit[0] == equality2[0]: equality1 += edit[0] edit = edit[1:] + equality2[0] equality2 = equality2[1:] score = (diff_cleanupSemanticScore(equality1, edit) + diff_cleanupSemanticScore(edit, equality2)) # The >= encourages trailing rather than leading whitespace on edits. if score >= bestScore: bestScore = score bestEquality1 = equality1 bestEdit = edit bestEquality2 = equality2 if diffs[pointer - 1][1] != bestEquality1: # We have an improvement, save it back to the diff. if bestEquality1: diffs[pointer - 1] = (diffs[pointer - 1][0], bestEquality1) else: del diffs[pointer - 1] pointer -= 1 diffs[pointer] = (diffs[pointer][0], bestEdit) if bestEquality2: diffs[pointer + 1] = (diffs[pointer + 1][0], bestEquality2) else: del diffs[pointer + 1] pointer -= 1 pointer += 1 # Define some regex patterns for matching boundaries. BLANKLINEEND = re.compile(r"\n\r?\n$"); BLANKLINESTART = re.compile(r"^\r?\n\r?\n"); def diff_cleanupEfficiency(self, diffs): """Reduce the number of edits by eliminating operationally trivial equalities. Args: diffs: Array of diff tuples. """ changes = False equalities = [] # Stack of indices where equalities are found. lastequality = None # Always equal to diffs[equalities[-1]][1] pointer = 0 # Index of current position. 
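# ---------------------------------------------------------------------------
# Illustrative sketch for the semantic cleanups above (assumes an instance
# `dmp`; the results shown are expectations, not captured output).
#
#   diffs = [(dmp.DIFF_DELETE, "a"), (dmp.DIFF_EQUAL, "b"),
#            (dmp.DIFF_DELETE, "c")]
#   dmp.diff_cleanupSemantic(diffs)
#   # The one-character equality "b" is cheaper to re-type than to keep, so
#   # this is expected to collapse to:
#   #   [(dmp.DIFF_DELETE, "abc"), (dmp.DIFF_INSERT, "b")]
#
#   diffs = [(dmp.DIFF_EQUAL, "The c"), (dmp.DIFF_INSERT, "ow and the c"),
#            (dmp.DIFF_EQUAL, "at.")]
#   dmp.diff_cleanupSemanticLossless(diffs)
#   # The insertion is expected to shift onto word boundaries:
#   #   [(dmp.DIFF_EQUAL, "The "), (dmp.DIFF_INSERT, "cow and the "),
#   #    (dmp.DIFF_EQUAL, "cat.")]
# ---------------------------------------------------------------------------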
pre_ins = False # Is there an insertion operation before the last equality. pre_del = False # Is there a deletion operation before the last equality. post_ins = False # Is there an insertion operation after the last equality. post_del = False # Is there a deletion operation after the last equality. while pointer < len(diffs): if diffs[pointer][0] == self.DIFF_EQUAL: # Equality found. if (len(diffs[pointer][1]) < self.Diff_EditCost and (post_ins or post_del)): # Candidate found. equalities.append(pointer) pre_ins = post_ins pre_del = post_del lastequality = diffs[pointer][1] else: # Not a candidate, and can never become one. equalities = [] lastequality = None post_ins = post_del = False else: # An insertion or deletion. if diffs[pointer][0] == self.DIFF_DELETE: post_del = True else: post_ins = True # Five types to be split: # ABXYCD # AXCD # ABXC # AXCD # ABXC if lastequality and ((pre_ins and pre_del and post_ins and post_del) or ((len(lastequality) < self.Diff_EditCost / 2) and (pre_ins + pre_del + post_ins + post_del) == 3)): # Duplicate record. diffs.insert(equalities[-1], (self.DIFF_DELETE, lastequality)) # Change second copy to insert. diffs[equalities[-1] + 1] = (self.DIFF_INSERT, diffs[equalities[-1] + 1][1]) equalities.pop() # Throw away the equality we just deleted. lastequality = None if pre_ins and pre_del: # No changes made which could affect previous entry, keep going. post_ins = post_del = True equalities = [] else: if len(equalities): equalities.pop() # Throw away the previous equality. if len(equalities): pointer = equalities[-1] else: pointer = -1 post_ins = post_del = False changes = True pointer += 1 if changes: self.diff_cleanupMerge(diffs) def diff_cleanupMerge(self, diffs): """Reorder and merge like edit sections. Merge equalities. Any edit section can move as long as it doesn't cross an equality. Args: diffs: Array of diff tuples. """ diffs.append((self.DIFF_EQUAL, '')) # Add a dummy entry at the end. pointer = 0 count_delete = 0 count_insert = 0 text_delete = '' text_insert = '' while pointer < len(diffs): if diffs[pointer][0] == self.DIFF_INSERT: count_insert += 1 text_insert += diffs[pointer][1] pointer += 1 elif diffs[pointer][0] == self.DIFF_DELETE: count_delete += 1 text_delete += diffs[pointer][1] pointer += 1 elif diffs[pointer][0] == self.DIFF_EQUAL: # Upon reaching an equality, check for prior redundancies. if count_delete + count_insert > 1: if count_delete != 0 and count_insert != 0: # Factor out any common prefixies. commonlength = self.diff_commonPrefix(text_insert, text_delete) if commonlength != 0: x = pointer - count_delete - count_insert - 1 if x >= 0 and diffs[x][0] == self.DIFF_EQUAL: diffs[x] = (diffs[x][0], diffs[x][1] + text_insert[:commonlength]) else: diffs.insert(0, (self.DIFF_EQUAL, text_insert[:commonlength])) pointer += 1 text_insert = text_insert[commonlength:] text_delete = text_delete[commonlength:] # Factor out any common suffixies. commonlength = self.diff_commonSuffix(text_insert, text_delete) if commonlength != 0: diffs[pointer] = (diffs[pointer][0], text_insert[-commonlength:] + diffs[pointer][1]) text_insert = text_insert[:-commonlength] text_delete = text_delete[:-commonlength] # Delete the offending records and add the merged ones. 
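# ---------------------------------------------------------------------------
# Illustrative sketch for diff_cleanupEfficiency (assumes an instance `dmp`).
# Diff_EditCost (default 4) sets how expensive an extra edit is compared to
# keeping a short equality; the results noted are expectations from the
# algorithm, not captured output.
#
#   diffs = [(dmp.DIFF_INSERT, "ab"), (dmp.DIFF_DELETE, "cd"),
#            (dmp.DIFF_EQUAL, "12"), (dmp.DIFF_INSERT, "34"),
#            (dmp.DIFF_DELETE, "56")]
#   dmp.Diff_EditCost = 4
#   dmp.diff_cleanupEfficiency(diffs)
#   # The short equality "12", surrounded by edits on both sides, is folded
#   # away, leaving a single delete/insert pair (roughly
#   # [(dmp.DIFF_DELETE, "cd1256"), (dmp.DIFF_INSERT, "ab1234")]).
#
#   # diff_cleanupMerge (continued below) performs the final tidy-up: it
#   # merges adjacent records and factors out common prefixes/suffixes, e.g.
#   # [(DIFF_DELETE, "abc"), (DIFF_INSERT, "abx")] is expected to become
#   # [(DIFF_EQUAL, "ab"), (DIFF_DELETE, "c"), (DIFF_INSERT, "x")].
# ---------------------------------------------------------------------------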
if count_delete == 0: diffs[pointer - count_insert : pointer] = [ (self.DIFF_INSERT, text_insert)] elif count_insert == 0: diffs[pointer - count_delete : pointer] = [ (self.DIFF_DELETE, text_delete)] else: diffs[pointer - count_delete - count_insert : pointer] = [ (self.DIFF_DELETE, text_delete), (self.DIFF_INSERT, text_insert)] pointer = pointer - count_delete - count_insert + 1 if count_delete != 0: pointer += 1 if count_insert != 0: pointer += 1 elif pointer != 0 and diffs[pointer - 1][0] == self.DIFF_EQUAL: # Merge this equality with the previous one. diffs[pointer - 1] = (diffs[pointer - 1][0], diffs[pointer - 1][1] + diffs[pointer][1]) del diffs[pointer] else: pointer += 1 count_insert = 0 count_delete = 0 text_delete = '' text_insert = '' if diffs[-1][1] == '': diffs.pop() # Remove the dummy entry at the end. # Second pass: look for single edits surrounded on both sides by equalities # which can be shifted sideways to eliminate an equality. # e.g: ABAC -> ABAC changes = False pointer = 1 # Intentionally ignore the first and last element (don't need checking). while pointer < len(diffs) - 1: if (diffs[pointer - 1][0] == self.DIFF_EQUAL and diffs[pointer + 1][0] == self.DIFF_EQUAL): # This is a single edit surrounded by equalities. if diffs[pointer][1].endswith(diffs[pointer - 1][1]): # Shift the edit over the previous equality. diffs[pointer] = (diffs[pointer][0], diffs[pointer - 1][1] + diffs[pointer][1][:-len(diffs[pointer - 1][1])]) diffs[pointer + 1] = (diffs[pointer + 1][0], diffs[pointer - 1][1] + diffs[pointer + 1][1]) del diffs[pointer - 1] changes = True elif diffs[pointer][1].startswith(diffs[pointer + 1][1]): # Shift the edit over the next equality. diffs[pointer - 1] = (diffs[pointer - 1][0], diffs[pointer - 1][1] + diffs[pointer + 1][1]) diffs[pointer] = (diffs[pointer][0], diffs[pointer][1][len(diffs[pointer + 1][1]):] + diffs[pointer + 1][1]) del diffs[pointer + 1] changes = True pointer += 1 # If shifts were made, the diff needs reordering and another shift sweep. if changes: self.diff_cleanupMerge(diffs) def diff_xIndex(self, diffs, loc): """loc is a location in text1, compute and return the equivalent location in text2. e.g. "The cat" vs "The big cat", 1->1, 5->8 Args: diffs: Array of diff tuples. loc: Location within text1. Returns: Location within text2. """ chars1 = 0 chars2 = 0 last_chars1 = 0 last_chars2 = 0 for x in xrange(len(diffs)): (op, text) = diffs[x] if op != self.DIFF_INSERT: # Equality or deletion. chars1 += len(text) if op != self.DIFF_DELETE: # Equality or insertion. chars2 += len(text) if chars1 > loc: # Overshot the location. break last_chars1 = chars1 last_chars2 = chars2 if len(diffs) != x and diffs[x][0] == self.DIFF_DELETE: # The location was deleted. return last_chars2 # Add the remaining len(character). return last_chars2 + (loc - last_chars1) def diff_prettyHtml(self, diffs): """Convert a diff array into a pretty HTML report. Args: diffs: Array of diff tuples. Returns: HTML representation. """ html = [] for (op, data) in diffs: text = (data.replace("&", "&").replace("<", "<") .replace(">", ">").replace("\n", "¶
")) if op == self.DIFF_INSERT: html.append("%s" % text) elif op == self.DIFF_DELETE: html.append("%s" % text) elif op == self.DIFF_EQUAL: html.append("%s" % text) return "".join(html) def diff_text1(self, diffs): """Compute and return the source text (all equalities and deletions). Args: diffs: Array of diff tuples. Returns: Source text. """ text = [] for (op, data) in diffs: if op != self.DIFF_INSERT: text.append(data) return "".join(text) def diff_text2(self, diffs): """Compute and return the destination text (all equalities and insertions). Args: diffs: Array of diff tuples. Returns: Destination text. """ text = [] for (op, data) in diffs: if op != self.DIFF_DELETE: text.append(data) return "".join(text) def diff_levenshtein(self, diffs): """Compute the Levenshtein distance; the number of inserted, deleted or substituted characters. Args: diffs: Array of diff tuples. Returns: Number of changes. """ levenshtein = 0 insertions = 0 deletions = 0 for (op, data) in diffs: if op == self.DIFF_INSERT: insertions += len(data) elif op == self.DIFF_DELETE: deletions += len(data) elif op == self.DIFF_EQUAL: # A deletion and an insertion is one substitution. levenshtein += max(insertions, deletions) insertions = 0 deletions = 0 levenshtein += max(insertions, deletions) return levenshtein def diff_toDelta(self, diffs): """Crush the diff into an encoded string which describes the operations required to transform text1 into text2. E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'. Operations are tab-separated. Inserted text is escaped using %xx notation. Args: diffs: Array of diff tuples. Returns: Delta text. """ text = [] for (op, data) in diffs: if op == self.DIFF_INSERT: # High ascii will raise UnicodeDecodeError. Use Unicode instead. data = data.encode("utf-8") text.append("+" + urllib.quote(data, "!~*'();/?:@&=+$,# ")) elif op == self.DIFF_DELETE: text.append("-%d" % len(data)) elif op == self.DIFF_EQUAL: text.append("=%d" % len(data)) return "\t".join(text) def diff_fromDelta(self, text1, delta): """Given the original text1, and an encoded string which describes the operations required to transform text1 into text2, compute the full diff. Args: text1: Source string for the diff. delta: Delta text. Returns: Array of diff tuples. Raises: ValueError: If invalid input. """ if type(delta) == unicode: # Deltas should be composed of a subset of ascii chars, Unicode not # required. If this encode raises UnicodeEncodeError, delta is invalid. delta = delta.encode("ascii") diffs = [] pointer = 0 # Cursor in text1 tokens = delta.split("\t") for token in tokens: if token == "": # Blank tokens are ok (from a trailing \t). continue # Each token begins with a one character parameter which specifies the # operation of this token (delete, insert, equality). param = token[1:] if token[0] == "+": param = urllib.unquote(param).decode("utf-8") diffs.append((self.DIFF_INSERT, param)) elif token[0] == "-" or token[0] == "=": try: n = int(param) except ValueError: raise ValueError("Invalid number in diff_fromDelta: " + param) if n < 0: raise ValueError("Negative number in diff_fromDelta: " + param) text = text1[pointer : pointer + n] pointer += n if token[0] == "=": diffs.append((self.DIFF_EQUAL, text)) else: diffs.append((self.DIFF_DELETE, text)) else: # Anything else is an error. raise ValueError("Invalid diff operation in diff_fromDelta: " + token[0]) if pointer != len(text1): raise ValueError( "Delta length (%d) does not equal source text length (%d)." 
% (pointer, len(text1))) return diffs # MATCH FUNCTIONS def match_main(self, text, pattern, loc): """Locate the best instance of 'pattern' in 'text' near 'loc'. Args: text: The text to search. pattern: The pattern to search for. loc: The location to search around. Returns: Best match index or -1. """ # Check for null inputs. if text == None or pattern == None: raise ValueError("Null inputs. (match_main)") loc = max(0, min(loc, len(text))) if text == pattern: # Shortcut (potentially not guaranteed by the algorithm) return 0 elif not text: # Nothing to match. return -1 elif text[loc:loc + len(pattern)] == pattern: # Perfect match at the perfect spot! (Includes case of null pattern) return loc else: # Do a fuzzy compare. match = self.match_bitap(text, pattern, loc) return match def match_bitap(self, text, pattern, loc): """Locate the best instance of 'pattern' in 'text' near 'loc' using the Bitap algorithm. Args: text: The text to search. pattern: The pattern to search for. loc: The location to search around. Returns: Best match index or -1. """ # Python doesn't have a maxint limit, so ignore this check. #if self.Match_MaxBits != 0 and len(pattern) > self.Match_MaxBits: # raise ValueError("Pattern too long for this application.") # Initialise the alphabet. s = self.match_alphabet(pattern) def match_bitapScore(e, x): """Compute and return the score for a match with e errors and x location. Accesses loc and pattern through being a closure. Args: e: Number of errors in match. x: Location of match. Returns: Overall score for match (0.0 = good, 1.0 = bad). """ accuracy = float(e) / len(pattern) proximity = abs(loc - x) if not self.Match_Distance: # Dodge divide by zero error. return proximity and 1.0 or accuracy return accuracy + (proximity / float(self.Match_Distance)) # Highest score beyond which we give up. score_threshold = self.Match_Threshold # Is there a nearby exact match? (speedup) best_loc = text.find(pattern, loc) if best_loc != -1: score_threshold = min(match_bitapScore(0, best_loc), score_threshold) # What about in the other direction? (speedup) best_loc = text.rfind(pattern, loc + len(pattern)) if best_loc != -1: score_threshold = min(match_bitapScore(0, best_loc), score_threshold) # Initialise the bit arrays. matchmask = 1 << (len(pattern) - 1) best_loc = -1 bin_max = len(pattern) + len(text) # Empty initialization added to appease pychecker. last_rd = None for d in xrange(len(pattern)): # Scan for the best match each iteration allows for one more error. # Run a binary search to determine how far from 'loc' we can stray at # this error level. bin_min = 0 bin_mid = bin_max while bin_min < bin_mid: if match_bitapScore(d, loc + bin_mid) <= score_threshold: bin_min = bin_mid else: bin_max = bin_mid bin_mid = (bin_max - bin_min) // 2 + bin_min # Use the result from this iteration as the maximum for the next. bin_max = bin_mid start = max(1, loc - bin_mid + 1) finish = min(loc + bin_mid, len(text)) + len(pattern) rd = [0] * (finish + 2) rd[finish + 1] = (1 << d) - 1 for j in xrange(finish, start - 1, -1): if len(text) <= j - 1: # Out of range. charMatch = 0 else: charMatch = s.get(text[j - 1], 0) if d == 0: # First pass: exact match. rd[j] = ((rd[j + 1] << 1) | 1) & charMatch else: # Subsequent passes: fuzzy match. rd[j] = (((rd[j + 1] << 1) | 1) & charMatch) | ( ((last_rd[j + 1] | last_rd[j]) << 1) | 1) | last_rd[j + 1] if rd[j] & matchmask: score = match_bitapScore(d, j - 1) # This match will almost certainly be better than any existing match. # But check anyway. 
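# ---------------------------------------------------------------------------
# Illustrative sketch for the delta encoding defined above (diff_toDelta /
# diff_fromDelta) and the related helpers (assumes an instance `dmp`).
#
#   diffs = [(dmp.DIFF_EQUAL, "jump"), (dmp.DIFF_DELETE, "s"),
#            (dmp.DIFF_INSERT, "ed"), (dmp.DIFF_EQUAL, " over")]
#   dmp.diff_text1(diffs)        # "jumps over"   (source text)
#   dmp.diff_text2(diffs)        # "jumped over"  (destination text)
#   dmp.diff_levenshtein(diffs)  # 2  (1 char deleted vs 2 inserted -> max 2)
#   delta = dmp.diff_toDelta(diffs)
#   # expected: "=4\t-1\t+ed\t=5"
#   dmp.diff_fromDelta("jumps over", delta)
#   # expected to reconstruct the original list of diff tuples; a delta plus
#   # text1 is therefore enough to transmit a diff compactly.
# ---------------------------------------------------------------------------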
if score <= score_threshold: # Told you so. score_threshold = score best_loc = j - 1 if best_loc > loc: # When passing loc, don't exceed our current distance from loc. start = max(1, 2 * loc - best_loc) else: # Already passed loc, downhill from here on in. break # No hope for a (better) match at greater error levels. if match_bitapScore(d + 1, loc) > score_threshold: break last_rd = rd return best_loc def match_alphabet(self, pattern): """Initialise the alphabet for the Bitap algorithm. Args: pattern: The text to encode. Returns: Hash of character locations. """ s = {} for char in pattern: s[char] = 0 for i in xrange(len(pattern)): s[pattern[i]] |= 1 << (len(pattern) - i - 1) return s # PATCH FUNCTIONS def patch_addContext(self, patch, text): """Increase the context until it is unique, but don't let the pattern expand beyond Match_MaxBits. Args: patch: The patch to grow. text: Source text. """ if len(text) == 0: return pattern = text[patch.start2 : patch.start2 + patch.length1] padding = 0 # Look for the first and last matches of pattern in text. If two different # matches are found, increase the pattern length. while (text.find(pattern) != text.rfind(pattern) and (self.Match_MaxBits == 0 or len(pattern) < self.Match_MaxBits - self.Patch_Margin - self.Patch_Margin)): padding += self.Patch_Margin pattern = text[max(0, patch.start2 - padding) : patch.start2 + patch.length1 + padding] # Add one chunk for good luck. padding += self.Patch_Margin # Add the prefix. prefix = text[max(0, patch.start2 - padding) : patch.start2] if prefix: patch.diffs[:0] = [(self.DIFF_EQUAL, prefix)] # Add the suffix. suffix = text[patch.start2 + patch.length1 : patch.start2 + patch.length1 + padding] if suffix: patch.diffs.append((self.DIFF_EQUAL, suffix)) # Roll back the start points. patch.start1 -= len(prefix) patch.start2 -= len(prefix) # Extend lengths. patch.length1 += len(prefix) + len(suffix) patch.length2 += len(prefix) + len(suffix) def patch_make(self, a, b=None, c=None): """Compute a list of patches to turn text1 into text2. Use diffs if provided, otherwise compute it ourselves. There are four ways to call this function, depending on what data is available to the caller: Method 1: a = text1, b = text2 Method 2: a = diffs Method 3 (optimal): a = text1, b = diffs Method 4 (deprecated, use method 3): a = text1, b = text2, c = diffs Args: a: text1 (methods 1,3,4) or Array of diff tuples for text1 to text2 (method 2). b: text2 (methods 1,4) or Array of diff tuples for text1 to text2 (method 3) or undefined (method 2). c: Array of diff tuples for text1 to text2 (method 4) or undefined (methods 1,2,3). Returns: Array of Patch objects. """ text1 = None diffs = None # Note that texts may arrive as 'str' or 'unicode'. if isinstance(a, basestring) and isinstance(b, basestring) and c is None: # Method 1: text1, text2 # Compute diffs from text1 and text2. text1 = a diffs = self.diff_main(text1, b, True) if len(diffs) > 2: self.diff_cleanupSemantic(diffs) self.diff_cleanupEfficiency(diffs) elif isinstance(a, list) and b is None and c is None: # Method 2: diffs # Compute text1 from diffs. diffs = a text1 = self.diff_text1(diffs) elif isinstance(a, basestring) and isinstance(b, list) and c is None: # Method 3: text1, diffs text1 = a diffs = b elif (isinstance(a, basestring) and isinstance(b, basestring) and isinstance(c, list)): # Method 4: text1, text2, diffs # text2 is not used. text1 = a diffs = c else: raise ValueError("Unknown call format to patch_make.") if not diffs: return [] # Get rid of the None case. 
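# ---------------------------------------------------------------------------
# Illustrative sketch for the match functions above (assumes an instance
# `dmp`; the fuzzy-match result is an expectation with the default
# Match_Threshold of 0.5, not verified here).
#
#   dmp.match_alphabet("abc")
#   # expected: {'a': 4, 'b': 2, 'c': 1} -- one bit per pattern position.
#
#   dmp.match_main("abcdef", "de", 3)
#   # expected: 3 (exact match found at the requested location).
#
#   dmp.match_main("abcdef", "defy", 4)
#   # expected: 3 -- Bitap tolerates the stray "y" because the error rate
#   # (1 error over 4 pattern chars) plus the distance penalty stays under
#   # Match_Threshold.
# ---------------------------------------------------------------------------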
patches = [] patch = patch_obj() char_count1 = 0 # Number of characters into the text1 string. char_count2 = 0 # Number of characters into the text2 string. prepatch_text = text1 # Recreate the patches to determine context info. postpatch_text = text1 for x in xrange(len(diffs)): (diff_type, diff_text) = diffs[x] if len(patch.diffs) == 0 and diff_type != self.DIFF_EQUAL: # A new patch starts here. patch.start1 = char_count1 patch.start2 = char_count2 if diff_type == self.DIFF_INSERT: # Insertion patch.diffs.append(diffs[x]) patch.length2 += len(diff_text) postpatch_text = (postpatch_text[:char_count2] + diff_text + postpatch_text[char_count2:]) elif diff_type == self.DIFF_DELETE: # Deletion. patch.length1 += len(diff_text) patch.diffs.append(diffs[x]) postpatch_text = (postpatch_text[:char_count2] + postpatch_text[char_count2 + len(diff_text):]) elif (diff_type == self.DIFF_EQUAL and len(diff_text) <= 2 * self.Patch_Margin and len(patch.diffs) != 0 and len(diffs) != x + 1): # Small equality inside a patch. patch.diffs.append(diffs[x]) patch.length1 += len(diff_text) patch.length2 += len(diff_text) if (diff_type == self.DIFF_EQUAL and len(diff_text) >= 2 * self.Patch_Margin): # Time for a new patch. if len(patch.diffs) != 0: self.patch_addContext(patch, prepatch_text) patches.append(patch) patch = patch_obj() # Unlike Unidiff, our patch lists have a rolling context. # http://code.google.com/p/google-diff-match-patch/wiki/Unidiff # Update prepatch text & pos to reflect the application of the # just completed patch. prepatch_text = postpatch_text char_count1 = char_count2 # Update the current character count. if diff_type != self.DIFF_INSERT: char_count1 += len(diff_text) if diff_type != self.DIFF_DELETE: char_count2 += len(diff_text) # Pick up the leftover patch if not empty. if len(patch.diffs) != 0: self.patch_addContext(patch, prepatch_text) patches.append(patch) return patches def patch_deepCopy(self, patches): """Given an array of patches, return another array that is identical. Args: patches: Array of Patch objects. Returns: Array of Patch objects. """ patchesCopy = [] for patch in patches: patchCopy = patch_obj() # No need to deep copy the tuples since they are immutable. patchCopy.diffs = patch.diffs[:] patchCopy.start1 = patch.start1 patchCopy.start2 = patch.start2 patchCopy.length1 = patch.length1 patchCopy.length2 = patch.length2 patchesCopy.append(patchCopy) return patchesCopy def patch_apply(self, patches, text): """Merge a set of patches onto the text. Return a patched text, as well as a list of true/false values indicating which patches were applied. Args: patches: Array of Patch objects. text: Old text. Returns: Two element Array, containing the new text and an array of boolean values. """ if not patches: return (text, []) # Deep copy the patches so that no changes are made to originals. patches = self.patch_deepCopy(patches) nullPadding = self.patch_addPadding(patches) text = nullPadding + text + nullPadding self.patch_splitMax(patches) # delta keeps track of the offset between the expected and actual location # of the previous patch. If there are patches expected at positions 10 and # 20, but the first patch was found at 12, delta is 2 and the second patch # has an effective expected position of 22. delta = 0 results = [] for patch in patches: expected_loc = patch.start2 + delta text1 = self.diff_text1(patch.diffs) end_loc = -1 if len(text1) > self.Match_MaxBits: # patch_splitMax will only provide an oversized pattern in the case of # a monster delete. 
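# ---------------------------------------------------------------------------
# Illustrative sketch of the patch_make calling conventions documented above
# (assumes an instance `dmp`; all three forms should yield equivalent
# patches).
#
#   text1 = "The quick brown fox jumps over the lazy dog."
#   text2 = "That quick brown fox jumped over a lazy dog."
#   patches = dmp.patch_make(text1, text2)       # method 1: two texts
#   diffs = dmp.diff_main(text1, text2)
#   patches = dmp.patch_make(diffs)              # method 2: diffs only
#   patches = dmp.patch_make(text1, diffs)       # method 3 (preferred)
#   # Each entry is a patch_obj (defined further down) whose str() form is a
#   # GNU-diff-style "@@ -m,n +m,n @@" hunk.
# ---------------------------------------------------------------------------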
start_loc = self.match_main(text, text1[:self.Match_MaxBits], expected_loc) if start_loc != -1: end_loc = self.match_main(text, text1[-self.Match_MaxBits:], expected_loc + len(text1) - self.Match_MaxBits) if end_loc == -1 or start_loc >= end_loc: # Can't find valid trailing context. Drop this patch. start_loc = -1 else: start_loc = self.match_main(text, text1, expected_loc) if start_loc == -1: # No match found. :( results.append(False) # Subtract the delta for this failed patch from subsequent patches. delta -= patch.length2 - patch.length1 else: # Found a match. :) results.append(True) delta = start_loc - expected_loc if end_loc == -1: text2 = text[start_loc : start_loc + len(text1)] else: text2 = text[start_loc : end_loc + self.Match_MaxBits] if text1 == text2: # Perfect match, just shove the replacement text in. text = (text[:start_loc] + self.diff_text2(patch.diffs) + text[start_loc + len(text1):]) else: # Imperfect match. # Run a diff to get a framework of equivalent indices. diffs = self.diff_main(text1, text2, False) if (len(text1) > self.Match_MaxBits and self.diff_levenshtein(diffs) / float(len(text1)) > self.Patch_DeleteThreshold): # The end points match, but the content is unacceptably bad. results[-1] = False else: self.diff_cleanupSemanticLossless(diffs) index1 = 0 for (op, data) in patch.diffs: if op != self.DIFF_EQUAL: index2 = self.diff_xIndex(diffs, index1) if op == self.DIFF_INSERT: # Insertion text = text[:start_loc + index2] + data + text[start_loc + index2:] elif op == self.DIFF_DELETE: # Deletion text = text[:start_loc + index2] + text[start_loc + self.diff_xIndex(diffs, index1 + len(data)):] if op != self.DIFF_DELETE: index1 += len(data) # Strip the padding off. text = text[len(nullPadding):-len(nullPadding)] return (text, results) def patch_addPadding(self, patches): """Add some padding on text start and end so that edges can match something. Intended to be called only from within patch_apply. Args: patches: Array of Patch objects. Returns: The padding string added to each side. """ paddingLength = self.Patch_Margin nullPadding = "" for x in xrange(1, paddingLength + 1): nullPadding += chr(x) # Bump all the patches forward. for patch in patches: patch.start1 += paddingLength patch.start2 += paddingLength # Add some padding on start of first diff. patch = patches[0] diffs = patch.diffs if not diffs or diffs[0][0] != self.DIFF_EQUAL: # Add nullPadding equality. diffs.insert(0, (self.DIFF_EQUAL, nullPadding)) patch.start1 -= paddingLength # Should be 0. patch.start2 -= paddingLength # Should be 0. patch.length1 += paddingLength patch.length2 += paddingLength elif paddingLength > len(diffs[0][1]): # Grow first equality. extraLength = paddingLength - len(diffs[0][1]) newText = nullPadding[len(diffs[0][1]):] + diffs[0][1] diffs[0] = (diffs[0][0], newText) patch.start1 -= extraLength patch.start2 -= extraLength patch.length1 += extraLength patch.length2 += extraLength # Add some padding on end of last diff. patch = patches[-1] diffs = patch.diffs if not diffs or diffs[-1][0] != self.DIFF_EQUAL: # Add nullPadding equality. diffs.append((self.DIFF_EQUAL, nullPadding)) patch.length1 += paddingLength patch.length2 += paddingLength elif paddingLength > len(diffs[-1][1]): # Grow last equality. 
extraLength = paddingLength - len(diffs[-1][1]) newText = diffs[-1][1] + nullPadding[:extraLength] diffs[-1] = (diffs[-1][0], newText) patch.length1 += extraLength patch.length2 += extraLength return nullPadding def patch_splitMax(self, patches): """Look through the patches and break up any which are longer than the maximum limit of the match algorithm. Intended to be called only from within patch_apply. Args: patches: Array of Patch objects. """ patch_size = self.Match_MaxBits if patch_size == 0: # Python has the option of not splitting strings due to its ability # to handle integers of arbitrary precision. return for x in xrange(len(patches)): if patches[x].length1 <= patch_size: continue bigpatch = patches[x] # Remove the big old patch. del patches[x] x -= 1 start1 = bigpatch.start1 start2 = bigpatch.start2 precontext = '' while len(bigpatch.diffs) != 0: # Create one of several smaller patches. patch = patch_obj() empty = True patch.start1 = start1 - len(precontext) patch.start2 = start2 - len(precontext) if precontext: patch.length1 = patch.length2 = len(precontext) patch.diffs.append((self.DIFF_EQUAL, precontext)) while (len(bigpatch.diffs) != 0 and patch.length1 < patch_size - self.Patch_Margin): (diff_type, diff_text) = bigpatch.diffs[0] if diff_type == self.DIFF_INSERT: # Insertions are harmless. patch.length2 += len(diff_text) start2 += len(diff_text) patch.diffs.append(bigpatch.diffs.pop(0)) empty = False elif (diff_type == self.DIFF_DELETE and len(patch.diffs) == 1 and patch.diffs[0][0] == self.DIFF_EQUAL and len(diff_text) > 2 * patch_size): # This is a large deletion. Let it pass in one chunk. patch.length1 += len(diff_text) start1 += len(diff_text) empty = False patch.diffs.append((diff_type, diff_text)) del bigpatch.diffs[0] else: # Deletion or equality. Only take as much as we can stomach. diff_text = diff_text[:patch_size - patch.length1 - self.Patch_Margin] patch.length1 += len(diff_text) start1 += len(diff_text) if diff_type == self.DIFF_EQUAL: patch.length2 += len(diff_text) start2 += len(diff_text) else: empty = False patch.diffs.append((diff_type, diff_text)) if diff_text == bigpatch.diffs[0][1]: del bigpatch.diffs[0] else: bigpatch.diffs[0] = (bigpatch.diffs[0][0], bigpatch.diffs[0][1][len(diff_text):]) # Compute the head context for the next patch. precontext = self.diff_text2(patch.diffs) precontext = precontext[-self.Patch_Margin:] # Append the end context for this patch. postcontext = self.diff_text1(bigpatch.diffs)[:self.Patch_Margin] if postcontext: patch.length1 += len(postcontext) patch.length2 += len(postcontext) if len(patch.diffs) != 0 and patch.diffs[-1][0] == self.DIFF_EQUAL: patch.diffs[-1] = (self.DIFF_EQUAL, patch.diffs[-1][1] + postcontext) else: patch.diffs.append((self.DIFF_EQUAL, postcontext)) if not empty: x += 1 patches.insert(x, patch) def patch_toText(self, patches): """Take a list of patches and return a textual representation. Args: patches: Array of Patch objects. Returns: Text representation of patches. """ text = [] for patch in patches: text.append(str(patch)) return "".join(text) def patch_fromText(self, textline): """Parse a textual representation of patches and return a list of patch objects. Args: textline: Text representation of patches. Returns: Array of Patch objects. Raises: ValueError: If invalid input. """ if type(textline) == unicode: # Patches should be composed of a subset of ascii chars, Unicode not # required. If this encode raises UnicodeEncodeError, patch is invalid. 
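# ---------------------------------------------------------------------------
# Illustrative sketch of applying and serialising patches (assumes an
# instance `dmp` and the `patches`, `text1`, `text2` names from the
# patch_make sketch above; results in comments are expectations, not
# captured output).
#
#   new_text, results = dmp.patch_apply(patches, text1)
#   # new_text should equal text2; `results` holds one boolean per patch,
#   # e.g. [True, True]. Individual patches can fail when the source text
#   # has drifted too far from the context the patch expects.
#
#   serialised = dmp.patch_toText(patches)    # "@@ -m,n +m,n @@" hunks,
#                                             # body lines %xx-escaped
#   restored = dmp.patch_fromText(serialised)
#   dmp.patch_toText(restored) == serialised  # expected to round-trip
# ---------------------------------------------------------------------------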
textline = textline.encode("ascii") patches = [] if not textline: return patches text = textline.split('\n') while len(text) != 0: m = re.match("^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@$", text[0]) if not m: raise ValueError("Invalid patch string: " + text[0]) patch = patch_obj() patches.append(patch) patch.start1 = int(m.group(1)) if m.group(2) == '': patch.start1 -= 1 patch.length1 = 1 elif m.group(2) == '0': patch.length1 = 0 else: patch.start1 -= 1 patch.length1 = int(m.group(2)) patch.start2 = int(m.group(3)) if m.group(4) == '': patch.start2 -= 1 patch.length2 = 1 elif m.group(4) == '0': patch.length2 = 0 else: patch.start2 -= 1 patch.length2 = int(m.group(4)) del text[0] while len(text) != 0: if text[0]: sign = text[0][0] else: sign = '' line = urllib.unquote(text[0][1:]) line = line.decode("utf-8") if sign == '+': # Insertion. patch.diffs.append((self.DIFF_INSERT, line)) elif sign == '-': # Deletion. patch.diffs.append((self.DIFF_DELETE, line)) elif sign == ' ': # Minor equality. patch.diffs.append((self.DIFF_EQUAL, line)) elif sign == '@': # Start of next patch. break elif sign == '': # Blank line? Whatever. pass else: # WTF? raise ValueError("Invalid patch mode: '%s'\n%s" % (sign, line)) del text[0] return patches class patch_obj: """Class representing one patch operation. """ def __init__(self): """Initializes with an empty list of diffs. """ self.diffs = [] self.start1 = None self.start2 = None self.length1 = 0 self.length2 = 0 def __str__(self): """Emmulate GNU diff's format. Header: @@ -382,8 +481,9 @@ Indicies are printed as 1-based, not 0-based. Returns: The GNU diff string. """ if self.length1 == 0: coords1 = str(self.start1) + ",0" elif self.length1 == 1: coords1 = str(self.start1 + 1) else: coords1 = str(self.start1 + 1) + "," + str(self.length1) if self.length2 == 0: coords2 = str(self.start2) + ",0" elif self.length2 == 1: coords2 = str(self.start2 + 1) else: coords2 = str(self.start2 + 1) + "," + str(self.length2) text = ["@@ -", coords1, " +", coords2, " @@\n"] # Escape the body of the patch with %xx notation. for (op, data) in self.diffs: if op == diff_match_patch.DIFF_INSERT: text.append("+") elif op == diff_match_patch.DIFF_DELETE: text.append("-") elif op == diff_match_patch.DIFF_EQUAL: text.append(" ") # High ascii will raise UnicodeDecodeError. Use Unicode instead. data = data.encode("utf-8") text.append(urllib.quote(data, "!~*'();/?:@&=+$,# ") + "\n") return "".join(text) charm-tools-2.1.2/charmtools/list.py0000664000175000017500000000174712650157641017712 0ustar marcomarco00000000000000#!/usr/bin/env python # Copyright (C) 2013 Marco Ceppi . # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . import sys from . 
import charms


def main():
    if len(sys.argv) > 1:
        print('usage: list [ --help ]')
        if sys.argv[1] == '--help':
            sys.exit(0)
        else:
            sys.exit(1)

    print "\n".join(charms.remote())


if __name__ == "__main__":
    main()
charm-tools-2.1.2/charmtools/cli.py0000664000175000017500000000235212674364412017501 0ustar marcomarco00000000000000import os
import sys
import glob


def parser_defaults(parser):
    parser.add_argument('-b', '--bundle', action='store_true',
                        help='Process as a bundle')
    parser.add_argument('--debug', action='store_true',
                        help='Provide additional debug information')
    return parser


def usage(exit_code=0, bundle=False):
    sys.stderr.write(
        'Get help for a charm subcommand\n\n'
        'usage: %s subcommand\n' % os.path.basename(sys.argv[0]))
    subs = subcommands(os.path.dirname(os.path.realpath(__file__)))
    sys.stderr.write('\n Available subcommands are:\n ')
    sys.stderr.write('\n '.join(subs))
    sys.stderr.write('\n')
    sys.exit(exit_code)


def subcommands(scripts_dir):
    subs = []
    for path in os.environ['PATH'].split(os.pathsep):
        path = path.strip('"')
        for cmd in glob.glob(os.path.join(path, 'charm-*%s' % ext())):
            sub = os.path.basename(cmd)
            sub = sub.split('charm-')[1].replace(ext(), '')
            subs.append(sub)
    subs = sorted(set(subs))
    # Removes blacklisted items from the subcommands list.
    return filter(lambda s: s not in ['mr', 'charms'], subs)


def ext():
    return '.exe' if os.name == 'nt' else ''
charm-tools-2.1.2/charmtools/generate.py0000664000175000017500000001106212676737527020537 0ustar marcomarco00000000000000#!/usr/bin/python
# Copyright (C) 2013 Marco Ceppi .
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .

import os
import shutil
import argparse

from Cheetah.Template import Template

from cli import parser_defaults
from charms import Charm
from charmstore import CharmStore
from charmstore.error import CharmNotFound
from .
import utils TPL_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'templates') CHARM_TPL = os.path.join(TPL_DIR, 'charm') def graph(interface, endpoint, series='trusty'): matches = { 'requires': 'provides', 'provides': 'requires', } match = matches[endpoint] c = CharmStore() try: charms = getattr(c, match)(interface) except CharmNotFound: return None charms = [c for c in charms if c.series == series] if charms: return charms[0] else: return None def copy_file(tpl_file, charm_dir, is_bundle=False, debug=False): c = Charm(charm_dir) if not c.is_charm(): raise Exception('%s is not a charm' % charm_dir) shutil.copy(os.path.join(CHARM_TPL, tpl_file), charm_dir) def tests(charm_dir, is_bundle=False, debug=False, series='trusty'): c = Charm(charm_dir) if not c.is_charm(): raise Exception('Not a Charm') mdata = c.metadata() interfaces = {} deploy = [mdata['name']] relations = [] for rel_type in ['provides', 'requires']: if rel_type in mdata: interfaces[rel_type] = {} for rel, data in mdata[rel_type].iteritems(): iface = data['interface'] if iface and iface not in interfaces[rel_type]: r = graph(iface, rel_type, series=series) # If we dont find an interface, do nothing if r is None: continue interfaces[rel_type][iface] = r deploy.append(r.name) relations.append(['%s:%s' % (mdata['name'], rel), r.name]) t = Template(file=os.path.join(TPL_DIR, 'tests', '99-autogen.tpl'), searchList=[{'deploy': deploy, 'relate': relations, 'series': series}]) if not os.path.exists(os.path.join(charm_dir, 'tests')): os.mkdir(os.path.join(charm_dir, 'tests')) with open(os.path.join(charm_dir, 'tests', '99-autogen'), 'w') as f: f.write(str(t)) if not os.path.exists(os.path.join(charm_dir, 'tests', '00-setup')): with open(os.path.join(charm_dir, 'tests', '00-setup'), 'w') as f: f.write("""#!/bin/bash sudo add-apt-repository ppa:juju/stable -y sudo apt-get update sudo apt-get install amulet python3-requests -y """) os.chmod(os.path.join(charm_dir, 'tests', '99-autogen'), 0755) os.chmod(os.path.join(charm_dir, 'tests', '00-setup'), 0755) def parser(args=None): parser = argparse.ArgumentParser( description='add icon, readme, or tests to a charm') parser.add_argument('subcommand', choices=['tests', 'readme', 'icon'], help='Which type of generator to run') utils.add_plugin_description(parser) parser = parser_defaults(parser) return parser.parse_known_args(args) def tests_parser(args): # This bites, need an argparser experter parser = argparse.ArgumentParser(description="add tests to a charm") parser.add_argument('--series', '-s', default='trusty', help='Series for the generated test') return parser.parse_args(args) def main(): a, extra = parser() if a.subcommand == 'tests': opts = tests_parser(extra) tests(os.getcwd(), is_bundle=a.bundle, debug=a.debug, series=opts.series) elif a.subcommand == 'readme': copy_file('README.ex', os.getcwd(), is_bundle=a.bundle, debug=a.debug) elif a.subcommand == 'icon': copy_file('icon.svg', os.getcwd(), is_bundle=a.bundle, debug=a.debug) else: raise Exception('No subcommand found') if __name__ == '__main__': main() charm-tools-2.1.2/charmtools/templates/0000775000175000017500000000000012677251067020360 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/ansible/0000775000175000017500000000000012677251067021775 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/ansible/template.py0000664000175000017500000000564512650157641024166 0ustar marcomarco00000000000000#!/usr/bin/python # # Copyright (C) 2014 Canonical Ltd. 
# # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . import logging import os import os.path as path import time import shutil import subprocess import tempfile from Cheetah.Template import Template from stat import ST_MODE from charmtools.generators import ( CharmTemplate, ) log = logging.getLogger(__name__) class AnsibleCharmTemplate(CharmTemplate): skip_parsing = ['README.ex', 'Makefile'] def create_charm(self, config, output_dir): self._copy_files(output_dir) for root, dirs, files in os.walk(output_dir): for outfile in files: if outfile in self.skip_parsing: continue self._template_file(config, path.join(root, outfile)) self._cleanup_hooks(config, output_dir) self._install_charmhelpers(output_dir) def _copy_files(self, output_dir): here = path.abspath(path.dirname(__file__)) template_dir = path.join(here, 'files') if os.path.exists(output_dir): shutil.rmtree(output_dir) shutil.copytree(template_dir, output_dir) def _template_file(self, config, outfile): if path.islink(outfile): return mode = os.stat(outfile)[ST_MODE] t = Template(file=outfile, searchList=(config)) o = tempfile.NamedTemporaryFile( dir=path.dirname(outfile), delete=False) os.chmod(o.name, mode) o.write(str(t)) o.close() backupname = outfile + str(time.time()) os.rename(outfile, backupname) os.rename(o.name, outfile) os.unlink(backupname) def _cleanup_hooks(self, config, output_dir): # Symlinks must be relative so that they are copied properly # when output_dir is moved to it's final location. for link in ['config-changed', 'install', 'start', 'stop', 'upgrade-charm']: os.symlink('hooks.py', os.path.join(output_dir, 'hooks', link)) def _install_charmhelpers(self, output_dir): helpers_dest = os.path.join(output_dir, 'lib', 'charmhelpers') if not os.path.exists(helpers_dest): os.makedirs(helpers_dest) cmd = './scripts/charm_helpers_sync.py -c charm-helpers.yaml' subprocess.check_call(cmd.split(), cwd=output_dir) charm-tools-2.1.2/charmtools/templates/ansible/files/0000775000175000017500000000000012677251067023077 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/ansible/files/hooks/0000775000175000017500000000000012677251067024222 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/ansible/files/hooks/hooks.py0000775000175000017500000000161412650157641025716 0ustar marcomarco00000000000000#!/usr/bin/env python import os import sys sys.path.insert(0, os.path.join(os.environ['CHARM_DIR'], 'lib')) import charmhelpers.contrib.ansible # Create the hooks helper, passing a list of hooks which will be # handled by default by running all sections of the playbook # tagged with the hook name. hooks = charmhelpers.contrib.ansible.AnsibleHooks( playbook_path='playbooks/site.yaml', default_hooks=[ 'start', 'stop', 'config-changed', 'upgrade-charm', ]) @hooks.hook('install', 'upgrade-charm') def install(): """Install ansible. 
The hook() helper decorating this install function ensures that after this function finishes, any tasks in the playbook tagged with install or upgrade-charm are executed. """ charmhelpers.contrib.ansible.install_ansible_support(from_ppa=True) if __name__ == "__main__": hooks.execute(sys.argv) charm-tools-2.1.2/charmtools/templates/ansible/files/unit_tests/0000775000175000017500000000000012677251067025300 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/ansible/files/unit_tests/test_hooks.py0000664000175000017500000000353312650157641030032 0ustar marcomarco00000000000000import unittest try: import mock except ImportError: raise ImportError( "Please ensure both python-mock and python-nose are installed.") from hooks import hooks class InstallHookTestCase(unittest.TestCase): def setUp(self): super(InstallHookTestCase, self).setUp() patcher = mock.patch('hooks.charmhelpers') self.mock_charmhelpers = patcher.start() self.addCleanup(patcher.stop) self.mock_charmhelpers.core.hookenv.config.return_value = { 'install_deps_from_ppa': False, } patcher = mock.patch('charmhelpers.contrib.ansible.apply_playbook') self.mock_apply_playbook = patcher.start() self.addCleanup(patcher.stop) def test_installs_ansible_support(self): hooks.execute(['install']) ansible = self.mock_charmhelpers.contrib.ansible ansible.install_ansible_support.assert_called_once_with( from_ppa=True) def test_applies_install_playbook(self): hooks.execute(['install']) self.assertEqual([ mock.call('playbooks/site.yaml', tags=['install']), ], self.mock_apply_playbook.call_args_list) class DefaultHooksTestCase(unittest.TestCase): def setUp(self): super(DefaultHooksTestCase, self).setUp() patcher = mock.patch('charmhelpers.contrib.ansible.apply_playbook') self.mock_apply_playbook = patcher.start() self.addCleanup(patcher.stop) def test_default_hooks(self): """Most of the hooks let ansible do all the work.""" for hook in ('start', 'stop', 'config-changed'): self.mock_apply_playbook.reset_mock() hooks.execute([hook]) self.assertEqual([ mock.call('playbooks/site.yaml', tags=[hook]), ], self.mock_apply_playbook.call_args_list) charm-tools-2.1.2/charmtools/templates/ansible/files/README.ex0000664000175000017500000000411212650157641024362 0ustar marcomarco00000000000000# Overview Describe the intended usage of this charm and anything unique about how this charm relates to others here. This README will be displayed in the Charm Store, it should be either Markdown or RST. Ideal READMEs include instructions on how to use the charm, expected usage, and charm features that your audience might be interested in. For an example of a well written README check out Hadoop: http://jujucharms.com/charms/precise/hadoop Use this as a Markdown reference if you need help with the formatting of this README: http://askubuntu.com/editing-help This charm provides [service](http://example.com). Add a description here of what the service itself actually does. Also remember to check the [icon guidelines](https://jujucharms.com/docs/stable/authors-charm-icon) so that your charm looks good in the Juju GUI. # Usage Step by step instructions on using the charm: juju deploy servicename and so on. If you're providing a web service or something that the end user needs to go to, tell them here, especially if you're deploying a service that might listen to a non-default port. You can then browse to http://ip-address to configure the service. ## Scale out Usage If the charm has any recommendations for running at scale, outline them in examples here. 
For example if you have a memcached relation that improves performance, mention it here. ## Known Limitations and Issues This not only helps users but gives people a place to start if they want to help you add features to your charm. # Configuration The configuration options will be listed on the charm store, however If you're making assumptions or opinionated decisions in the charm (like setting a default administrator password), you should detail that here so the user knows how to change it immediately, etc. # Contact Information Though this will be listed in the charm store itself don't assume a user will know that, so include that information here: ## Upstream Project Name - Upstream website - Upstream bug tracker - Upstream mailing list or contact information - Feel free to add things if it's useful for users charm-tools-2.1.2/charmtools/templates/ansible/files/Makefile0000664000175000017500000000077212650157641024537 0ustar marcomarco00000000000000#!/usr/bin/make build: virtualenv lint test virtualenv: .venv/bin/python .venv/bin/python: sudo apt-get install python-virtualenv virtualenv .venv .venv/bin/pip install nose flake8 mock pyyaml lint: @.venv/bin/flake8 hooks unit_tests @charm proof test: @echo Starting tests... @CHARM_DIR=. PYTHONPATH=./hooks .venv/bin/nosetests --nologcapture unit_tests sync-charm-helpers: @.venv/bin/python scripts/charm_helpers_sync.py -c charm-helpers.yaml clean: rm -rf .venv find -name *.pyc -delete charm-tools-2.1.2/charmtools/templates/ansible/files/playbooks/0000775000175000017500000000000012677251067025102 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/ansible/files/playbooks/site.yaml0000664000175000017500000000252512650157641026730 0ustar marcomarco00000000000000# The tasks here are left as examples and should be removed/replaced by the # charm author. Some things to note: # # 1. All charm config values are available as template variables # e.g. repo -> {{repo}}, app-name -> {{app_name}} (note underscore) # # 2. Along with charm config values, the following variables are also # made available as template vars: # # charm_dir # local_unit # unit_private_address # unit_public_address # # 3. Use tags to control when each task is executed. The tags list should # contain one or more hook names. - hosts: all tasks: - name: Install required packages. apt: pkg={{ item }} state=latest update_cache=yes with_items: - python-django - python-django-celery tags: - install - upgrade-charm - name: Put app code in place. git: repo=git://github.com/absoludity/charm-bootstrap-ansible.git dest=/srv/myapp version=HEAD tags: - install - config-changed - name: Start service debug: msg="You'd start some service here. The config 'string-option' has the value '{{ string_option }}'" tags: - start - config-changed - name: Stop service debug: msg="You'd stop some service here. 
The config 'string-option' is '{{ string_option }}'" tags: - stop charm-tools-2.1.2/charmtools/templates/ansible/files/charm-helpers.yaml0000664000175000017500000000022512650157641026506 0ustar marcomarco00000000000000destination: lib/charmhelpers branch: lp:charm-helpers include: - core - fetch - contrib.ansible|inc=* - contrib.templating.contexts charm-tools-2.1.2/charmtools/templates/ansible/files/config.yaml0000664000175000017500000000056712650157641025232 0ustar marcomarco00000000000000options: string-option: type: string default: "Default Value" description: "A short description of the configuration option" boolean-option: type: boolean default: False description: "A short description of the configuration option" int-option: type: int default: 9001 description: "A short description of the configuration option" charm-tools-2.1.2/charmtools/templates/ansible/files/revision0000664000175000017500000000000212650157641024642 0ustar marcomarco000000000000001 charm-tools-2.1.2/charmtools/templates/ansible/files/metadata.yaml0000664000175000017500000000072012650157641025534 0ustar marcomarco00000000000000name: $metadata.package summary: $metadata.summary maintainer: $metadata.maintainer description: | $metadata.description tags: # Replace "misc" with one or more whitelisted tags from this list: # https://jujucharms.com/docs/stable/authors-charm-metadata - misc subordinate: false provides: provides-relation: interface: interface-name requires: requires-relation: interface: interface-name peers: peer-relation: interface: interface-name charm-tools-2.1.2/charmtools/templates/ansible/files/icon.svg0000664000175000017500000002361312650157641024547 0ustar marcomarco00000000000000 image/svg+xml charm-tools-2.1.2/charmtools/templates/ansible/files/scripts/0000775000175000017500000000000012677251067024566 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/ansible/files/scripts/charm_helpers_sync.py0000775000175000017500000001577212650157641031021 0ustar marcomarco00000000000000#!/usr/bin/python # # Copyright 2013 Canonical Ltd. # Authors: # Adam Gandelman # import logging import optparse import os import subprocess import shutil import sys import tempfile import yaml from fnmatch import fnmatch CHARM_HELPERS_BRANCH = 'lp:charm-helpers' def parse_config(conf_file): if not os.path.isfile(conf_file): logging.error('Invalid config file: %s.' % conf_file) return False return yaml.load(open(conf_file).read()) def clone_helpers(work_dir, branch): dest = os.path.join(work_dir, 'charm-helpers') logging.info('Checking out %s to %s.' % (branch, dest)) cmd = ['bzr', 'branch', branch, dest] subprocess.check_call(cmd) return dest def _module_path(module): return os.path.join(*module.split('.')) def _src_path(src, module): return os.path.join(src, 'charmhelpers', _module_path(module)) def _dest_path(dest, module): return os.path.join(dest, _module_path(module)) def _is_pyfile(path): return os.path.isfile(path + '.py') def ensure_init(path): ''' ensure directories leading up to path are importable, omitting parent directory, eg path='/hooks/helpers/foo'/: hooks/ hooks/helpers/__init__.py hooks/helpers/foo/__init__.py ''' for d, dirs, files in os.walk(os.path.join(*path.split('/')[:2])): _i = os.path.join(d, '__init__.py') if not os.path.exists(_i): logging.info('Adding missing __init__.py: %s' % _i) open(_i, 'wb').close() def sync_pyfile(src, dest): src = src + '.py' src_dir = os.path.dirname(src) logging.info('Syncing pyfile: %s -> %s.' 
% (src, dest)) if not os.path.exists(dest): os.makedirs(dest) shutil.copy(src, dest) if os.path.isfile(os.path.join(src_dir, '__init__.py')): shutil.copy(os.path.join(src_dir, '__init__.py'), dest) ensure_init(dest) def get_filter(opts=None): opts = opts or [] if 'inc=*' in opts: # do not filter any files, include everything return None def _filter(dir, ls): incs = [opt.split('=').pop() for opt in opts if 'inc=' in opt] _filter = [] for f in ls: _f = os.path.join(dir, f) if not os.path.isdir(_f) and not _f.endswith('.py') and incs: if True not in [fnmatch(_f, inc) for inc in incs]: logging.debug('Not syncing %s, does not match include ' 'filters (%s)' % (_f, incs)) _filter.append(f) else: logging.debug('Including file, which matches include ' 'filters (%s): %s' % (incs, _f)) elif (os.path.isfile(_f) and not _f.endswith('.py')): logging.debug('Not syncing file: %s' % f) _filter.append(f) elif (os.path.isdir(_f) and not os.path.isfile(os.path.join(_f, '__init__.py'))): logging.debug('Not syncing directory: %s' % f) _filter.append(f) return _filter return _filter def sync_directory(src, dest, opts=None): if os.path.exists(dest): logging.debug('Removing existing directory: %s' % dest) shutil.rmtree(dest) logging.info('Syncing directory: %s -> %s.' % (src, dest)) shutil.copytree(src, dest, ignore=get_filter(opts)) ensure_init(dest) def sync(src, dest, module, opts=None): if os.path.isdir(_src_path(src, module)): sync_directory(_src_path(src, module), _dest_path(dest, module), opts) elif _is_pyfile(_src_path(src, module)): sync_pyfile(_src_path(src, module), os.path.dirname(_dest_path(dest, module))) else: logging.warn('Could not sync: %s. Neither a pyfile or directory, ' 'does it even exist?' % module) def parse_sync_options(options): if not options: return [] return options.split(',') def extract_options(inc, global_options=None): global_options = global_options or [] if global_options and isinstance(global_options, basestring): global_options = [global_options] if '|' not in inc: return (inc, global_options) inc, opts = inc.split('|') return (inc, parse_sync_options(opts) + global_options) def sync_helpers(include, src, dest, options=None): if not os.path.isdir(dest): os.mkdir(dest) global_options = parse_sync_options(options) for inc in include: if isinstance(inc, str): inc, opts = extract_options(inc, global_options) sync(src, dest, inc, opts) elif isinstance(inc, dict): # could also do nested dicts here. for k, v in inc.iteritems(): if isinstance(v, list): for m in v: inc, opts = extract_options(m, global_options) sync(src, dest, '%s.%s' % (k, inc), opts) if __name__ == '__main__': parser = optparse.OptionParser() parser.add_option('-c', '--config', action='store', dest='config', default=None, help='helper config file') parser.add_option('-D', '--debug', action='store_true', dest='debug', default=False, help='debug') parser.add_option('-b', '--branch', action='store', dest='branch', help='charm-helpers bzr branch (overrides config)') parser.add_option('-d', '--destination', action='store', dest='dest_dir', help='sync destination dir (overrides config)') (opts, args) = parser.parse_args() if opts.debug: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.INFO) if opts.config: logging.info('Loading charm helper config from %s.' % opts.config) config = parse_config(opts.config) if not config: logging.error('Could not parse config from %s.' 
% opts.config) sys.exit(1) else: config = {} if 'branch' not in config: config['branch'] = CHARM_HELPERS_BRANCH if opts.branch: config['branch'] = opts.branch if opts.dest_dir: config['destination'] = opts.dest_dir if 'destination' not in config: logging.error('No destination dir. specified as option or config.') sys.exit(1) if 'include' not in config: if not args: logging.error('No modules to sync specified as option or config.') sys.exit(1) config['include'] = [] [config['include'].append(a) for a in args] sync_options = None if 'options' in config: sync_options = config['options'] tmpd = tempfile.mkdtemp() try: checkout = clone_helpers(tmpd, config['branch']) sync_helpers(config['include'], checkout, config['destination'], options=sync_options) except Exception, e: logging.error("Could not sync: %s" % e) raise e finally: logging.debug('Cleaning up %s' % tmpd) shutil.rmtree(tmpd) charm-tools-2.1.2/charmtools/templates/ansible/__init__.py0000664000175000017500000000141212650157641024076 0ustar marcomarco00000000000000#!/usr/bin/python # # Copyright (C) 2014 Canonical Ltd. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . from .template import AnsibleCharmTemplate # noqa charm-tools-2.1.2/charmtools/templates/python_services/0000775000175000017500000000000012677251067023604 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/python_services/template.py0000664000175000017500000000423112650157641025763 0ustar marcomarco00000000000000#!/usr/bin/python # # Copyright (C) 2014 Canonical Ltd. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
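# Illustrative note (documentation only, not used by the code below): the
# $metadata.* placeholders in the files/ tree are expanded with Cheetah by
# _template_file() further down. A minimal sketch of that rendering, using
# placeholder values rather than real charm data, looks like:
#
#     from Cheetah.Template import Template
#     config = {'metadata': {'package': 'mycharm',
#                            'summary': 'one-line summary',
#                            'maintainer': 'You <you@example.com>',
#                            'description': 'longer description'}}
#     rendered = str(Template(file='files/metadata.yaml', searchList=[config]))
#
# In the real code path the config dict is supplied by charm-tools when the
# template is invoked; the values above are assumptions for illustration.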
import logging import os import os.path as path import time import shutil import tempfile from Cheetah.Template import Template from stat import ST_MODE from charmtools.generators import ( CharmTemplate, ) log = logging.getLogger(__name__) class PythonServicesCharmTemplate(CharmTemplate): """Creates a python-based charm using the services framework""" def create_charm(self, config, output_dir): self._copy_files(output_dir) for root, dirs, files in os.walk(output_dir): for outfile in files: if self.skip_template(outfile): continue self._template_file(config, path.join(root, outfile)) def _copy_files(self, output_dir): here = path.abspath(path.dirname(__file__)) template_dir = path.join(here, 'files') if os.path.exists(output_dir): shutil.rmtree(output_dir) shutil.copytree(template_dir, output_dir) def _template_file(self, config, outfile): if path.islink(outfile): return mode = os.stat(outfile)[ST_MODE] t = Template(file=outfile, searchList=(config)) o = tempfile.NamedTemporaryFile( dir=path.dirname(outfile), delete=False) os.chmod(o.name, mode) o.write(str(t)) o.close() backupname = outfile + str(time.time()) os.rename(outfile, backupname) os.rename(o.name, outfile) os.unlink(backupname) charm-tools-2.1.2/charmtools/templates/python_services/config.yaml0000664000175000017500000000000312650157641025720 0ustar marcomarco00000000000000{} charm-tools-2.1.2/charmtools/templates/python_services/files/0000775000175000017500000000000012677251067024706 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/python_services/files/hooks/0000775000175000017500000000000012677251067026031 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/python_services/files/hooks/start0000775000175000017500000000006412650157641027106 0ustar marcomarco00000000000000#!/usr/bin/python import services services.manage() charm-tools-2.1.2/charmtools/templates/python_services/files/hooks/upgrade-charm0000775000175000017500000000006412650157641030470 0ustar marcomarco00000000000000#!/usr/bin/python import services services.manage() charm-tools-2.1.2/charmtools/templates/python_services/files/hooks/setup.py0000664000175000017500000000070012650157641027532 0ustar marcomarco00000000000000def pre_install(): """ Do any setup required before the install hook. """ install_charmhelpers() def install_charmhelpers(): """ Install the charmhelpers library, if not present. 
""" try: import charmhelpers # noqa except ImportError: import subprocess subprocess.check_call(['apt-get', 'install', '-y', 'python-pip']) subprocess.check_call(['pip', 'install', 'charmhelpers']) charm-tools-2.1.2/charmtools/templates/python_services/files/hooks/config-changed0000775000175000017500000000006412650157641030605 0ustar marcomarco00000000000000#!/usr/bin/python import services services.manage() charm-tools-2.1.2/charmtools/templates/python_services/files/hooks/install0000775000175000017500000000057212650157641027423 0ustar marcomarco00000000000000#!/usr/bin/python import setup setup.pre_install() from charmhelpers.core import hookenv def install(): hookenv.log('Installing $metadata.package') # add steps for installing dependencies and packages here # e.g.: from charmhelpers import fetch # fetch.apt_install(fetch.filter_installed_packages(['nginx'])) if __name__ == "__main__": install() charm-tools-2.1.2/charmtools/templates/python_services/files/hooks/services.py0000664000175000017500000000164312650157641030224 0ustar marcomarco00000000000000#!/usr/bin/python from charmhelpers.core.services.base import ServiceManager from charmhelpers.core.services import helpers import actions def manage(): manager = ServiceManager([ { 'service': '$metadata.package', 'ports': [], # ports to after start 'provided_data': [ # context managers for provided relations # e.g.: helpers.HttpRelation() ], 'required_data': [ # data (contexts) required to start the service # e.g.: helpers.RequiredConfig('domain', 'auth_key'), # helpers.MysqlRelation(), ], 'data_ready': [ helpers.render_template( source='upstart.conf', target='/etc/init/$metadata.package'), actions.log_start, ], }, ]) manager.manage() charm-tools-2.1.2/charmtools/templates/python_services/files/hooks/stop0000775000175000017500000000006412650157641026736 0ustar marcomarco00000000000000#!/usr/bin/python import services services.manage() charm-tools-2.1.2/charmtools/templates/python_services/files/hooks/actions.py0000664000175000017500000000016312650157641030035 0ustar marcomarco00000000000000from charmhelpers.core import hookenv def log_start(service_name): hookenv.log('$metadata.package starting') charm-tools-2.1.2/charmtools/templates/python_services/files/unit_tests/0000775000175000017500000000000012677251067027107 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/python_services/files/unit_tests/test_actions.py0000775000175000017500000000077712650157641032170 0ustar marcomarco00000000000000#!/usr/bin/env python import sys import mock import unittest from pkg_resources import resource_filename # allow importing actions from the hooks directory sys.path.append(resource_filename(__name__, '../hooks')) import actions class TestActions(unittest.TestCase): @mock.patch('charmhelpers.core.hookenv.log') def test_log_start(self, log): actions.log_start('test-service') log.assert_called_once_with('$metadata.package starting') if __name__ == '__main__': unittest.main() charm-tools-2.1.2/charmtools/templates/python_services/files/config.yaml0000664000175000017500000000056712650157641027041 0ustar marcomarco00000000000000options: string-option: type: string default: "Default Value" description: "A short description of the configuration option" boolean-option: type: boolean default: False description: "A short description of the configuration option" int-option: type: int default: 9001 description: "A short description of the configuration option" 
charm-tools-2.1.2/charmtools/templates/python_services/files/tests/0000775000175000017500000000000012677251067026050 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/python_services/files/tests/10-deploy0000775000175000017500000000402712650157641027505 0ustar marcomarco00000000000000#!/usr/bin/env python3 import amulet import requests import unittest class TestDeployment(unittest.TestCase): @classmethod def setUpClass(cls): cls.deployment = amulet.Deployment() cls.deployment.add('$metadata.package') cls.deployment.expose('$metadata.package') try: cls.deployment.setup(timeout=900) cls.deployment.sentry.wait() except amulet.helpers.TimeoutError: amulet.raise_status(amulet.SKIP, msg="Environment wasn't stood up in time") except: raise cls.unit = cls.deployment.sentry.unit['$metadata.package/0'] def test_case(self): # Now you can use self.deployment.sentry.unit[UNIT] to address each of # the units and perform more in-depth steps. You can also reference # the first unit as self.unit. # # There are three test statuses that can be triggered with # amulet.raise_status(): # - amulet.PASS # - amulet.FAIL # - amulet.SKIP # # Each unit has the following methods: # - .info - An array of the information of that unit from Juju # - .file(PATH) - Get the details of a file on that unit # - .file_contents(PATH) - Get plain text output of PATH file from that unit # - .directory(PATH) - Get details of directory # - .directory_contents(PATH) - List files and folders in PATH on that unit # - .relation(relation, service:rel) - Get relation data from return service # add tests here to confirm service is up and working properly # # For example, to confirm that it has a functioning HTTP server: # # page = requests.get('http://{}'.format(self.unit.info['public-address'])) # page.raise_for_status() # # More information on writing Amulet tests can be found at: # # https://jujucharms.com/docs/stable/tools-amulet pass if __name__ == '__main__': unittest.main() charm-tools-2.1.2/charmtools/templates/python_services/files/tests/00-setup0000775000175000017500000000017312650157641027346 0ustar marcomarco00000000000000#!/bin/bash sudo add-apt-repository ppa:juju/stable -y sudo apt-get update sudo apt-get install amulet python-requests -y charm-tools-2.1.2/charmtools/templates/python_services/files/metadata.yaml0000664000175000017500000000072012650157641027343 0ustar marcomarco00000000000000name: $metadata.package summary: $metadata.summary maintainer: $metadata.maintainer description: | $metadata.description tags: # Replace "misc" with one or more whitelisted tags from this list: # https://jujucharms.com/docs/stable/authors-charm-metadata - misc subordinate: false provides: provides-relation: interface: interface-name requires: requires-relation: interface: interface-name peers: peer-relation: interface: interface-name charm-tools-2.1.2/charmtools/templates/python_services/files/README.example0000664000175000017500000000411212650157641027210 0ustar marcomarco00000000000000# Overview Describe the intended usage of this charm and anything unique about how this charm relates to others here. This README will be displayed in the Charm Store, it should be either Markdown or RST. Ideal READMEs include instructions on how to use the charm, expected usage, and charm features that your audience might be interested in. 
For an example of a well written README check out Hadoop: http://jujucharms.com/charms/precise/hadoop Use this as a Markdown reference if you need help with the formatting of this README: http://askubuntu.com/editing-help This charm provides [service](http://example.com). Add a description here of what the service itself actually does. Also remember to check the [icon guidelines](https://jujucharms.com/docs/stable/authors-charm-icon) so that your charm looks good in the Juju GUI. # Usage Step by step instructions on using the charm: juju deploy servicename and so on. If you're providing a web service or something that the end user needs to go to, tell them here, especially if you're deploying a service that might listen to a non-default port. You can then browse to http://ip-address to configure the service. ## Scale out Usage If the charm has any recommendations for running at scale, outline them in examples here. For example if you have a memcached relation that improves performance, mention it here. ## Known Limitations and Issues This not only helps users but gives people a place to start if they want to help you add features to your charm. # Configuration The configuration options will be listed on the charm store, however If you're making assumptions or opinionated decisions in the charm (like setting a default administrator password), you should detail that here so the user knows how to change it immediately, etc. # Contact Information Though this will be listed in the charm store itself don't assume a user will know that, so include that information here: ## Upstream Project Name - Upstream website - Upstream bug tracker - Upstream mailing list or contact information - Feel free to add things if it's useful for users charm-tools-2.1.2/charmtools/templates/python_services/files/icon.svg0000664000175000017500000002361312650157641026356 0ustar marcomarco00000000000000 image/svg+xml charm-tools-2.1.2/charmtools/templates/python_services/files/templates/0000775000175000017500000000000012677251067026704 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/python_services/files/templates/upstart.conf0000664000175000017500000000032412650157641031246 0ustar marcomarco00000000000000description "$metadata.package" author "$metadata.maintainer" start on runlevel [2345] stop on runlevel [016] respawn console log script echo Fake service; sleeping for an hour... sleep 360 end script charm-tools-2.1.2/charmtools/templates/python_services/__init__.py0000664000175000017500000000150612650157641025711 0ustar marcomarco00000000000000#!/usr/bin/python # # Copyright (C) 2014 Canonical Ltd. # Author: Clint Byrum # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
from .template import PythonServicesCharmTemplate # noqa charm-tools-2.1.2/charmtools/templates/reactive_bash/0000775000175000017500000000000012677251067023157 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/reactive_bash/template.py0000664000175000017500000000637012666071477025355 0ustar marcomarco00000000000000#!/usr/bin/python # # Copyright (C) 2014 Canonical Ltd. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . import logging import os import os.path as path import time import shutil import subprocess import tempfile from Cheetah.Template import Template from stat import ST_MODE from charmtools.generators import ( CharmTemplate, ) log = logging.getLogger(__name__) class ReactiveBashCharmTemplate(CharmTemplate): """Creates a reactive, layered bash-based charm""" # _EXTRA_FILES is the list of names of files present in the git repo # we don't want transferred over to the charm template: _EXTRA_FILES = ["README.md", ".git", ".gitmodules"] _TEMPLATE_URL = "https://github.com/juju-solutions/template-reactive-bash" def create_charm(self, config, output_dir): self._clone_template(config, output_dir) for root, dirs, files in os.walk(output_dir): for outfile in files: if self.skip_template(outfile): continue self._template_file(config, path.join(root, outfile)) def _template_file(self, config, outfile): if path.islink(outfile): return mode = os.stat(outfile)[ST_MODE] t = Template(file=outfile, searchList=(config)) o = tempfile.NamedTemporaryFile( dir=path.dirname(outfile), delete=False) os.chmod(o.name, mode) o.write(str(t)) o.close() backupname = outfile + str(time.time()) os.rename(outfile, backupname) os.rename(o.name, outfile) os.unlink(backupname) def _clone_template(self, config, output_dir): cmd = "git clone --recursive {} {}".format( self._TEMPLATE_URL, output_dir ) try: subprocess.check_call(cmd.split()) except OSError as e: raise Exception( "The below error has occurred whilst attempting to clone" "the charm template. Please make sure you have git" "installed on your system.\n" + e ) # iterate and remove all the unwanted files from the git repo: for item in [path.join(output_dir, i) for i in self._EXTRA_FILES]: if not path.exists(item): continue if path.isdir(item) and not path.islink(item): shutil.rmtree(item) else: os.remove(item) # rename handlers.sh to .sh new_name = '%s.sh' % config['metadata']['package'].replace('-', '_') os.rename(os.path.join(output_dir, 'reactive', 'handlers.sh'), os.path.join(output_dir, 'reactive', new_name)) charm-tools-2.1.2/charmtools/templates/reactive_bash/__init__.py0000664000175000017500000000150412666071477025273 0ustar marcomarco00000000000000#!/usr/bin/python # # Copyright (C) 2014 Canonical Ltd. # Author: Clint Byrum # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . from .template import ReactiveBashCharmTemplate # noqa charm-tools-2.1.2/charmtools/templates/powershell/0000775000175000017500000000000012677251067022544 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/powershell/template.py0000664000175000017500000000420612670117067024725 0ustar marcomarco00000000000000#!/usr/bin/python # # Copyright (C) 2016 Canonical Ltd. # Copyright (C) 2016 Cloudbase Solutions SRL # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . import os import os.path as path import shutil import subprocess from charmtools.generators import CharmTemplate class PowerShellCharmTemplate(CharmTemplate): """ CharmTemplate specific to PowerShell charms. """ # _EXTRA_FILES is the list of names of files present in the git repo # we don't want transferred over to the charm template: _EXTRA_FILES = ["README.md", ".git", ".gitmodules"] _TEMPLATE_URL = "https://github.com/cloudbase/windows-charms-boilerplate" def __init__(self): self.skip_parsing += ["*.ps1", "*.psm1"] def create_charm(self, config, output_dir): cmd = "git clone --recursive {} {}".format( self._TEMPLATE_URL, output_dir ) try: subprocess.check_call(cmd.split()) except OSError as e: raise Exception( "The below error has ocurred whilst attempting to clone" "the powershell charm template. Please make sure you have" "git installed on your system.\n" + e ) # iterate and remove all the unwanted files from the git repo: for item in [path.join(output_dir, i) for i in self._EXTRA_FILES]: if not path.exists(item): continue if path.isdir(item) and not path.islink(item): shutil.rmtree(item) else: os.remove(item) charm-tools-2.1.2/charmtools/templates/powershell/__init__.py0000664000175000017500000000147512670117067024656 0ustar marcomarco00000000000000#!/usr/bin/python # # Copyright (C) 2016 Canonical Ltd. # Copyright (C) 2016 Cloudbase Solutions SRL # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
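# Illustrative sketch (commented out; the names below are placeholders, not
# shipped code): template plugins such as the one re-exported here follow a
# common shape -- subclass CharmTemplate from charmtools.generators and
# implement create_charm(config, output_dir), optionally extending
# skip_parsing as PowerShellCharmTemplate does above:
#
#     from charmtools.generators import CharmTemplate
#
#     class MyCharmTemplate(CharmTemplate):
#         def create_charm(self, config, output_dir):
#             # copy or clone boilerplate into output_dir, then render the
#             # files that self.skip_template() does not exclude
#             ...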
from .template import PowerShellCharmTemplate # noqa charm-tools-2.1.2/charmtools/templates/python/0000775000175000017500000000000012677251067021701 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/python/template.py0000664000175000017500000000500412650157641024057 0ustar marcomarco00000000000000#!/usr/bin/python # # Copyright (C) 2014 Canonical Ltd. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . import logging import os import os.path as path import time import shutil import subprocess import tempfile from Cheetah.Template import Template from stat import ST_MODE from charmtools.generators import ( CharmTemplate, ) log = logging.getLogger(__name__) class PythonCharmTemplate(CharmTemplate): """Creates a python-based charm""" def create_charm(self, config, output_dir): self._copy_files(output_dir) for root, dirs, files in os.walk(output_dir): for outfile in files: if self.skip_template(outfile): continue self._template_file(config, path.join(root, outfile)) self._install_charmhelpers(output_dir) def _copy_files(self, output_dir): here = path.abspath(path.dirname(__file__)) template_dir = path.join(here, 'files') if os.path.exists(output_dir): shutil.rmtree(output_dir) shutil.copytree(template_dir, output_dir) def _template_file(self, config, outfile): if path.islink(outfile): return mode = os.stat(outfile)[ST_MODE] t = Template(file=outfile, searchList=(config)) o = tempfile.NamedTemporaryFile( dir=path.dirname(outfile), delete=False) os.chmod(o.name, mode) o.write(str(t)) o.close() backupname = outfile + str(time.time()) os.rename(outfile, backupname) os.rename(o.name, outfile) os.unlink(backupname) def _install_charmhelpers(self, output_dir): helpers_dest = os.path.join(output_dir, 'lib', 'charmhelpers') if not os.path.exists(helpers_dest): os.makedirs(helpers_dest) cmd = './scripts/charm_helpers_sync.py -c charm-helpers.yaml' subprocess.check_call(cmd.split(), cwd=output_dir) charm-tools-2.1.2/charmtools/templates/python/files/0000775000175000017500000000000012677251067023003 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/python/files/hooks/0000775000175000017500000000000012677251067024126 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/python/files/hooks/start0000775000175000017500000000071412650157641025205 0ustar marcomarco00000000000000#!/usr/bin/python import os import sys sys.path.insert(0, os.path.join(os.environ['CHARM_DIR'], 'lib')) from charmhelpers.core import ( hookenv, host, ) hooks = hookenv.Hooks() log = hookenv.log SERVICE = '$metadata.package' @hooks.hook('start') def start(): host.service_restart(SERVICE) or host.service_start(SERVICE) if __name__ == "__main__": # execute a hook based on the name the program is called by hooks.execute(sys.argv) charm-tools-2.1.2/charmtools/templates/python/files/hooks/upgrade-charm0000775000175000017500000000070212650157641026564 0ustar marcomarco00000000000000#!/usr/bin/python import os import sys sys.path.insert(0, 
os.path.join(os.environ['CHARM_DIR'], 'lib')) from charmhelpers.core import ( hookenv, host, ) hooks = hookenv.Hooks() log = hookenv.log SERVICE = '$metadata.package' @hooks.hook('upgrade-charm') def upgrade_charm(): log('Upgrading $metadata.package') if __name__ == "__main__": # execute a hook based on the name the program is called by hooks.execute(sys.argv) charm-tools-2.1.2/charmtools/templates/python/files/hooks/config-changed0000775000175000017500000000124112650157641026700 0ustar marcomarco00000000000000#!/usr/bin/python import os import sys sys.path.insert(0, os.path.join(os.environ['CHARM_DIR'], 'lib')) from charmhelpers.core import ( hookenv, host, ) from start import start hooks = hookenv.Hooks() log = hookenv.log SERVICE = '$metadata.package' @hooks.hook('config-changed') def config_changed(): config = hookenv.config() for key in config: if config.changed(key): log("config['{}'] changed from {} to {}".format( key, config.previous(key), config[key])) config.save() start() if __name__ == "__main__": # execute a hook based on the name the program is called by hooks.execute(sys.argv) charm-tools-2.1.2/charmtools/templates/python/files/hooks/install0000775000175000017500000000066712650157641025525 0ustar marcomarco00000000000000#!/usr/bin/python import os import sys sys.path.insert(0, os.path.join(os.environ['CHARM_DIR'], 'lib')) from charmhelpers.core import ( hookenv, host, ) hooks = hookenv.Hooks() log = hookenv.log SERVICE = '$metadata.package' @hooks.hook('install') def install(): log('Installing $metadata.package') if __name__ == "__main__": # execute a hook based on the name the program is called by hooks.execute(sys.argv) charm-tools-2.1.2/charmtools/templates/python/files/hooks/stop0000775000175000017500000000065012650157641025034 0ustar marcomarco00000000000000#!/usr/bin/python import os import sys sys.path.insert(0, os.path.join(os.environ['CHARM_DIR'], 'lib')) from charmhelpers.core import ( hookenv, host, ) hooks = hookenv.Hooks() log = hookenv.log SERVICE = '$metadata.package' @hooks.hook('stop') def stop(): host.service_stop(SERVICE) if __name__ == "__main__": # execute a hook based on the name the program is called by hooks.execute(sys.argv) charm-tools-2.1.2/charmtools/templates/python/files/README.ex0000664000175000017500000000411212650157641024266 0ustar marcomarco00000000000000# Overview Describe the intended usage of this charm and anything unique about how this charm relates to others here. This README will be displayed in the Charm Store, it should be either Markdown or RST. Ideal READMEs include instructions on how to use the charm, expected usage, and charm features that your audience might be interested in. For an example of a well written README check out Hadoop: http://jujucharms.com/charms/precise/hadoop Use this as a Markdown reference if you need help with the formatting of this README: http://askubuntu.com/editing-help This charm provides [service](http://example.com). Add a description here of what the service itself actually does. Also remember to check the [icon guidelines](https://jujucharms.com/docs/stable/authors-charm-icon) so that your charm looks good in the Juju GUI. # Usage Step by step instructions on using the charm: juju deploy servicename and so on. If you're providing a web service or something that the end user needs to go to, tell them here, especially if you're deploying a service that might listen to a non-default port. You can then browse to http://ip-address to configure the service. 
## Scale out Usage If the charm has any recommendations for running at scale, outline them in examples here. For example if you have a memcached relation that improves performance, mention it here. ## Known Limitations and Issues This not only helps users but gives people a place to start if they want to help you add features to your charm. # Configuration The configuration options will be listed on the charm store, however If you're making assumptions or opinionated decisions in the charm (like setting a default administrator password), you should detail that here so the user knows how to change it immediately, etc. # Contact Information Though this will be listed in the charm store itself don't assume a user will know that, so include that information here: ## Upstream Project Name - Upstream website - Upstream bug tracker - Upstream mailing list or contact information - Feel free to add things if it's useful for users charm-tools-2.1.2/charmtools/templates/python/files/charm-helpers.yaml0000664000175000017500000000011112650157641026404 0ustar marcomarco00000000000000destination: lib/charmhelpers branch: lp:charm-helpers include: - core charm-tools-2.1.2/charmtools/templates/python/files/config.yaml0000664000175000017500000000056712650157641025136 0ustar marcomarco00000000000000options: string-option: type: string default: "Default Value" description: "A short description of the configuration option" boolean-option: type: boolean default: False description: "A short description of the configuration option" int-option: type: int default: 9001 description: "A short description of the configuration option" charm-tools-2.1.2/charmtools/templates/python/files/revision0000664000175000017500000000000212650157641024546 0ustar marcomarco000000000000001 charm-tools-2.1.2/charmtools/templates/python/files/tests/0000775000175000017500000000000012677251067024145 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/python/files/tests/10-deploy0000775000175000017500000000222012650157641025573 0ustar marcomarco00000000000000#!/usr/bin/python3 import amulet import requests d = amulet.Deployment() d.add('$metadata.package') d.expose('$metadata.package') try: d.setup(timeout=900) d.sentry.wait() except amulet.helpers.TimeoutError: amulet.raise_status(amulet.SKIP, msg="Environment wasn't stood up in time") except: raise unit = d.sentry.unit['$metadata.package/0'] # test we can access over http page = requests.get('http://{}'.format(unit.info['public-address'])) page.raise_for_status() # Now you can use d.sentry.unit[UNIT] to address each of the units and perform # more in-depth steps. There are three test statuses: amulet.PASS, amulet.FAIL, # and amulet.SKIP - these can be triggered with amulet.raise_status(). 
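# As an illustrative sketch (the path and service names below are placeholders;
# the relation names mirror this template's metadata.yaml), the unit methods
# listed next can drive further checks, e.g.:
#
#   hosts = unit.file_contents('/etc/hosts')
#   rel = unit.relation('provides-relation', 'otherservice:requires-relation')
#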
Each # d.sentry.unit[] has the following methods: # - .info - An array of the information of that unit from Juju # - .file(PATH) - Get the details of a file on that unit # - .file_contents(PATH) - Get plain text output of PATH file from that unit # - .directory(PATH) - Get details of directory # - .directory_contents(PATH) - List files and folders in PATH on that unit # - .relation(relation, service:rel) - Get relation data from return service charm-tools-2.1.2/charmtools/templates/python/files/tests/00-setup0000775000175000017500000000017312650157641025443 0ustar marcomarco00000000000000#!/bin/bash sudo add-apt-repository ppa:juju/stable -y sudo apt-get update sudo apt-get install amulet python-requests -y charm-tools-2.1.2/charmtools/templates/python/files/metadata.yaml0000664000175000017500000000072012650157641025440 0ustar marcomarco00000000000000name: $metadata.package summary: $metadata.summary maintainer: $metadata.maintainer description: | $metadata.description tags: # Replace "misc" with one or more whitelisted tags from this list: # https://jujucharms.com/docs/stable/authors-charm-metadata - misc subordinate: false provides: provides-relation: interface: interface-name requires: requires-relation: interface: interface-name peers: peer-relation: interface: interface-name charm-tools-2.1.2/charmtools/templates/python/files/icon.svg0000664000175000017500000002361312650157641024453 0ustar marcomarco00000000000000 image/svg+xml charm-tools-2.1.2/charmtools/templates/python/files/scripts/0000775000175000017500000000000012677251067024472 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/python/files/scripts/charm_helpers_sync.py0000775000175000017500000001577612670117067030731 0ustar marcomarco00000000000000#!/usr/bin/env python # # Copyright 2013 Canonical Ltd. # Authors: # Adam Gandelman # import logging import optparse import os import subprocess import shutil import sys import tempfile import yaml from fnmatch import fnmatch CHARM_HELPERS_BRANCH = 'lp:charm-helpers' def parse_config(conf_file): if not os.path.isfile(conf_file): logging.error('Invalid config file: %s.' % conf_file) return False return yaml.load(open(conf_file).read()) def clone_helpers(work_dir, branch): dest = os.path.join(work_dir, 'charm-helpers') logging.info('Checking out %s to %s.' % (branch, dest)) cmd = ['bzr', 'branch', branch, dest] subprocess.check_call(cmd) return dest def _module_path(module): return os.path.join(*module.split('.')) def _src_path(src, module): return os.path.join(src, 'charmhelpers', _module_path(module)) def _dest_path(dest, module): return os.path.join(dest, _module_path(module)) def _is_pyfile(path): return os.path.isfile(path + '.py') def ensure_init(path): ''' ensure directories leading up to path are importable, omitting parent directory, eg path='/hooks/helpers/foo/': hooks/ hooks/helpers/__init__.py hooks/helpers/foo/__init__.py ''' for d, dirs, files in os.walk(os.path.join(*path.split('/')[:2])): _i = os.path.join(d, '__init__.py') if not os.path.exists(_i): logging.info('Adding missing __init__.py: %s' % _i) open(_i, 'wb').close() def sync_pyfile(src, dest): src = src + '.py' src_dir = os.path.dirname(src) logging.info('Syncing pyfile: %s -> %s.' 
% (src, dest)) if not os.path.exists(dest): os.makedirs(dest) shutil.copy(src, dest) if os.path.isfile(os.path.join(src_dir, '__init__.py')): shutil.copy(os.path.join(src_dir, '__init__.py'), dest) ensure_init(dest) def get_filter(opts=None): opts = opts or [] if 'inc=*' in opts: # do not filter any files, include everything return None def _filter(dir, ls): incs = [opt.split('=').pop() for opt in opts if 'inc=' in opt] _filter = [] for f in ls: _f = os.path.join(dir, f) if not os.path.isdir(_f) and not _f.endswith('.py') and incs: if True not in [fnmatch(_f, inc) for inc in incs]: logging.debug('Not syncing %s, does not match include ' 'filters (%s)' % (_f, incs)) _filter.append(f) else: logging.debug('Including file, which matches include ' 'filters (%s): %s' % (incs, _f)) elif (os.path.isfile(_f) and not _f.endswith('.py')): logging.debug('Not syncing file: %s' % f) _filter.append(f) elif (os.path.isdir(_f) and not os.path.isfile(os.path.join(_f, '__init__.py'))): logging.debug('Not syncing directory: %s' % f) _filter.append(f) return _filter return _filter def sync_directory(src, dest, opts=None): if os.path.exists(dest): logging.debug('Removing existing directory: %s' % dest) shutil.rmtree(dest) logging.info('Syncing directory: %s -> %s.' % (src, dest)) shutil.copytree(src, dest, ignore=get_filter(opts)) ensure_init(dest) def sync(src, dest, module, opts=None): if os.path.isdir(_src_path(src, module)): sync_directory(_src_path(src, module), _dest_path(dest, module), opts) elif _is_pyfile(_src_path(src, module)): sync_pyfile(_src_path(src, module), os.path.dirname(_dest_path(dest, module))) else: logging.warn('Could not sync: %s. Neither a pyfile or directory, ' 'does it even exist?' % module) def parse_sync_options(options): if not options: return [] return options.split(',') def extract_options(inc, global_options=None): global_options = global_options or [] if global_options and isinstance(global_options, basestring): global_options = [global_options] if '|' not in inc: return (inc, global_options) inc, opts = inc.split('|') return (inc, parse_sync_options(opts) + global_options) def sync_helpers(include, src, dest, options=None): if not os.path.isdir(dest): os.mkdir(dest) global_options = parse_sync_options(options) for inc in include: if isinstance(inc, str): inc, opts = extract_options(inc, global_options) sync(src, dest, inc, opts) elif isinstance(inc, dict): # could also do nested dicts here. for k, v in inc.iteritems(): if isinstance(v, list): for m in v: inc, opts = extract_options(m, global_options) sync(src, dest, '%s.%s' % (k, inc), opts) if __name__ == '__main__': parser = optparse.OptionParser() parser.add_option('-c', '--config', action='store', dest='config', default=None, help='helper config file') parser.add_option('-D', '--debug', action='store_true', dest='debug', default=False, help='debug') parser.add_option('-b', '--branch', action='store', dest='branch', help='charm-helpers bzr branch (overrides config)') parser.add_option('-d', '--destination', action='store', dest='dest_dir', help='sync destination dir (overrides config)') (opts, args) = parser.parse_args() if opts.debug: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.INFO) if opts.config: logging.info('Loading charm helper config from %s.' % opts.config) config = parse_config(opts.config) if not config: logging.error('Could not parse config from %s.' 
% opts.config) sys.exit(1) else: config = {} if 'branch' not in config: config['branch'] = CHARM_HELPERS_BRANCH if opts.branch: config['branch'] = opts.branch if opts.dest_dir: config['destination'] = opts.dest_dir if 'destination' not in config: logging.error('No destination dir. specified as option or config.') sys.exit(1) if 'include' not in config: if not args: logging.error('No modules to sync specified as option or config.') sys.exit(1) config['include'] = [] [config['include'].append(a) for a in args] sync_options = None if 'options' in config: sync_options = config['options'] tmpd = tempfile.mkdtemp() try: checkout = clone_helpers(tmpd, config['branch']) sync_helpers(config['include'], checkout, config['destination'], options=sync_options) except Exception, e: logging.error("Could not sync: %s" % e) raise e finally: logging.debug('Cleaning up %s' % tmpd) shutil.rmtree(tmpd) charm-tools-2.1.2/charmtools/templates/python/__init__.py0000664000175000017500000000147612650157641024014 0ustar marcomarco00000000000000#!/usr/bin/python # # Copyright (C) 2014 Canonical Ltd. # Author: Clint Byrum # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . from .template import PythonCharmTemplate # noqa charm-tools-2.1.2/charmtools/templates/bash/0000775000175000017500000000000012677251067021275 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/bash/template.py0000664000175000017500000000423412650157641023457 0ustar marcomarco00000000000000#!/usr/bin/python # # Copyright (C) 2014 Canonical Ltd. # Author: Clint Byrum # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
import logging import os import os.path as path import time import shutil import tempfile from Cheetah.Template import Template from stat import ST_MODE from charmtools.generators import CharmTemplate log = logging.getLogger(__name__) class BashCharmTemplate(CharmTemplate): """Creates a bash-based charm""" def create_charm(self, config, output_dir): self._copy_files(output_dir) for root, dirs, files in os.walk(output_dir): for outfile in files: if self.skip_template(outfile): continue self._template_file(config, path.join(root, outfile)) def _copy_files(self, output_dir): here = path.abspath(path.dirname(__file__)) template_dir = path.join(here, 'files') if os.path.exists(output_dir): shutil.rmtree(output_dir) shutil.copytree(template_dir, output_dir) def _template_file(self, config, outfile): if path.islink(outfile): return mode = os.stat(outfile)[ST_MODE] t = Template(file=outfile, searchList=(config)) o = tempfile.NamedTemporaryFile( dir=path.dirname(outfile), delete=False) os.chmod(o.name, mode) o.write(str(t)) o.close() backupname = outfile + str(time.time()) os.rename(outfile, backupname) os.rename(o.name, outfile) os.unlink(backupname) charm-tools-2.1.2/charmtools/templates/bash/files/0000775000175000017500000000000012677251067022377 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/bash/files/hooks/0000775000175000017500000000000012677251067023522 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/bash/files/hooks/relation-name-relation-changed0000775000175000017500000000053112650157641031376 0ustar marcomarco00000000000000#!/bin/bash # # This must be renamed to the name of the relation. The goal here is to # affect any change needed by relationships being formed, modified, or broken # This script should be idempotent. #raw juju-log $JUJU_REMOTE_UNIT modified its settings juju-log Relation settings: relation-get juju-log Relation members: relation-list #end raw charm-tools-2.1.2/charmtools/templates/bash/files/hooks/start0000775000175000017500000000023512650157641024577 0ustar marcomarco00000000000000#!/bin/bash # Here put anything that is needed to start the service. # Note that currently this is run directly after install # i.e. 'service apache2 start' charm-tools-2.1.2/charmtools/templates/bash/files/hooks/upgrade-charm0000775000175000017500000000035512650157641026164 0ustar marcomarco00000000000000#!/bin/bash # This hook is executed each time a charm is upgraded after the new charm # contents have been unpacked # # Best practice suggests you execute the hooks/install and # hooks/config-changed to ensure all updates are processed charm-tools-2.1.2/charmtools/templates/bash/files/hooks/relation-name-relation-departed0000775000175000017500000000036012650157641031575 0ustar marcomarco00000000000000#!/bin/sh # This must be renamed to the name of the relation. The goal here is to # affect any change needed by the remote unit leaving the relationship. # This script should be idempotent. #raw juju-log $JUJU_REMOTE_UNIT departed #end raw charm-tools-2.1.2/charmtools/templates/bash/files/hooks/config-changed0000775000175000017500000000013612650157641026276 0ustar marcomarco00000000000000#!/bin/bash # config-changed occurs everytime a new configuration value is updated (juju set) charm-tools-2.1.2/charmtools/templates/bash/files/hooks/install0000775000175000017500000000060412650157641025110 0ustar marcomarco00000000000000#!/bin/bash # # Here do anything needed to install the service # i.e. 
apt-get install -y foo or bzr branch http://myserver/mycode /srv/webroot # # Make sure this hook exits cleanly and is idempotent, common problems here are # failing to account for a debconf question on a dependency, or trying to pull # from github without installing git first. apt-get install -y $metadata.package charm-tools-2.1.2/charmtools/templates/bash/files/hooks/relation-name-relation-joined0000775000175000017500000000033712650157641031261 0ustar marcomarco00000000000000#!/bin/sh # This must be renamed to the name of the relation. The goal here is to # affect any change needed by relationships being formed # This script should be idempotent. #raw juju-log $JUJU_REMOTE_UNIT joined #end raw charm-tools-2.1.2/charmtools/templates/bash/files/hooks/stop0000775000175000017500000000054312650157641024431 0ustar marcomarco00000000000000#!/bin/bash # This will be run when the service is being torn down, allowing you to disable # it in various ways.. # For example, if your web app uses a text file to signal to the load balancer # that it is live... you could remove it and sleep for a bit to allow the load # balancer to stop sending traffic. # rm /srv/webroot/server-live.txt && sleep 30 charm-tools-2.1.2/charmtools/templates/bash/files/hooks/relation-name-relation-broken0000775000175000017500000000013012650157641031260 0ustar marcomarco00000000000000#!/bin/sh # This hook runs when the full relation is removed (not just a single member) charm-tools-2.1.2/charmtools/templates/bash/files/README.ex0000664000175000017500000000411212650157641023662 0ustar marcomarco00000000000000# Overview Describe the intended usage of this charm and anything unique about how this charm relates to others here. This README will be displayed in the Charm Store, it should be either Markdown or RST. Ideal READMEs include instructions on how to use the charm, expected usage, and charm features that your audience might be interested in. For an example of a well written README check out Hadoop: http://jujucharms.com/charms/precise/hadoop Use this as a Markdown reference if you need help with the formatting of this README: http://askubuntu.com/editing-help This charm provides [service](http://example.com). Add a description here of what the service itself actually does. Also remember to check the [icon guidelines](https://jujucharms.com/docs/stable/authors-charm-icon) so that your charm looks good in the Juju GUI. # Usage Step by step instructions on using the charm: juju deploy servicename and so on. If you're providing a web service or something that the end user needs to go to, tell them here, especially if you're deploying a service that might listen to a non-default port. You can then browse to http://ip-address to configure the service. ## Scale out Usage If the charm has any recommendations for running at scale, outline them in examples here. For example if you have a memcached relation that improves performance, mention it here. ## Known Limitations and Issues This not only helps users but gives people a place to start if they want to help you add features to your charm. # Configuration The configuration options will be listed on the charm store, however If you're making assumptions or opinionated decisions in the charm (like setting a default administrator password), you should detail that here so the user knows how to change it immediately, etc. 
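For example, with the stock options shipped in this charm's config.yaml, a
user could change a setting at runtime with:

    juju set servicename string-option="some value"

(Substitute your real service and option names; the value above is only an
example.)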
# Contact Information Though this will be listed in the charm store itself don't assume a user will know that, so include that information here: ## Upstream Project Name - Upstream website - Upstream bug tracker - Upstream mailing list or contact information - Feel free to add things if it's useful for users charm-tools-2.1.2/charmtools/templates/bash/files/config.yaml0000664000175000017500000000056712650157641024532 0ustar marcomarco00000000000000options: string-option: type: string default: "Default Value" description: "A short description of the configuration option" boolean-option: type: boolean default: False description: "A short description of the configuration option" int-option: type: int default: 9001 description: "A short description of the configuration option" charm-tools-2.1.2/charmtools/templates/bash/files/revision0000664000175000017500000000000212650157641024142 0ustar marcomarco000000000000001 charm-tools-2.1.2/charmtools/templates/bash/files/metadata.yaml0000664000175000017500000000072012650157641025034 0ustar marcomarco00000000000000name: $metadata.package summary: $metadata.summary maintainer: $metadata.maintainer description: | $metadata.description tags: # Replace "misc" with one or more whitelisted tags from this list: # https://jujucharms.com/docs/stable/authors-charm-metadata - misc subordinate: false provides: provides-relation: interface: interface-name requires: requires-relation: interface: interface-name peers: peer-relation: interface: interface-name charm-tools-2.1.2/charmtools/templates/bash/files/icon.svg0000664000175000017500000002361312650157641024047 0ustar marcomarco00000000000000 image/svg+xml charm-tools-2.1.2/charmtools/templates/bash/__init__.py0000664000175000017500000000147412650157641023406 0ustar marcomarco00000000000000#!/usr/bin/python # # Copyright (C) 2014 Canonical Ltd. # Author: Clint Byrum # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . from .template import BashCharmTemplate # noqa charm-tools-2.1.2/charmtools/templates/tests/0000775000175000017500000000000012677251067021522 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/tests/99-autogen.tpl0000664000175000017500000000401012650157641024131 0ustar marcomarco00000000000000#!/usr/bin/env python3 import amulet import requests import unittest class TestDeployment(unittest.TestCase): @classmethod def setUpClass(cls): cls.deployment = amulet.Deployment(series='$series') #for $s in $deploy cls.deployment.add('$s') #end for #for $r in $relate cls.deployment.relate('$r[0]', '$r[1]') #end for try: cls.deployment.setup(timeout=900) cls.deployment.sentry.wait() except amulet.helpers.TimeoutError: amulet.raise_status(amulet.SKIP, msg="Environment wasn't stood up in time") except: raise def test_case(self): # Now you can use self.deployment.sentry.unit[UNIT] to address each of # the units and perform more in-depth steps. You can also reference # the first unit as self.unit. 
# # There are three test statuses that can be triggered with # amulet.raise_status(): # - amulet.PASS # - amulet.FAIL # - amulet.SKIP # # Each unit has the following methods: # - .info - An array of the information of that unit from Juju # - .file(PATH) - Get the details of a file on that unit # - .file_contents(PATH) - Get plain text output of PATH file from that unit # - .directory(PATH) - Get details of directory # - .directory_contents(PATH) - List files and folders in PATH on that unit # - .relation(relation, service:rel) - Get relation data from return service # add tests here to confirm service is up and working properly # # For example, to confirm that it has a functioning HTTP server: # # page = requests.get('http://{}'.format(self.unit.info['public-address'])) # page.raise_for_status() # # More information on writing Amulet tests can be found at: # # https://jujucharms.com/docs/stable/tools-amulet pass if __name__ == '__main__': unittest.main() charm-tools-2.1.2/charmtools/templates/charm/0000775000175000017500000000000012677251067021452 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/charm/README.ex0000664000175000017500000000411212650157641022735 0ustar marcomarco00000000000000# Overview Describe the intended usage of this charm and anything unique about how this charm relates to others here. This README will be displayed in the Charm Store, it should be either Markdown or RST. Ideal READMEs include instructions on how to use the charm, expected usage, and charm features that your audience might be interested in. For an example of a well written README check out Hadoop: http://jujucharms.com/charms/precise/hadoop Use this as a Markdown reference if you need help with the formatting of this README: http://askubuntu.com/editing-help This charm provides [service](http://example.com). Add a description here of what the service itself actually does. Also remember to check the [icon guidelines](https://jujucharms.com/docs/stable/authors-charm-icon) so that your charm looks good in the Juju GUI. # Usage Step by step instructions on using the charm: juju deploy servicename and so on. If you're providing a web service or something that the end user needs to go to, tell them here, especially if you're deploying a service that might listen to a non-default port. You can then browse to http://ip-address to configure the service. ## Scale out Usage If the charm has any recommendations for running at scale, outline them in examples here. For example if you have a memcached relation that improves performance, mention it here. ## Known Limitations and Issues This not only helps users but gives people a place to start if they want to help you add features to your charm. # Configuration The configuration options will be listed on the charm store, however If you're making assumptions or opinionated decisions in the charm (like setting a default administrator password), you should detail that here so the user knows how to change it immediately, etc. 
# Contact Information Though this will be listed in the charm store itself don't assume a user will know that, so include that information here: ## Upstream Project Name - Upstream website - Upstream bug tracker - Upstream mailing list or contact information - Feel free to add things if it's useful for users charm-tools-2.1.2/charmtools/templates/charm/icon.svg0000664000175000017500000002361312650157641023122 0ustar marcomarco00000000000000 image/svg+xml charm-tools-2.1.2/charmtools/templates/__init__.py0000664000175000017500000000000012650157641022451 0ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/chef/0000775000175000017500000000000012677251067021265 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/chef/template.py0000664000175000017500000000471712650157641023455 0ustar marcomarco00000000000000#!/usr/bin/python # # Copyright (C) 2014 Canonical Ltd. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . import logging import os import os.path as path import time import shutil import tempfile from Cheetah.Template import Template from stat import ST_MODE from charmtools.generators import ( CharmTemplate, ) log = logging.getLogger(__name__) class ChefCharmTemplate(CharmTemplate): def create_charm(self, config, output_dir): cb_path = "cookbooks/{}".format(config['metadata']['package']) to_parse = ['metadata.yaml', 'metadata.rb', 'stub', '99-autogen'] self._copy_files(output_dir, cb_path) for root, dirs, files in os.walk(output_dir): for outfile in files: if outfile in to_parse: self._template_file(config, path.join(root, outfile)) def _copy_files(self, output_dir, cb_path): here = path.abspath(path.dirname(__file__)) template_dir = path.join(here, 'files') if os.path.exists(output_dir): shutil.rmtree(output_dir) shutil.copytree(template_dir, output_dir) self._setup_cookbook(output_dir, cb_path) def _setup_cookbook(self, output_dir, cb_path): outpath = "{}/cookbooks/charm-name".format(output_dir) cb_path = "{}/{}".format(output_dir, cb_path) shutil.move(outpath, cb_path) def _template_file(self, config, outfile): if path.islink(outfile): return mode = os.stat(outfile)[ST_MODE] t = Template(file=outfile, searchList=(config)) o = tempfile.NamedTemporaryFile( dir=path.dirname(outfile), delete=False) os.chmod(o.name, mode) o.write(str(t)) o.close() backupname = outfile + str(time.time()) os.rename(outfile, backupname) os.rename(o.name, outfile) os.unlink(backupname) charm-tools-2.1.2/charmtools/templates/chef/files/0000775000175000017500000000000012677251067022367 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/chef/files/hooks/0000775000175000017500000000000012677251067023512 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/chef/files/hooks/relation-name-relation-changed0000775000175000017500000000005612650157641031370 0ustar marcomarco00000000000000#!/bin/bash set -e source $(dirname $0)/stub 
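# Documentation-only note: every hook in this Chef template is the same thin
# wrapper. The stub it sources (see the stub script below) maps the hook name
# to a cookbook recipe and runs it with chef-solo, roughly:
#
#   HOOK_NAME=$(basename $0)                    # e.g. config-changed
#   bundle exec chef-solo -o "recipe[$COOKBOOK_NAME::$RECIPE_NAME]"
#
# Relation hooks resolve to the <name>-relation cookbook instead, so new hook
# behaviour lives in recipes rather than in these wrappers.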
charm-tools-2.1.2/charmtools/templates/chef/files/hooks/start0000775000175000017500000000005612650157641024570 0ustar marcomarco00000000000000#!/bin/bash set -e source $(dirname $0)/stub charm-tools-2.1.2/charmtools/templates/chef/files/hooks/upgrade-charm0000775000175000017500000000005612650157641026152 0ustar marcomarco00000000000000#!/bin/bash set -e source $(dirname $0)/stub charm-tools-2.1.2/charmtools/templates/chef/files/hooks/relation-name-relation-departed0000775000175000017500000000005612650157641031567 0ustar marcomarco00000000000000#!/bin/bash set -e source $(dirname $0)/stub charm-tools-2.1.2/charmtools/templates/chef/files/hooks/config-changed0000775000175000017500000000005612650157641026267 0ustar marcomarco00000000000000#!/bin/bash set -e source $(dirname $0)/stub charm-tools-2.1.2/charmtools/templates/chef/files/hooks/install0000775000175000017500000000013312650157641025075 0ustar marcomarco00000000000000#!/bin/bash set -e source $(dirname $0)/bootstrap cd $CHARM_DIR source $(dirname $0)/stub charm-tools-2.1.2/charmtools/templates/chef/files/hooks/stub0000775000175000017500000000056012650157641024410 0ustar marcomarco00000000000000#!/bin/bash set -e HOOK_NAME=\$(basename \$0) CHARM_NAME=$metadata.package if [[ "\$HOOK_NAME" == *-relation-* ]]; then COOKBOOK_NAME="\${HOOK_NAME%-relation*}-relation" RECIPE_NAME=\${HOOK_NAME\#*relation-} else COOKBOOK_NAME=\$CHARM_NAME RECIPE_NAME=\$HOOK_NAME fi cd "\$(dirname \$0)" && bundle exec chef-solo -o "recipe[\$COOKBOOK_NAME::\$RECIPE_NAME]" charm-tools-2.1.2/charmtools/templates/chef/files/hooks/relation-name-relation-joined0000775000175000017500000000005612650157641031247 0ustar marcomarco00000000000000#!/bin/bash set -e source $(dirname $0)/stub charm-tools-2.1.2/charmtools/templates/chef/files/hooks/stop0000775000175000017500000000005612650157641024420 0ustar marcomarco00000000000000#!/bin/bash set -e source $(dirname $0)/stub charm-tools-2.1.2/charmtools/templates/chef/files/hooks/relation-name-relation-broken0000775000175000017500000000005612650157641031257 0ustar marcomarco00000000000000#!/bin/bash set -e source $(dirname $0)/stub charm-tools-2.1.2/charmtools/templates/chef/files/hooks/bootstrap0000775000175000017500000000056512650157641025455 0ustar marcomarco00000000000000#!/bin/bash set -e apt-get update sudo apt-get install -y ruby1.9.1 ruby1.9.1-dev \ rubygems1.9.1 irb1.9.1 ri1.9.1 rdoc1.9.1 \ build-essential libopenssl-ruby1.9.1 libssl-dev zlib1g-dev gem install bundler --no-rdoc --no-ri cd "$(dirname $0)../cookbooks" && bundle install mkdir -p /etc/chef cat > /etc/chef/solo.rb < 0.6, >= 0.6.9) minitest (~> 4.2) multi_json (~> 1.3) thread_safe (~> 0.1) tzinfo (~> 0.3.37) chef (11.6.0) erubis highline (>= 1.6.9) json (>= 1.4.4, <= 1.7.7) mixlib-authentication (>= 1.3.0) mixlib-cli (~> 1.3.0) mixlib-config (>= 1.1.2) mixlib-log (>= 1.3.0) mixlib-shellout net-ssh (~> 2.6) net-ssh-multi (~> 1.1.0) ohai (>= 0.6.0) rest-client (>= 1.0.4, < 1.7.0) yajl-ruby (~> 1.1) erubis (2.7.0) highline (1.6.19) i18n (0.6.11) ipaddress (0.8.0) json (1.7.7) mime-types (1.23) minitest (4.7.5) mixlib-authentication (1.3.0) mixlib-log mixlib-cli (1.3.0) mixlib-config (1.1.2) mixlib-log (1.6.0) mixlib-shellout (1.2.0) multi_json (1.10.1) net-ssh (2.6.8) net-ssh-gateway (1.2.0) net-ssh (>= 2.6.5) net-ssh-multi (1.1) net-ssh (>= 2.1.4) net-ssh-gateway (>= 0.99.0) ohai (6.18.0) ipaddress mixlib-cli mixlib-config mixlib-log mixlib-shellout systemu yajl-ruby rest-client (1.6.7) mime-types (>= 1.16) systemu (2.5.2) thread_safe (0.3.4) tzinfo (0.3.41) 
yajl-ruby (1.1.0) PLATFORMS ruby DEPENDENCIES activesupport (~> 4.0.0) chef (~> 11.6.0) charm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/Gemfile0000664000175000017500000000012612650157641025644 0ustar marcomarco00000000000000source "https://rubygems.org" gem 'chef', '~> 11.6.0' gem 'activesupport', '~> 4.0.0'charm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/relation-name-relation/0000775000175000017500000000000012677251067030726 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/relation-name-relation/recipes/0000775000175000017500000000000012677251067032360 5ustar marcomarco00000000000000././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000charm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/relation-name-relation/recipes/changed.rbcharm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/relation-name-relation/recipes/changed.r0000664000175000017500000000000012650157641034114 0ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/relation-name-relation/recipes/broken.rb0000664000175000017500000000000012650157641034145 0ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/relation-name-relation/recipes/joined.rb0000664000175000017500000000000012650157641034135 0ustar marcomarco00000000000000././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000charm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/relation-name-relation/recipes/departed.rbcharm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/relation-name-relation/recipes/departed.0000664000175000017500000000000012650157641034131 0ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/relation-name-relation/metadata.rb0000664000175000017500000000036012650157641033024 0ustar marcomarco00000000000000maintainer "$metadata.maintainer" maintainer_email "$metadata.maintainer" license "GPL-3" description "$metadata.summary" version "0.1" name "relation-name-relation" depends "juju-helpers" charm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/juju-helpers/0000775000175000017500000000000012677251067026775 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/juju-helpers/libraries/0000775000175000017500000000000012677251067030751 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/juju-helpers/libraries/juju/0000775000175000017500000000000012677251067031726 5ustar marcomarco00000000000000././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000charm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/juju-helpers/libraries/juju/juju_helpers_dev.rbcharm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/juju-helpers/libraries/juju/juju_helpers0000664000175000017500000000046412650157641034346 0ustar marcomarco00000000000000module JujuHelpersDev def relation_ids(relation_name = nil) [] end def relation_list(relation_id = nil) {} end def relation_get(unit_name = nil, relation_id = nil) {} end def config_get {} end def unit_get(key) nil end def juju_log(text) puts text end end././@LongLink0000000000000000000000000000015000000000000011211 Lustar 
00000000000000charm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/juju-helpers/libraries/juju/juju_helpers.rbcharm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/juju-helpers/libraries/juju/juju_helpers0000664000175000017500000000232212650157641034341 0ustar marcomarco00000000000000module JujuHelpers HOOK_ENVIRONMENT = %w(juju_unit_name juju_relation juju_remote_unit) COMMANDS = '' HOOK_ENVIRONMENT.each do |method| define_method method do ENV[method.upcase] end end def relation_ids(relation_name = nil) commands = ['relation-ids --fromat=json'] commands << relation_name if relation_name run(commands.join(' ')).try { |relations| JSON.load(relations) } end def relation_list(relation_id = nil) commands = ['relation-list --format=json'] commands << "-r #{relation_id}" if relation_id run(commands.join(' ')).try { |relations| JSON.load(relations) } end def relation_get(unit_name = nil, relation_id = nil) commands = ['relation-get --format=json'] commands << "-r #{relation_id}" if relation_id commands << '-' commands << unit_name if unit_name run(commands.join(' ')).try { |relation| JSON.load(relation) } end def config_get run("config-get --format=json").try { |relation| JSON.load(relation) } end def unit_get(key) run("unit-get #{key}") end def juju_log(text) run("juju-log #{text}") end private def run(command) value = %x{ #{command} 2>&1 }.strip value.empty? ? nil : value end endcharm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/juju-helpers/libraries/juju.rb0000664000175000017500000000100412650157641032240 0ustar marcomarco00000000000000$: << File.expand_path('..', __FILE__) require 'active_support/all' require 'juju/juju_helpers' require 'juju/juju_helpers_dev' class Chef class Resource include JujuHelpers if ENV['JUJU_ENV'] == 'development' include JujuHelpersDev end end class Recipe include JujuHelpers if ENV['JUJU_ENV'] == 'development' include JujuHelpersDev end end class Provider include JujuHelpers if ENV['JUJU_ENV'] == 'development' include JujuHelpersDev end end endcharm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/juju-helpers/metadata.rb0000664000175000017500000000030012650157641031065 0ustar marcomarco00000000000000maintainer "Charles Butler" maintainer_email "charles.butler@ubuntu.com" license "GPL-3" description "JuJu Helpers" version "0.2" name "juju-helpers" charm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/juju-helpers/definitions/0000775000175000017500000000000012677251067031310 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/juju-helpers/definitions/relation_set.rb0000664000175000017500000000041512650157641034317 0ustar marcomarco00000000000000define :relation_set do args_string = params[:variables].map { |key, value| "#{key}=\"#{value}\"" }.join(' ') command = "relation-set #{args_string}" command += " -r #{params[:relation_id]}" if params[:relation_id] execute command do action :run end endcharm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/juju-helpers/definitions/juju_port.rb0000664000175000017500000000037512650157641033655 0ustar marcomarco00000000000000define :juju_port, action: :nothing do if params[:action] == :open execute "open-port #{params[:name]}" do action :run end elsif params[:action] == :close execute "close-port #{params[:name]}" do action :run end end endcharm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/charm-name/0000775000175000017500000000000012677251067026370 5ustar 
marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/charm-name/recipes/0000775000175000017500000000000012677251067030022 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/charm-name/recipes/upgrade-charm.rb0000664000175000017500000000000012650157641033046 0ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/charm-name/recipes/config-changed.rb0000664000175000017500000000000012650157641033163 0ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/charm-name/recipes/start.rb0000664000175000017500000000000012650157641031464 0ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/charm-name/recipes/install.rb0000664000175000017500000000000012650157641031775 0ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/charm-name/recipes/stop.rb0000664000175000017500000000000012650157641031314 0ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/chef/files/cookbooks/charm-name/metadata.rb0000664000175000017500000000035312650157641030470 0ustar marcomarco00000000000000maintainer "$metadata.maintainer" maintainer_email "$metadata.maintainer" license "GPL-3" description "$metadata.summary" version "0.1" name "$metadata.package" depends "juju-helpers" charm-tools-2.1.2/charmtools/templates/chef/files/config.yaml0000664000175000017500000000056712650157641024522 0ustar marcomarco00000000000000options: string-option: type: string default: "Default Value" description: "A short description of the configuration option" boolean-option: type: boolean default: False description: "A short description of the configuration option" int-option: type: int default: 9001 description: "A short description of the configuration option" charm-tools-2.1.2/charmtools/templates/chef/files/tests/0000775000175000017500000000000012677251067023531 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/chef/files/tests/99-autogen0000775000175000017500000000364512650157641025362 0ustar marcomarco00000000000000#!/usr/bin/env python3 import amulet import requests import unittest class TestDeployment(unittest.TestCase): @classmethod def setUpClass(cls): cls.deployment = amulet.Deployment(series='trusty') cls.deployment.add('$metadata.package') cls.deployment.expose('$metadata.package') try: cls.deployment.setup(timeout=900) cls.deployment.sentry.wait() except amulet.helpers.TimeoutError: amulet.raise_status(amulet.SKIP, msg="Environment wasn't stood up in time") except: raise def test_case(self): # Now you can use self.deployment.sentry.unit[UNIT] to address each of # the units and perform more in-depth steps. You can also reference # the first unit as self.unit. 
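        #
        # For example (illustrative only; the unit name below assumes this
        # charm deploys a single unit of $metadata.package):
        #
        #     unit = self.deployment.sentry.unit['$metadata.package/0']
        #     self.assertIsNotNone(unit.info['public-address'])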
# There are three test statuses that can be triggered with # amulet.raise_status(): # - amulet.PASS # - amulet.FAIL # - amulet.SKIP # Each unit has the following methods: # - .info - An array of the information of that unit from Juju # - .file(PATH) - Get the details of a file on that unit # - .file_contents(PATH) - Get plain text output of PATH file from that unit # - .directory(PATH) - Get details of directory # - .directory_contents(PATH) - List files and folders in PATH on that unit # - .relation(relation, service:rel) - Get relation data from return service # add tests here to confirm service is up and working properly # For example, to confirm that it has a functioning HTTP server: # page = requests.get('http://{}'.format(self.unit.info['public-address'])) # page.raise_for_status() # More information on writing Amulet tests can be found at: # https://jujucharms.com/docs/stable/tools-amulet pass if __name__ == '__main__': unittest.main() charm-tools-2.1.2/charmtools/templates/chef/files/tests/00-setup0000775000175000017500000000017412650157641025030 0ustar marcomarco00000000000000#!/bin/bash sudo add-apt-repository -y ppa:juju/stable sudo apt-get update sudo apt-get install -y amulet python-requests charm-tools-2.1.2/charmtools/templates/chef/files/metadata.yaml0000664000175000017500000000051512650157641025026 0ustar marcomarco00000000000000name: $metadata.package summary: $metadata.summary maintainer: $metadata.maintainer description: | $metadata.description tags: - misc subordinate: false provides: provides-relation: interface: interface-name requires: requires-relation: interface: interface-name peers: peer-relation: interface: interface-name charm-tools-2.1.2/charmtools/templates/chef/files/icon.svg0000664000175000017500000002361312650157641024037 0ustar marcomarco00000000000000 image/svg+xml charm-tools-2.1.2/charmtools/templates/chef/__init__.py0000664000175000017500000000140712650157641023372 0ustar marcomarco00000000000000#!/usr/bin/python # # Copyright (C) 2014 Canonical Ltd. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . from .template import ChefCharmTemplate # noqa charm-tools-2.1.2/charmtools/templates/reactive_python/0000775000175000017500000000000012677251067023563 5ustar marcomarco00000000000000charm-tools-2.1.2/charmtools/templates/reactive_python/template.py0000664000175000017500000000637612666071477025767 0ustar marcomarco00000000000000#!/usr/bin/python # # Copyright (C) 2014 Canonical Ltd. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program. If not, see . import logging import os import os.path as path import time import shutil import subprocess import tempfile from Cheetah.Template import Template from stat import ST_MODE from charmtools.generators import ( CharmTemplate, ) log = logging.getLogger(__name__) class ReactivePythonCharmTemplate(CharmTemplate): """Creates a reactive, layered python-based charm""" # _EXTRA_FILES is the list of names of files present in the git repo # we don't want transferred over to the charm template: _EXTRA_FILES = ["README.md", ".git", ".gitmodules"] _TEMPLATE_URL = "https://github.com/juju-solutions/template-reactive-python" def create_charm(self, config, output_dir): self._clone_template(config, output_dir) for root, dirs, files in os.walk(output_dir): for outfile in files: if self.skip_template(outfile): continue self._template_file(config, path.join(root, outfile)) def _template_file(self, config, outfile): if path.islink(outfile): return mode = os.stat(outfile)[ST_MODE] t = Template(file=outfile, searchList=(config)) o = tempfile.NamedTemporaryFile( dir=path.dirname(outfile), delete=False) os.chmod(o.name, mode) o.write(str(t)) o.close() backupname = outfile + str(time.time()) os.rename(outfile, backupname) os.rename(o.name, outfile) os.unlink(backupname) def _clone_template(self, config, output_dir): cmd = "git clone --recursive {} {}".format( self._TEMPLATE_URL, output_dir ) try: subprocess.check_call(cmd.split()) except OSError as e: raise Exception( "The below error has occurred whilst attempting to clone" "the charm template. Please make sure you have git" "installed on your system.\n" + e ) # iterate and remove all the unwanted files from the git repo: for item in [path.join(output_dir, i) for i in self._EXTRA_FILES]: if not path.exists(item): continue if path.isdir(item) and not path.islink(item): shutil.rmtree(item) else: os.remove(item) # rename handlers.py to .py new_name = '%s.py' % config['metadata']['package'].replace('-', '_') os.rename(os.path.join(output_dir, 'reactive', 'handlers.py'), os.path.join(output_dir, 'reactive', new_name)) charm-tools-2.1.2/charmtools/templates/reactive_python/__init__.py0000664000175000017500000000150612666071477025701 0ustar marcomarco00000000000000#!/usr/bin/python # # Copyright (C) 2014 Canonical Ltd. # Author: Clint Byrum # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . from .template import ReactivePythonCharmTemplate # noqa charm-tools-2.1.2/charmtools/proof.py0000775000175000017500000000375412676737527020106 0ustar marcomarco00000000000000#!/usr/bin/python # Copyright (C) 2011 - 2014 Canonical Ltd. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import os
import sys
import argparse

from bundles import Bundle
from charms import Charm
from cli import parser_defaults
from charmtools import utils


def get_args(args=None):
    parser = argparse.ArgumentParser(
        description='perform static analysis on a charm or bundle')
    parser.add_argument('charm_name', nargs='?', default=os.getcwd(),
                        help='path of charm dir to check. Defaults to PWD')
    utils.add_plugin_description(parser)
    parser = parser_defaults(parser)
    args = parser.parse_args(args)

    return args


def proof(path, is_bundle, debug):
    path = os.path.abspath(path)
    if not is_bundle:
        # Try the path as a charm first; fall back to treating it as a bundle.
        try:
            c = Charm(path)
        except Exception:
            try:
                c = Bundle(path, debug)
            except Exception:
                return ["FATAL: Not a Bundle or a Charm, cannot proof"], 200
    else:
        try:
            c = Bundle(path, debug)
        except Exception as e:
            return ["FATAL: %s" % e.message], 200

    lint, err_code = c.proof()
    return lint, err_code


def main():
    args_ = get_args()
    lint, exit_code = proof(args_.charm_name, args_.bundle,
                            args_.debug)
    if lint:
        print("\n".join(lint))
    sys.exit(exit_code)


if __name__ == "__main__":
    main()
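As a usage note, the proof() helper above can also be driven directly from
Python rather than through the charm-proof console script. A minimal sketch,
assuming the charmtools package is importable; the charm path shown is
hypothetical:

    from charmtools.proof import proof

    # proof() returns a list of lint messages and an exit code.
    lint, exit_code = proof('/tmp/my-charm', is_bundle=False, debug=False)
    for message in lint:
        print(message)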