# file: ansible-2.1.1.0/bin/ansible

#!/usr/bin/env python

# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

########################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

__requires__ = ['ansible']
try:
    import pkg_resources
except Exception:
    # Use pkg_resources to find the correct versions of libraries and set
    # sys.path appropriately when there are multiversion installs. But we
    # have code that better expresses the errors in the places where the code
    # is actually used (the deps are optional for many code paths) so we don't
    # want to fail here.
    pass

import os
import shutil
import sys
import traceback

# for debug
from multiprocessing import Lock
debug_lock = Lock()

import ansible.constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError
from ansible.utils.display import Display
from ansible.utils.unicode import to_unicode


########################################
### OUTPUT OF LAST RESORT ###
class LastResort(object):
    def display(self, msg):
        print(msg, file=sys.stderr)

    def error(self, msg, wrap_text=None):
        print(msg, file=sys.stderr)


########################################

if __name__ == '__main__':

    display = LastResort()
    cli = None
    me = os.path.basename(sys.argv[0])

    try:
        display = Display()
        display.debug("starting run")

        sub = None
        try:
            if me.find('-') != -1:
                target = me.split('-')
                if len(target) > 1:
                    sub = target[1]
                    myclass = "%sCLI" % sub.capitalize()
                mycli = getattr(__import__("ansible.cli.%s" % sub, fromlist=[myclass]), myclass)
            elif me == 'ansible':
                from ansible.cli.adhoc import AdHocCLI as mycli
            else:
                raise AnsibleError("Unknown Ansible alias: %s" % me)
        except ImportError as e:
            if e.message.endswith(' %s' % sub):
                raise AnsibleError("Ansible sub-program not implemented: %s" % me)
            else:
                raise

        cli = mycli(sys.argv)
        cli.parse()
        exit_code = cli.run()

    except AnsibleOptionsError as e:
        cli.parser.print_help()
        display.error(to_unicode(e), wrap_text=False)
        exit_code = 5
    except AnsibleParserError as e:
        display.error(to_unicode(e), wrap_text=False)
        exit_code = 4
    # TQM takes care of these, but leaving comment to reserve the exit codes
    # except AnsibleHostUnreachable as e:
    #     display.error(str(e))
    #     exit_code = 3
    # except AnsibleHostFailed as e:
    #     display.error(str(e))
    #     exit_code = 2
    except AnsibleError as e:
        display.error(to_unicode(e), wrap_text=False)
        exit_code = 1
    except KeyboardInterrupt:
        display.error("User interrupted execution")
        exit_code = 99
    except Exception as e:
        have_cli_options = cli is not None and cli.options is not None
        display.error("Unexpected Exception: %s" % to_unicode(e), wrap_text=False)
        if not have_cli_options or have_cli_options and cli.options.verbosity > 2:
            display.display(u"the full traceback was:\n\n%s" % to_unicode(traceback.format_exc()))
        else:
            display.display("to see the full traceback, use -vvv")
        exit_code = 250
    finally:
        # Remove ansible tempdir
        shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)

    sys.exit(exit_code)

# file: ansible-2.1.1.0/bin/ansible-console   (symlink -> ansible)
# file: ansible-2.1.1.0/bin/ansible-doc       (symlink -> ansible)
# file: ansible-2.1.1.0/bin/ansible-galaxy    (symlink -> ansible)
# file: ansible-2.1.1.0/bin/ansible-playbook  (symlink -> ansible)
# file: ansible-2.1.1.0/bin/ansible-pull      (symlink -> ansible)
# file: ansible-2.1.1.0/bin/ansible-vault     (symlink -> ansible)

# file: ansible-2.1.1.0/contrib/inventory/abiquo.ini

# Ansible external inventory script settings for Abiquo
#
# Define an Abiquo user with access to the Abiquo API which will be used to
# perform the required queries to obtain the information needed to generate
# the Ansible inventory output.
#
[auth]
apiuser = admin
apipass = xabiquo

# Specify the Abiquo API version in major.minor format and the access URI to
# the API endpoint. Tested versions are: 2.6, 3.0 and 3.1
# To confirm that your box has access to the Abiquo API you can run a curl
# command similar to this, replacing the credentials with suitable values:
# curl -X GET https://192.168.2.100/api/login -u admin:xabiquo
#
[api]
version = 3.0
uri = https://192.168.2.100/api
# You probably won't need to modify login preferences, but just in case
login_path = /login
login_type = application/vnd.abiquo.user+json

# To avoid performing excessive calls to the Abiquo API you can define a
# cache for the plugin output. Within the time defined in seconds, the latest
# output will be reused. After that time, the cache will be refreshed.
#
[cache]
cache_max_age = 30
cache_dir = /tmp

[defaults]
# Depending on your Abiquo environment, you may want to use only public IP
# addresses (if using public cloud providers) or also private IP addresses.
# You can set this with the public_ip_only setting.
public_ip_only = false
# default_net_interface is only used if public_ip_only = false.
# If public_ip_only is set to false, you can choose the default nic from
# which to obtain the IP address that defines the host.
default_net_interface = nic0
# Only deployed VMs are displayed in the plugin output.
deployed_only = true
# Define whether VM metadata is obtained from the Abiquo API.
get_metadata = false

# file: ansible-2.1.1.0/contrib/inventory/abiquo.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-

'''
External inventory script for Abiquo
====================================

Shamelessly copied from an existing inventory script.
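Once the accompanying abiquo.ini is configured, the script can be passed
straight to ansible with -i. An illustrative ad-hoc invocation (the group
name is the script's own 'all' group; the module choice is arbitrary):

    $ ansible -i abiquo.py all -m ping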
This script generates an inventory that Ansible can understand by making API requests to Abiquo API Requires some python libraries, ensure to have them installed when using this script. This script has been tested in Abiquo 3.0 but it may work also for Abiquo 2.6. Before using this script you may want to modify abiquo.ini config file. This script generates an Ansible hosts file with these host groups: ABQ_xxx: Defines a hosts itself by Abiquo VM name label all: Contains all hosts defined in Abiquo user's enterprise virtualdatecenter: Creates a host group for each virtualdatacenter containing all hosts defined on it virtualappliance: Creates a host group for each virtualappliance containing all hosts defined on it imagetemplate: Creates a host group for each image template containing all hosts using it ''' # (c) 2014, Daniel Beneyto # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . import os import sys import time import ConfigParser try: import json except ImportError: import simplejson as json from ansible.module_utils.urls import open_url def api_get(link, config): try: if link == None: url = config.get('api','uri') + config.get('api','login_path') headers = {"Accept": config.get('api','login_type')} else: url = link['href'] + '?limit=0' headers = {"Accept": link['type']} result = open_url(url, headers=headers, url_username=config.get('auth','apiuser').replace('\n', ''), url_password=config.get('auth','apipass').replace('\n', '')) return json.loads(result.read()) except: return None def save_cache(data, config): ''' saves item to cache ''' dpath = config.get('cache','cache_dir') try: cache = open('/'.join([dpath,'inventory']), 'w') cache.write(json.dumps(data)) cache.close() except IOError as e: pass # not really sure what to do here def get_cache(cache_item, config): ''' returns cached item ''' dpath = config.get('cache','cache_dir') inv = {} try: cache = open('/'.join([dpath,'inventory']), 'r') inv = cache.read() cache.close() except IOError as e: pass # not really sure what to do here return inv def cache_available(config): ''' checks if we have a 'fresh' cache available for item requested ''' if config.has_option('cache','cache_dir'): dpath = config.get('cache','cache_dir') try: existing = os.stat( '/'.join([dpath,'inventory'])) except: # cache doesn't exist or isn't accessible return False if config.has_option('cache', 'cache_max_age'): maxage = config.get('cache', 'cache_max_age') if ((int(time.time()) - int(existing.st_mtime)) <= int(maxage)): return True return False def generate_inv_from_api(enterprise_entity,config): try: inventory['all'] = {} inventory['all']['children'] = [] inventory['all']['hosts'] = [] inventory['_meta'] = {} inventory['_meta']['hostvars'] = {} enterprise = api_get(enterprise_entity,config) vms_entity = next(link for link in (enterprise['links']) if (link['rel']=='virtualmachines')) vms = api_get(vms_entity,config) for vmcollection in vms['collection']: vm_vapp = next(link for link in 
(vmcollection['links']) if (link['rel']=='virtualappliance'))['title'].replace('[','').replace(']','').replace(' ','_') vm_vdc = next(link for link in (vmcollection['links']) if (link['rel']=='virtualdatacenter'))['title'].replace('[','').replace(']','').replace(' ','_') vm_template = next(link for link in (vmcollection['links']) if (link['rel']=='virtualmachinetemplate'))['title'].replace('[','').replace(']','').replace(' ','_') # From abiquo.ini: Only adding to inventory VMs with public IP if (config.getboolean('defaults', 'public_ip_only')) == True: for link in vmcollection['links']: if (link['type']=='application/vnd.abiquo.publicip+json' and link['rel']=='ip'): vm_nic = link['title'] break else: vm_nic = None # Otherwise, assigning defined network interface IP address else: for link in vmcollection['links']: if (link['rel']==config.get('defaults', 'default_net_interface')): vm_nic = link['title'] break else: vm_nic = None vm_state = True # From abiquo.ini: Only adding to inventory VMs deployed if ((config.getboolean('defaults', 'deployed_only') == True) and (vmcollection['state'] == 'NOT_ALLOCATED')): vm_state = False if not vm_nic == None and vm_state: if not vm_vapp in inventory.keys(): inventory[vm_vapp] = {} inventory[vm_vapp]['children'] = [] inventory[vm_vapp]['hosts'] = [] if not vm_vdc in inventory.keys(): inventory[vm_vdc] = {} inventory[vm_vdc]['hosts'] = [] inventory[vm_vdc]['children'] = [] if not vm_template in inventory.keys(): inventory[vm_template] = {} inventory[vm_template]['children'] = [] inventory[vm_template]['hosts'] = [] if config.getboolean('defaults', 'get_metadata') == True: meta_entity = next(link for link in (vmcollection['links']) if (link['rel']=='metadata')) try: metadata = api_get(meta_entity,config) if (config.getfloat("api","version") >= 3.0): vm_metadata = metadata['metadata'] else: vm_metadata = metadata['metadata']['metadata'] inventory['_meta']['hostvars'][vm_nic] = vm_metadata except Exception as e: pass inventory[vm_vapp]['children'].append(vmcollection['name']) inventory[vm_vdc]['children'].append(vmcollection['name']) inventory[vm_template]['children'].append(vmcollection['name']) inventory['all']['children'].append(vmcollection['name']) inventory[vmcollection['name']] = [] inventory[vmcollection['name']].append(vm_nic) return inventory except Exception as e: # Return empty hosts output return { 'all': {'hosts': []}, '_meta': { 'hostvars': {} } } def get_inventory(enterprise, config): ''' Reads the inventory from cache or Abiquo api ''' if cache_available(config): inv = get_cache('inventory', config) else: default_group = os.path.basename(sys.argv[0]).rstrip('.py') # MAKE ABIQUO API CALLS # inv = generate_inv_from_api(enterprise,config) save_cache(inv, config) return json.dumps(inv) if __name__ == '__main__': inventory = {} enterprise = {} # Read config config = ConfigParser.SafeConfigParser() for configfilename in [os.path.abspath(sys.argv[0]).rstrip('.py') + '.ini', 'abiquo.ini']: if os.path.exists(configfilename): config.read(configfilename) break try: login = api_get(None,config) enterprise = next(link for link in (login['links']) if (link['rel']=='enterprise')) except Exception as e: enterprise = None if cache_available(config): inventory = get_cache('inventory', config) else: inventory = get_inventory(enterprise, config) # return to ansible sys.stdout.write(str(inventory)) sys.stdout.flush() ansible-2.1.1.0/contrib/inventory/apache-libcloud.py0000775000175400017540000002675412746444466023551 0ustar 
# file: ansible-2.1.1.0/contrib/inventory/apache-libcloud.py

#!/usr/bin/env python

# (c) 2013, Sebastien Goasguen <runseb@gmail.com>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

######################################################################

'''
Apache Libcloud generic external inventory script
=================================================

Generates inventory that Ansible can understand by making API requests to
Cloud providers using the Apache libcloud library.

This script also assumes there is a libcloud.ini file alongside it.
'''

import sys
import os
import argparse
import re
from time import time
import ConfigParser

from six import iteritems, string_types
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
import libcloud.security as sec

try:
    import json
except ImportError:
    import simplejson as json


class LibcloudInventory(object):
    def __init__(self):
        ''' Main execution path '''

        # Inventory grouped by instance IDs, tags, security groups, regions,
        # and availability zones
        self.inventory = {}

        # Index of hostname (address) to instance ID
        self.index = {}

        # Read settings and parse CLI arguments
        self.read_settings()
        self.parse_cli_args()

        # Cache
        if self.args.refresh_cache:
            self.do_api_calls_update_cache()
        elif not self.is_cache_valid():
            self.do_api_calls_update_cache()

        # Data to print
        if self.args.host:
            data_to_print = self.get_host_info()
        elif self.args.list:
            # Display list of instances for inventory
            if len(self.inventory) == 0:
                data_to_print = self.get_inventory_from_cache()
            else:
                data_to_print = self.json_format_dict(self.inventory, True)

        print(data_to_print)

    def is_cache_valid(self):
        ''' Determines if the cache files have expired, or if it is still valid '''

        if os.path.isfile(self.cache_path_cache):
            mod_time = os.path.getmtime(self.cache_path_cache)
            current_time = time()
            if (mod_time + self.cache_max_age) > current_time:
                if os.path.isfile(self.cache_path_index):
                    return True

        return False

    def read_settings(self):
        ''' Reads the settings from the libcloud.ini file '''

        config = ConfigParser.SafeConfigParser()
        libcloud_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'libcloud.ini')
        libcloud_ini_path = os.environ.get('LIBCLOUD_INI_PATH', libcloud_default_ini_path)
        config.read(libcloud_ini_path)

        if not config.has_section('driver'):
            raise ValueError('libcloud.ini file must contain a [driver] section')

        if config.has_option('driver', 'provider'):
            self.provider = config.get('driver', 'provider')
        else:
            raise ValueError('libcloud.ini does not have a provider defined')

        if config.has_option('driver', 'key'):
            self.key = config.get('driver', 'key')
        else:
            raise ValueError('libcloud.ini does not have a key defined')

        if config.has_option('driver', 'secret'):
            self.secret = config.get('driver', 'secret')
        else:
            raise ValueError('libcloud.ini does not have a secret defined')

        if config.has_option('driver', 'host'):
            self.host = config.get('driver', 'host')
        if config.has_option('driver', 'secure'):
            self.secure = config.get('driver',
'secure') if config.has_option('driver', 'verify_ssl_cert'): self.verify_ssl_cert = config.get('driver', 'verify_ssl_cert') if config.has_option('driver', 'port'): self.port = config.get('driver', 'port') if config.has_option('driver', 'path'): self.path = config.get('driver', 'path') if config.has_option('driver', 'api_version'): self.api_version = config.get('driver', 'api_version') Driver = get_driver(getattr(Provider, self.provider)) self.conn = Driver(key=self.key, secret=self.secret, secure=self.secure, host=self.host, path=self.path) # Cache related cache_path = config.get('cache', 'cache_path') self.cache_path_cache = cache_path + "/ansible-libcloud.cache" self.cache_path_index = cache_path + "/ansible-libcloud.index" self.cache_max_age = config.getint('cache', 'cache_max_age') def parse_cli_args(self): ''' Command line argument processing ''' parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on libcloud supported providers') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests to libcloud supported providers (default: False - use cache files)') self.args = parser.parse_args() def do_api_calls_update_cache(self): ''' Do API calls to a location, and save data in cache files ''' self.get_nodes() self.write_to_cache(self.inventory, self.cache_path_cache) self.write_to_cache(self.index, self.cache_path_index) def get_nodes(self): ''' Gets the list of all nodes ''' for node in self.conn.list_nodes(): self.add_node(node) def get_node(self, node_id): ''' Gets details about a specific node ''' return [node for node in self.conn.list_nodes() if node.id == node_id][0] def add_node(self, node): ''' Adds a node to the inventory and index, as long as it is addressable ''' # Only want running instances if node.state != 0: return # Select the best destination address if not node.public_ips == []: dest = node.public_ips[0] if not dest: # Skip instances we cannot address (e.g. 
private VPC subnet)
            return

        # Add to index
        self.index[dest] = node.name

        # Inventory: Group by instance ID (always a group of 1)
        self.inventory[node.name] = [dest]

        '''
        # Inventory: Group by region
        self.push(self.inventory, region, dest)

        # Inventory: Group by availability zone
        self.push(self.inventory, node.placement, dest)

        # Inventory: Group by instance type
        self.push(self.inventory, self.to_safe('type_' + node.instance_type), dest)
        '''

        # Inventory: Group by key pair
        if node.extra['key_name']:
            self.push(self.inventory, self.to_safe('key_' + node.extra['key_name']), dest)

        # Inventory: Group by security group, quick thing to handle single sg
        if node.extra['security_group']:
            self.push(self.inventory, self.to_safe('sg_' + node.extra['security_group'][0]), dest)

        # Inventory: Group by tag
        if node.extra['tags']:
            for tagkey in node.extra['tags'].keys():
                self.push(self.inventory, self.to_safe('tag_' + tagkey + '_' + node.extra['tags'][tagkey]), dest)

    def get_host_info(self):
        ''' Get variables about a specific host '''

        if len(self.index) == 0:
            # Need to load index from cache
            self.load_index_from_cache()

        if self.args.host not in self.index:
            # try updating the cache
            self.do_api_calls_update_cache()
            if self.args.host not in self.index:
                # host might not exist anymore
                return self.json_format_dict({}, True)

        node_id = self.index[self.args.host]
        node = self.get_node(node_id)

        instance_vars = {}
        for key in vars(node):
            value = getattr(node, key)
            key = self.to_safe('ec2_' + key)

            # Handle complex types
            if isinstance(value, (int, bool)):
                instance_vars[key] = value
            elif isinstance(value, string_types):
                instance_vars[key] = value.strip()
            elif value is None:
                instance_vars[key] = ''
            elif key == 'ec2_region':
                instance_vars[key] = value.name
            elif key == 'ec2_tags':
                for k, v in iteritems(value):
                    key = self.to_safe('ec2_tag_' + k)
                    instance_vars[key] = v
            elif key == 'ec2_groups':
                group_ids = []
                group_names = []
                for group in value:
                    group_ids.append(group.id)
                    group_names.append(group.name)
                instance_vars["ec2_security_group_ids"] = ','.join(group_ids)
                instance_vars["ec2_security_group_names"] = ','.join(group_names)
            else:
                pass
                # TODO Product codes if someone finds them useful
                #print(key)
                #print(type(value))
                #print(value)

        return self.json_format_dict(instance_vars, True)

    def push(self, my_dict, key, element):
        ''' Pushes an element onto an array that may not have been defined in the dict '''

        if key in my_dict:
            my_dict[key].append(element)
        else:
            my_dict[key] = [element]

    def get_inventory_from_cache(self):
        ''' Reads the inventory from the cache file and returns it as a JSON object '''

        cache = open(self.cache_path_cache, 'r')
        json_inventory = cache.read()
        return json_inventory

    def load_index_from_cache(self):
        ''' Reads the index from the cache file and sets self.index '''

        cache = open(self.cache_path_index, 'r')
        json_index = cache.read()
        self.index = json.loads(json_index)

    def write_to_cache(self, data, filename):
        ''' Writes data in JSON format to a file '''

        json_data = self.json_format_dict(data, True)
        cache = open(filename, 'w')
        cache.write(json_data)
        cache.close()

    def to_safe(self, word):
        ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''

        return re.sub(r"[^A-Za-z0-9\-]", "_", word)

    def json_format_dict(self, data, pretty=False):
        ''' Converts a dict to a JSON object and dumps it as a formatted string '''

        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)


def main():
    LibcloudInventory()


if __name__ == '__main__':
    main()
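For reference, a minimal libcloud.ini matching what read_settings() above
expects; a sketch only — the section and option names come from the code,
while every value here is a placeholder:

# libcloud.ini (hypothetical example; see read_settings() above)
# [driver] provider, key and secret are required -- the script raises
# ValueError without them. host, secure, verify_ssl_cert, port, path and
# api_version are optional. Both [cache] options are read unconditionally.
[driver]
provider = EC2
key = your-provider-api-key
secret = your-provider-api-secret

[cache]
cache_path = /tmp
cache_max_age = 300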
# file: ansible-2.1.1.0/contrib/inventory/azure_rm.ini

#
# Configuration file for azure_rm.py
#
[azure]
# Control which resource groups are included. By default all resource groups are included.
# Set resource_groups to a comma separated list of resource group names.
#resource_groups=

# Control which tags are included. Set tags to a comma separated list of keys or key:value pairs
#tags=

# Control which locations are included. Set locations to a comma separated list (e.g. eastus,eastus2,westus)
#locations=

# Include powerstate. If you don't need powerstate information, turning it off improves runtime performance.
include_powerstate=yes

# Control grouping with the following boolean flags. Valid values: yes, no, true, false, True, False, 0, 1.
group_by_resource_group=yes
group_by_location=yes
group_by_security_group=yes
group_by_tag=yes

# file: ansible-2.1.1.0/contrib/inventory/azure_rm.py

#!/usr/bin/env python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
#                    Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#

'''
Azure External Inventory Script
===============================
Generates dynamic inventory by making API requests to the Azure Resource
Manager using the Azure Python SDK. For instructions on installing the
Azure Python SDK see http://azure-sdk-for-python.readthedocs.org/

Authentication
--------------
The order of precedence is command line arguments, environment variables,
and finally the [default] profile found in ~/.azure/credentials.

If using a credentials file, it should be an ini formatted file with one or
more sections, which we refer to as profiles. The script looks for a
[default] section, if a profile is not specified either on the command line
or with an environment variable. The keys in a profile will match the list
of command line arguments below.

For command line arguments and environment variables specify a profile found
in your ~/.azure/credentials file, or a service principal or Active Directory
user.

Command line arguments:
 - profile
 - client_id
 - secret
 - subscription_id
 - tenant
 - ad_user
 - password

Environment variables:
 - AZURE_PROFILE
 - AZURE_CLIENT_ID
 - AZURE_SECRET
 - AZURE_SUBSCRIPTION_ID
 - AZURE_TENANT
 - AZURE_AD_USER
 - AZURE_PASSWORD

Run for Specific Host
---------------------
When run for a specific host using the --host option, a resource group is
required.
For a specific host, this script returns the following variables: { "ansible_host": "XXX.XXX.XXX.XXX", "computer_name": "computer_name2", "fqdn": null, "id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Compute/virtualMachines/object-name", "image": { "offer": "CentOS", "publisher": "OpenLogic", "sku": "7.1", "version": "latest" }, "location": "westus", "mac_address": "00-0D-3A-31-2C-EC", "name": "object-name", "network_interface": "interface-name", "network_interface_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkInterfaces/object-name1", "network_security_group": null, "network_security_group_id": null, "os_disk": { "name": "object-name", "operating_system_type": "Linux" }, "plan": null, "powerstate": "running", "private_ip": "172.26.3.6", "private_ip_alloc_method": "Static", "provisioning_state": "Succeeded", "public_ip": "XXX.XXX.XXX.XXX", "public_ip_alloc_method": "Static", "public_ip_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/publicIPAddresses/object-name", "public_ip_name": "object-name", "resource_group": "galaxy-production", "security_group": "object-name", "security_group_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkSecurityGroups/object-name", "tags": { "db": "database" }, "type": "Microsoft.Compute/virtualMachines", "virtual_machine_size": "Standard_DS4" } Groups ------ When run in --list mode, instances are grouped by the following categories: - azure - location - resource_group - security_group - tag key - tag key_value Control groups using azure_rm.ini or set environment variables: AZURE_GROUP_BY_RESOURCE_GROUP=yes AZURE_GROUP_BY_LOCATION=yes AZURE_GROUP_BY_SECURITY_GROUP=yes AZURE_GROUP_BY_TAG=yes Select hosts within specific resource groups by assigning a comma separated list to: AZURE_RESOURCE_GROUPS=resource_group_a,resource_group_b Select hosts for specific tag key by assigning a comma separated list of tag keys to: AZURE_TAGS=key1,key2,key3 Select hosts for specific locations: AZURE_LOCATIONS=eastus,westus,eastus2 Or, select hosts for specific tag key:value pairs by assigning a comma separated list key:value pairs to: AZURE_TAGS=key1:value1,key2:value2 If you don't need the powerstate, you can improve performance by turning off powerstate fetching: AZURE_INCLUDE_POWERSTATE=no azure_rm.ini ---------------------- As mentioned above you can control execution using environment variables or an .ini file. A sample azure_rm.ini is included. The name of the .ini file is the basename of the inventory script (in this case 'azure_rm') with a .ini extension. This provides you with the flexibility of copying and customizing this script and having matching .ini files. Go forth and customize your Azure inventory! Powerstate: ----------- The powerstate attribute indicates whether or not a host is running. If the value is 'running', the machine is up. If the value is anything other than 'running', the machine is down, and will be unreachable. 
Examples: --------- Execute /bin/uname on all instances in the galaxy-qa resource group $ ansible -i azure_rm.py galaxy-qa -m shell -a "/bin/uname -a" Use the inventory script to print instance specific information $ contrib/inventory/azure_rm.py --host my_instance_host_name --pretty Use with a playbook $ ansible-playbook -i contrib/inventory/azure_rm.py my_playbook.yml --limit galaxy-qa Insecure Platform Warning ------------------------- If you receive InsecurePlatformWarning from urllib3, install the requests security packages: pip install requests[security] author: - Chris Houseknecht (@chouseknecht) - Matt Davis (@nitzmahone) Company: Ansible by Red Hat Version: 1.0.0 ''' import argparse import ConfigParser import json import os import re import sys from distutils.version import LooseVersion from os.path import expanduser HAS_AZURE = True HAS_AZURE_EXC = None try: from msrestazure.azure_exceptions import CloudError from azure.mgmt.compute import __version__ as azure_compute_version from azure.common import AzureMissingResourceHttpError, AzureHttpError from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials from azure.mgmt.network.network_management_client import NetworkManagementClient from azure.mgmt.resource.resources.resource_management_client import ResourceManagementClient from azure.mgmt.compute.compute_management_client import ComputeManagementClient except ImportError as exc: HAS_AZURE_EXC = exc HAS_AZURE = False AZURE_CREDENTIAL_ENV_MAPPING = dict( profile='AZURE_PROFILE', subscription_id='AZURE_SUBSCRIPTION_ID', client_id='AZURE_CLIENT_ID', secret='AZURE_SECRET', tenant='AZURE_TENANT', ad_user='AZURE_AD_USER', password='AZURE_PASSWORD' ) AZURE_CONFIG_SETTINGS = dict( resource_groups='AZURE_RESOURCE_GROUPS', tags='AZURE_TAGS', locations='AZURE_LOCATIONS', include_powerstate='AZURE_INCLUDE_POWERSTATE', group_by_resource_group='AZURE_GROUP_BY_RESOURCE_GROUP', group_by_location='AZURE_GROUP_BY_LOCATION', group_by_security_group='AZURE_GROUP_BY_SECURITY_GROUP', group_by_tag='AZURE_GROUP_BY_TAG' ) AZURE_MIN_VERSION = "0.30.0rc5" def azure_id_to_dict(id): pieces = re.sub(r'^\/', '', id).split('/') result = {} index = 0 while index < len(pieces) - 1: result[pieces[index]] = pieces[index + 1] index += 1 return result class AzureRM(object): def __init__(self, args): self._args = args self._compute_client = None self._resource_client = None self._network_client = None self.debug = False if args.debug: self.debug = True self.credentials = self._get_credentials(args) if not self.credentials: self.fail("Failed to get credentials. Either pass as parameters, set environment variables, " "or define a profile in ~/.azure/credentials.") if self.credentials.get('subscription_id', None) is None: self.fail("Credentials did not include a subscription_id value.") self.log("setting subscription_id") self.subscription_id = self.credentials['subscription_id'] if self.credentials.get('client_id') is not None and \ self.credentials.get('secret') is not None and \ self.credentials.get('tenant') is not None: self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'], secret=self.credentials['secret'], tenant=self.credentials['tenant']) elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None: self.azure_credentials = UserPassCredentials(self.credentials['ad_user'], self.credentials['password']) else: self.fail("Failed to authenticate with provided credentials. Some attributes were missing. 
" "Credentials must include client_id, secret and tenant or ad_user and password.") def log(self, msg): if self.debug: print (msg + u'\n') def fail(self, msg): raise Exception(msg) def _get_profile(self, profile="default"): path = expanduser("~") path += "/.azure/credentials" try: config = ConfigParser.ConfigParser() config.read(path) except Exception as exc: self.fail("Failed to access {0}. Check that the file exists and you have read " "access. {1}".format(path, str(exc))) credentials = dict() for key in AZURE_CREDENTIAL_ENV_MAPPING: try: credentials[key] = config.get(profile, key, raw=True) except: pass if credentials.get('client_id') is not None or credentials.get('ad_user') is not None: return credentials return None def _get_env_credentials(self): env_credentials = dict() for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.iteritems(): env_credentials[attribute] = os.environ.get(env_variable, None) if env_credentials['profile'] is not None: credentials = self._get_profile(env_credentials['profile']) return credentials if env_credentials['client_id'] is not None or env_credentials['ad_user'] is not None: return env_credentials return None def _get_credentials(self, params): # Get authentication credentials. # Precedence: cmd line parameters-> environment variables-> default profile in ~/.azure/credentials. self.log('Getting credentials') arg_credentials = dict() for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.iteritems(): arg_credentials[attribute] = getattr(params, attribute) # try module params if arg_credentials['profile'] is not None: self.log('Retrieving credentials with profile parameter.') credentials = self._get_profile(arg_credentials['profile']) return credentials if arg_credentials['client_id'] is not None: self.log('Received credentials from parameters.') return arg_credentials # try environment env_credentials = self._get_env_credentials() if env_credentials: self.log('Received credentials from env.') return env_credentials # try default profile from ~./azure/credentials default_credentials = self._get_profile() if default_credentials: self.log('Retrieved default profile credentials from ~/.azure/credentials.') return default_credentials return None def _register(self, key): try: # We have to perform the one-time registration here. Otherwise, we receive an error the first # time we attempt to use the requested client. 
resource_client = self.rm_client resource_client.providers.register(key) except Exception as exc: self.fail("One-time registration of {0} failed - {1}".format(key, str(exc))) @property def network_client(self): self.log('Getting network client') if not self._network_client: self._network_client = NetworkManagementClient(self.azure_credentials, self.subscription_id) self._register('Microsoft.Network') return self._network_client @property def rm_client(self): self.log('Getting resource manager client') if not self._resource_client: self._resource_client = ResourceManagementClient(self.azure_credentials, self.subscription_id) return self._resource_client @property def compute_client(self): self.log('Getting compute client') if not self._compute_client: self._compute_client = ComputeManagementClient(self.azure_credentials, self.subscription_id) self._register('Microsoft.Compute') return self._compute_client class AzureInventory(object): def __init__(self): self._args = self._parse_cli_args() try: rm = AzureRM(self._args) except Exception as e: sys.exit("{0}".format(str(e))) self._compute_client = rm.compute_client self._network_client = rm.network_client self._resource_client = rm.rm_client self._security_groups = None self.resource_groups = [] self.tags = None self.locations = None self.replace_dash_in_groups = False self.group_by_resource_group = True self.group_by_location = True self.group_by_security_group = True self.group_by_tag = True self.include_powerstate = True self._inventory = dict( _meta=dict( hostvars=dict() ), azure=[] ) self._get_settings() if self._args.resource_groups: self.resource_groups = self._args.resource_groups.split(',') if self._args.tags: self.tags = self._args.tags.split(',') if self._args.locations: self.locations = self._args.locations.split(',') if self._args.no_powerstate: self.include_powerstate = False self.get_inventory() print (self._json_format_dict(pretty=self._args.pretty)) sys.exit(0) def _parse_cli_args(self): # Parse command line arguments parser = argparse.ArgumentParser( description='Produce an Ansible Inventory file for an Azure subscription') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--debug', action='store_true', default=False, help='Send debug messages to STDOUT') parser.add_argument('--host', action='store', help='Get all information about an instance') parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print JSON output(default: False)') parser.add_argument('--profile', action='store', help='Azure profile contained in ~/.azure/credentials') parser.add_argument('--subscription_id', action='store', help='Azure Subscription Id') parser.add_argument('--client_id', action='store', help='Azure Client Id ') parser.add_argument('--secret', action='store', help='Azure Client Secret') parser.add_argument('--tenant', action='store', help='Azure Tenant Id') parser.add_argument('--ad-user', action='store', help='Active Directory User') parser.add_argument('--password', action='store', help='password') parser.add_argument('--resource-groups', action='store', help='Return inventory for comma separated list of resource group names') parser.add_argument('--tags', action='store', help='Return inventory for comma separated list of tag key:value pairs') parser.add_argument('--locations', action='store', help='Return inventory for comma separated list of locations') parser.add_argument('--no-powerstate', action='store_true', default=False, help='Do 
not include the power state of each virtual host') return parser.parse_args() def get_inventory(self): if len(self.resource_groups) > 0: # get VMs for requested resource groups for resource_group in self.resource_groups: try: virtual_machines = self._compute_client.virtual_machines.list(resource_group) except Exception as exc: sys.exit("Error: fetching virtual machines for resource group {0} - {1}".format(resource_group, str(exc))) if self._args.host or self.tags: selected_machines = self._selected_machines(virtual_machines) self._load_machines(selected_machines) else: self._load_machines(virtual_machines) else: # get all VMs within the subscription try: virtual_machines = self._compute_client.virtual_machines.list_all() except Exception as exc: sys.exit("Error: fetching virtual machines - {0}".format(str(exc))) if self._args.host or self.tags or self.locations: selected_machines = self._selected_machines(virtual_machines) self._load_machines(selected_machines) else: self._load_machines(virtual_machines) def _load_machines(self, machines): for machine in machines: id_dict = azure_id_to_dict(machine.id) #TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets # fixed, we should remove the .lower(). Opened Issue # #574: https://github.com/Azure/azure-sdk-for-python/issues/574 resource_group = id_dict['resourceGroups'].lower() if self.group_by_security_group: self._get_security_groups(resource_group) host_vars = dict( ansible_host=None, private_ip=None, private_ip_alloc_method=None, public_ip=None, public_ip_name=None, public_ip_id=None, public_ip_alloc_method=None, fqdn=None, location=machine.location, name=machine.name, type=machine.type, id=machine.id, tags=machine.tags, network_interface_id=None, network_interface=None, resource_group=resource_group, mac_address=None, plan=(machine.plan.name if machine.plan else None), virtual_machine_size=machine.hardware_profile.vm_size, computer_name=machine.os_profile.computer_name, provisioning_state=machine.provisioning_state, ) host_vars['os_disk'] = dict( name=machine.storage_profile.os_disk.name, operating_system_type=machine.storage_profile.os_disk.os_type.value ) if self.include_powerstate: host_vars['powerstate'] = self._get_powerstate(resource_group, machine.name) if machine.storage_profile.image_reference: host_vars['image'] = dict( offer=machine.storage_profile.image_reference.offer, publisher=machine.storage_profile.image_reference.publisher, sku=machine.storage_profile.image_reference.sku, version=machine.storage_profile.image_reference.version ) # Add windows details if machine.os_profile.windows_configuration is not None: host_vars['windows_auto_updates_enabled'] = \ machine.os_profile.windows_configuration.enable_automatic_updates host_vars['windows_timezone'] = machine.os_profile.windows_configuration.time_zone host_vars['windows_rm'] = None if machine.os_profile.windows_configuration.win_rm is not None: host_vars['windows_rm'] = dict(listeners=None) if machine.os_profile.windows_configuration.win_rm.listeners is not None: host_vars['windows_rm']['listeners'] = [] for listener in machine.os_profile.windows_configuration.win_rm.listeners: host_vars['windows_rm']['listeners'].append(dict(protocol=listener.protocol, certificate_url=listener.certificate_url)) for interface in machine.network_profile.network_interfaces: interface_reference = self._parse_ref_id(interface.id) network_interface = self._network_client.network_interfaces.get( interface_reference['resourceGroups'], 
interface_reference['networkInterfaces']) if network_interface.primary: if self.group_by_security_group and \ self._security_groups[resource_group].get(network_interface.id, None): host_vars['security_group'] = \ self._security_groups[resource_group][network_interface.id]['name'] host_vars['security_group_id'] = \ self._security_groups[resource_group][network_interface.id]['id'] host_vars['network_interface'] = network_interface.name host_vars['network_interface_id'] = network_interface.id host_vars['mac_address'] = network_interface.mac_address for ip_config in network_interface.ip_configurations: host_vars['private_ip'] = ip_config.private_ip_address host_vars['private_ip_alloc_method'] = ip_config.private_ip_allocation_method if ip_config.public_ip_address: public_ip_reference = self._parse_ref_id(ip_config.public_ip_address.id) public_ip_address = self._network_client.public_ip_addresses.get( public_ip_reference['resourceGroups'], public_ip_reference['publicIPAddresses']) host_vars['ansible_host'] = public_ip_address.ip_address host_vars['public_ip'] = public_ip_address.ip_address host_vars['public_ip_name'] = public_ip_address.name host_vars['public_ip_alloc_method'] = public_ip_address.public_ip_allocation_method host_vars['public_ip_id'] = public_ip_address.id if public_ip_address.dns_settings: host_vars['fqdn'] = public_ip_address.dns_settings.fqdn self._add_host(host_vars) def _selected_machines(self, virtual_machines): selected_machines = [] for machine in virtual_machines: if self._args.host and self._args.host == machine.name: selected_machines.append(machine) if self.tags and self._tags_match(machine.tags, self.tags): selected_machines.append(machine) if self.locations and machine.location in self.locations: selected_machines.append(machine) return selected_machines def _get_security_groups(self, resource_group): ''' For a given resource_group build a mapping of network_interface.id to security_group name ''' if not self._security_groups: self._security_groups = dict() if not self._security_groups.get(resource_group): self._security_groups[resource_group] = dict() for group in self._network_client.network_security_groups.list(resource_group): if group.network_interfaces: for interface in group.network_interfaces: self._security_groups[resource_group][interface.id] = dict( name=group.name, id=group.id ) def _get_powerstate(self, resource_group, name): try: vm = self._compute_client.virtual_machines.get(resource_group, name, expand='instanceview') except Exception as exc: sys.exit("Error: fetching instanceview for host {0} - {1}".format(name, str(exc))) return next((s.code.replace('PowerState/', '') for s in vm.instance_view.statuses if s.code.startswith('PowerState')), None) def _add_host(self, vars): host_name = self._to_safe(vars['name']) resource_group = self._to_safe(vars['resource_group']) security_group = None if vars.get('security_group'): security_group = self._to_safe(vars['security_group']) if self.group_by_resource_group: if not self._inventory.get(resource_group): self._inventory[resource_group] = [] self._inventory[resource_group].append(host_name) if self.group_by_location: if not self._inventory.get(vars['location']): self._inventory[vars['location']] = [] self._inventory[vars['location']].append(host_name) if self.group_by_security_group and security_group: if not self._inventory.get(security_group): self._inventory[security_group] = [] self._inventory[security_group].append(host_name) self._inventory['_meta']['hostvars'][host_name] = vars 
self._inventory['azure'].append(host_name) if self.group_by_tag and vars.get('tags'): for key, value in vars['tags'].iteritems(): safe_key = self._to_safe(key) safe_value = safe_key + '_' + self._to_safe(value) if not self._inventory.get(safe_key): self._inventory[safe_key] = [] if not self._inventory.get(safe_value): self._inventory[safe_value] = [] self._inventory[safe_key].append(host_name) self._inventory[safe_value].append(host_name) def _json_format_dict(self, pretty=False): # convert inventory to json if pretty: return json.dumps(self._inventory, sort_keys=True, indent=2) else: return json.dumps(self._inventory) def _get_settings(self): # Load settings from the .ini, if it exists. Otherwise, # look for environment values. file_settings = self._load_settings() if file_settings: for key in AZURE_CONFIG_SETTINGS: if key in ('resource_groups', 'tags', 'locations') and file_settings.get(key): values = file_settings.get(key).split(',') if len(values) > 0: setattr(self, key, values) elif file_settings.get(key): val = self._to_boolean(file_settings[key]) setattr(self, key, val) else: env_settings = self._get_env_settings() for key in AZURE_CONFIG_SETTINGS: if key in('resource_groups', 'tags', 'locations') and env_settings.get(key): values = env_settings.get(key).split(',') if len(values) > 0: setattr(self, key, values) elif env_settings.get(key, None) is not None: val = self._to_boolean(env_settings[key]) setattr(self, key, val) def _parse_ref_id(self, reference): response = {} keys = reference.strip('/').split('/') for index in range(len(keys)): if index < len(keys) - 1 and index % 2 == 0: response[keys[index]] = keys[index + 1] return response def _to_boolean(self, value): if value in ['Yes', 'yes', 1, 'True', 'true', True]: result = True elif value in ['No', 'no', 0, 'False', 'false', False]: result = False else: result = True return result def _get_env_settings(self): env_settings = dict() for attribute, env_variable in AZURE_CONFIG_SETTINGS.iteritems(): env_settings[attribute] = os.environ.get(env_variable, None) return env_settings def _load_settings(self): basename = os.path.splitext(os.path.basename(__file__))[0] path = basename + '.ini' config = None settings = None try: config = ConfigParser.ConfigParser() config.read(path) except: pass if config is not None: settings = dict() for key in AZURE_CONFIG_SETTINGS: try: settings[key] = config.get('azure', key, raw=True) except: pass return settings def _tags_match(self, tag_obj, tag_args): ''' Return True if the tags object from a VM contains the requested tag values. 
:param tag_obj: Dictionary of string:string pairs
        :param tag_args: List of strings in the form key:value
        :return: boolean
        '''
        if not tag_obj:
            return False

        matches = 0
        for arg in tag_args:
            arg_key = arg
            arg_value = None
            if re.search(r':', arg):
                arg_key, arg_value = arg.split(':')
            if arg_value and tag_obj.get(arg_key, None) == arg_value:
                matches += 1
            elif not arg_value and tag_obj.get(arg_key, None) is not None:
                matches += 1
        if matches == len(tag_args):
            return True
        return False

    def _to_safe(self, word):
        ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
        regex = r"[^A-Za-z0-9\_"
        if not self.replace_dash_in_groups:
            regex += r"\-"
        return re.sub(regex + "]", "_", word)


def main():
    if not HAS_AZURE:
        sys.exit("The Azure python sdk is not installed (try 'pip install azure==2.0.0rc5') - {0}".format(HAS_AZURE_EXC))

    if LooseVersion(azure_compute_version) != LooseVersion(AZURE_MIN_VERSION):
        sys.exit("Expecting azure.mgmt.compute.__version__ to be {0}. Found version {1} "
                 "Do you have Azure == 2.0.0rc5 installed?".format(AZURE_MIN_VERSION, azure_compute_version))

    AzureInventory()


if __name__ == '__main__':
    main()

# file: ansible-2.1.1.0/contrib/inventory/brook.ini

#!/usr/bin/python
# Copyright 2016 Doalitic.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# The Brook.io inventory script has the following dependencies:
#   1. A working Brook.io account
#      See https://brook.io
#   2. A valid token generated through the 'API token' panel of Brook.io
#   3. The libbrook python library.
#      See https://github.com/doalitic/libbrook
#
# Author: Francisco Ros

[brook]
# Valid API token (required).
# E.g. 'Aed342a12A60433697281FeEe1a4037C'
#
api_token =

# Project id within Brook.io, as obtained from the project settings (optional). If provided, the
# generated inventory will just include the hosts that belong to such project. Otherwise, it will
# include all hosts in projects the requesting user has access to. The response includes groups
# 'project_x', where 'x' is the project name.
# E.g. '2e8e099e1bc34cc0979d97ac34e9577b'
#
project_id =

# file: ansible-2.1.1.0/contrib/inventory/brook.py

#!/usr/bin/env python
# Copyright 2016 Doalitic.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
""" Brook.io external inventory script ================================== Generates inventory that Ansible can understand by making API requests to Brook.io via the libbrook library. Hence, such dependency must be installed in the system to run this script. The default configuration file is named 'brook.ini' and is located alongside this script. You can choose any other file by setting the BROOK_INI_PATH environment variable. If param 'project_id' is left blank in 'brook.ini', the inventory includes all the instances in projects where the requesting user belongs. Otherwise, only instances from the given project are included, provided the requesting user belongs to it. The following variables are established for every host. They can be retrieved from the hostvars dictionary. - brook_name: str - brook_description: str - brook_project: str - brook_template: str - brook_region: str - brook_status: str - brook_tags: list(str) - brook_internal_ips: list(str) - brook_external_ips: list(str) - brook_created_at - brook_updated_at - ansible_ssh_host Instances are grouped by the following categories: - tag: A group is created for each tag. E.g. groups 'tag_foo' and 'tag_bar' are created if there exist instances with tags 'foo' and/or 'bar'. - project: A group is created for each project. E.g. group 'project_test' is created if a project named 'test' exist. - status: A group is created for each instance state. E.g. groups 'status_RUNNING' and 'status_PENDING' are created if there are instances in running and pending state. Examples: Execute uname on all instances in project 'test' $ ansible -i brook.py project_test -m shell -a "/bin/uname -a" Install nginx on all debian web servers tagged with 'www' $ ansible -i brook.py tag_www -m apt -a "name=nginx state=present" Run site.yml playbook on web servers $ ansible-playbook -i brook.py site.yml -l tag_www Support: This script is tested on Python 2.7 and 3.4. It may work on other versions though. Author: Francisco Ros Version: 0.1 """ import sys import os try: from ConfigParser import SafeConfigParser as ConfigParser except ImportError: from configparser import ConfigParser try: import json except ImportError: import simplejson as json try: import libbrook except: sys.exit('Brook.io inventory script requires libbrook. See https://github.com/doalitic/libbrook') class BrookInventory: _API_ENDPOINT = 'https://api.brook.io' def __init__(self): self._configure_from_file() self.client = self.get_api_client() self.inventory = self.get_inventory() def _configure_from_file(self): """Initialize from .ini file. Configuration file is assumed to be named 'brook.ini' and to be located on the same directory than this file, unless the environment variable BROOK_INI_PATH says otherwise. """ brook_ini_default_path = \ os.path.join(os.path.dirname(os.path.realpath(__file__)), 'brook.ini') brook_ini_path = os.environ.get('BROOK_INI_PATH', brook_ini_default_path) config = ConfigParser(defaults={ 'api_token': '', 'project_id': '' }) config.read(brook_ini_path) self.api_token = config.get('brook', 'api_token') self.project_id = config.get('brook', 'project_id') if not self.api_token: print('You must provide (at least) your Brook.io API token to generate the dynamic ' 'inventory.') sys.exit(1) def get_api_client(self): """Authenticate user via the provided credentials and return the corresponding API client. 
""" # Get JWT token from API token # unauthenticated_client = libbrook.ApiClient(host=self._API_ENDPOINT) auth_api = libbrook.AuthApi(unauthenticated_client) api_token = libbrook.AuthTokenRequest() api_token.token = self.api_token jwt = auth_api.auth_token(token=api_token) # Create authenticated API client # return libbrook.ApiClient(host=self._API_ENDPOINT, header_name='Authorization', header_value='Bearer %s' % jwt.token) def get_inventory(self): """Generate Ansible inventory. """ groups = dict() meta = dict() meta['hostvars'] = dict() instances_api = libbrook.InstancesApi(self.client) projects_api = libbrook.ProjectsApi(self.client) templates_api = libbrook.TemplatesApi(self.client) # If no project is given, get all projects the requesting user has access to # if not self.project_id: projects = [project.id for project in projects_api.index_projects()] else: projects = [self.project_id] # Build inventory from instances in all projects # for project_id in projects: project = projects_api.show_project(project_id=project_id) for instance in instances_api.index_instances(project_id=project_id): # Get template used for this instance template = templates_api.show_template(template_id=instance.template) # Update hostvars try: meta['hostvars'][instance.name] = \ self.hostvars(project, instance, template, instances_api) except libbrook.rest.ApiException: continue # Group by project project_group = 'project_%s' % project.name if project_group in groups.keys(): groups[project_group].append(instance.name) else: groups[project_group] = [instance.name] # Group by status status_group = 'status_%s' % meta['hostvars'][instance.name]['brook_status'] if status_group in groups.keys(): groups[status_group].append(instance.name) else: groups[status_group] = [instance.name] # Group by tags tags = meta['hostvars'][instance.name]['brook_tags'] for tag in tags: tag_group = 'tag_%s' % tag if tag_group in groups.keys(): groups[tag_group].append(instance.name) else: groups[tag_group] = [instance.name] groups['_meta'] = meta return groups def hostvars(self, project, instance, template, api): """Return the hostvars dictionary for the given instance. Raise libbrook.rest.ApiException if it cannot retrieve all required information from the Brook.io API. 
""" hostvars = instance.to_dict() hostvars['brook_name'] = hostvars.pop('name') hostvars['brook_description'] = hostvars.pop('description') hostvars['brook_project'] = hostvars.pop('project') hostvars['brook_template'] = hostvars.pop('template') hostvars['brook_region'] = hostvars.pop('region') hostvars['brook_created_at'] = hostvars.pop('created_at') hostvars['brook_updated_at'] = hostvars.pop('updated_at') del hostvars['id'] del hostvars['key'] del hostvars['provider'] del hostvars['image'] # Substitute identifiers for names # hostvars['brook_project'] = project.name hostvars['brook_template'] = template.name # Retrieve instance state # status = api.status_instance(project_id=project.id, instance_id=instance.id) hostvars.update({'brook_status': status.state}) # Retrieve instance tags # tags = api.instance_tags(project_id=project.id, instance_id=instance.id) hostvars.update({'brook_tags': tags}) # Retrieve instance addresses # addresses = api.instance_addresses(project_id=project.id, instance_id=instance.id) internal_ips = [address.address for address in addresses if address.scope == 'internal'] external_ips = [address.address for address in addresses if address.address and address.scope == 'external'] hostvars.update({'brook_internal_ips': internal_ips}) hostvars.update({'brook_external_ips': external_ips}) try: hostvars.update({'ansible_ssh_host': external_ips[0]}) except IndexError: raise libbrook.rest.ApiException(status='502', reason='Instance without public IP') return hostvars # Run the script # brook = BrookInventory() print(json.dumps(brook.inventory)) ansible-2.1.1.0/contrib/inventory/cloudstack.ini0000664000175400017540000000024112746444466022774 0ustar jenkinsjenkins00000000000000[cloudstack] #endpoint = https://api.exoscale.ch/compute endpoint = https://cloud.example.com/client/api key = cloudstack api key secret = cloudstack api secret ansible-2.1.1.0/contrib/inventory/cloudstack.py0000775000175400017540000002006312746444466022654 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # (c) 2015, René Moser # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . ###################################################################### """ Ansible CloudStack external inventory script. ============================================= Generates Ansible inventory from CloudStack. Configuration is read from 'cloudstack.ini'. If you need to pass the project, write a simple wrapper script, e.g. 
project_cloudstack.sh: #!/bin/bash cloudstack.py --project $@ When run against a specific host, this script returns the following attributes based on the data obtained from CloudStack API: "web01": { "cpu_number": 2, "nic": [ { "ip": "10.102.76.98", "mac": "02:00:50:99:00:01", "type": "Isolated", "netmask": "255.255.255.0", "gateway": "10.102.76.1" }, { "ip": "10.102.138.63", "mac": "06:b7:5a:00:14:84", "type": "Shared", "netmask": "255.255.255.0", "gateway": "10.102.138.1" } ], "default_ip": "10.102.76.98", "zone": "ZUERICH", "created": "2014-07-02T07:53:50+0200", "hypervisor": "VMware", "memory": 2048, "state": "Running", "tags": [], "cpu_speed": 1800, "affinity_group": [], "service_offering": "Small", "cpu_used": "62%" } usage: cloudstack.py [--list] [--host HOST] [--project PROJECT] """ from __future__ import print_function import os import sys import argparse try: import json except ImportError: import simplejson as json try: from cs import CloudStack, CloudStackException, read_config except ImportError: print("Error: CloudStack library must be installed: pip install cs.", file=sys.stderr) sys.exit(1) class CloudStackInventory(object): def __init__(self): parser = argparse.ArgumentParser() parser.add_argument('--host') parser.add_argument('--list', action='store_true') parser.add_argument('--project') options = parser.parse_args() try: self.cs = CloudStack(**read_config()) except CloudStackException as e: print("Error: Could not connect to CloudStack API", file=sys.stderr) sys.exit(1) project_id = '' if options.project: project_id = self.get_project_id(options.project) if options.host: data = self.get_host(options.host, project_id) print(json.dumps(data, indent=2)) elif options.list: data = self.get_list(project_id) print(json.dumps(data, indent=2)) else: print("usage: --list | --host <hostname> [--project <project>]", file=sys.stderr) sys.exit(1) def get_project_id(self, project): projects = self.cs.listProjects() if projects: for p in projects['project']: if p['name'] == project or p['id'] == project: return p['id'] print("Error: Project %s not found."
% project, file=sys.stderr) sys.exit(1) def get_host(self, name, project_id=''): hosts = self.cs.listVirtualMachines(projectid=project_id) data = {} if not hosts: return data for host in hosts['virtualmachine']: host_name = host['displayname'] if name == host_name: data['zone'] = host['zonename'] if 'group' in host: data['group'] = host['group'] data['state'] = host['state'] data['service_offering'] = host['serviceofferingname'] data['affinity_group'] = host['affinitygroup'] data['security_group'] = host['securitygroup'] data['cpu_number'] = host['cpunumber'] data['cpu_speed'] = host['cpuspeed'] if 'cpuused' in host: data['cpu_used'] = host['cpuused'] data['memory'] = host['memory'] data['tags'] = host['tags'] data['hypervisor'] = host['hypervisor'] data['created'] = host['created'] data['nic'] = [] for nic in host['nic']: data['nic'].append({ 'ip': nic['ipaddress'], 'mac': nic['macaddress'], 'netmask': nic['netmask'], 'gateway': nic['gateway'], 'type': nic['type'], }) if nic['isdefault']: data['default_ip'] = nic['ipaddress'] break return data def get_list(self, project_id=''): data = { 'all': { 'hosts': [], }, '_meta': { 'hostvars': {}, }, } groups = self.cs.listInstanceGroups(projectid=project_id) if groups: for group in groups['instancegroup']: group_name = group['name'] if group_name and group_name not in data: data[group_name] = { 'hosts': [] } hosts = self.cs.listVirtualMachines(projectid=project_id) if not hosts: return data for host in hosts['virtualmachine']: host_name = host['displayname'] data['all']['hosts'].append(host_name) data['_meta']['hostvars'][host_name] = {} data['_meta']['hostvars'][host_name]['zone'] = host['zonename'] if 'group' in host: data['_meta']['hostvars'][host_name]['group'] = host['group'] data['_meta']['hostvars'][host_name]['state'] = host['state'] data['_meta']['hostvars'][host_name]['service_offering'] = host['serviceofferingname'] data['_meta']['hostvars'][host_name]['affinity_group'] = host['affinitygroup'] data['_meta']['hostvars'][host_name]['security_group'] = host['securitygroup'] data['_meta']['hostvars'][host_name]['cpu_number'] = host['cpunumber'] data['_meta']['hostvars'][host_name]['cpu_speed'] = host['cpuspeed'] if 'cpuused' in host: data['_meta']['hostvars'][host_name]['cpu_used'] = host['cpuused'] data['_meta']['hostvars'][host_name]['created'] = host['created'] data['_meta']['hostvars'][host_name]['memory'] = host['memory'] data['_meta']['hostvars'][host_name]['tags'] = host['tags'] data['_meta']['hostvars'][host_name]['hypervisor'] = host['hypervisor'] data['_meta']['hostvars'][host_name]['nic'] = [] for nic in host['nic']: data['_meta']['hostvars'][host_name]['nic'].append({ 'ip': nic['ipaddress'], 'mac': nic['macaddress'], 'netmask': nic['netmask'], 'gateway': nic['gateway'], 'type': nic['type'], }) if nic['isdefault']: data['_meta']['hostvars'][host_name]['default_ip'] = nic['ipaddress'] group_name = '' if 'group' in host: group_name = host['group'] if group_name and group_name in data: data[group_name]['hosts'].append(host_name) return data if __name__ == '__main__': CloudStackInventory() ansible-2.1.1.0/contrib/inventory/cobbler.ini0000664000175400017540000000106412746444466022254 0ustar jenkinsjenkins00000000000000# Ansible Cobbler external inventory script settings # [cobbler] host = http://PATH_TO_COBBLER_SERVER/cobbler_api # API calls to Cobbler can be slow. For this reason, we cache the results of an API # call.
Set this to the path you want cache files to be written to. Two files # will be written to this directory: # - ansible-cobbler.cache # - ansible-cobbler.index cache_path = /tmp # The number of seconds a cache file is considered valid. After this many # seconds, a new API call will be made, and the cache file will be updated. cache_max_age = 900 ansible-2.1.1.0/contrib/inventory/cobbler.py0000775000175400017540000002345512746444466022140 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python """ Cobbler external inventory script ================================= Ansible has a feature where instead of reading from /etc/ansible/hosts as a text file, it can query external programs to obtain the list of hosts, groups the hosts are in, and even variables to assign to each host. To use this, copy this file over /etc/ansible/hosts and chmod +x the file. This, more or less, allows you to keep one central database containing info about all of your managed instances. This script is an example of sourcing that data from Cobbler (http://cobbler.github.com). With cobbler each --mgmt-class in cobbler will correspond to a group in Ansible, and --ks-meta variables will be passed down for use in templates or even in argument lines. NOTE: The cobbler system names will not be used. Make sure a cobbler --dns-name is set for each cobbler system. If a system appears with two DNS names we do not add it twice because we don't want ansible talking to it twice. The first one found will be used. If no --dns-name is set the system will NOT be visible to ansible. We do not add cobbler system names because there is no requirement in cobbler that those correspond to addresses. See http://ansible.github.com/api.html for more info Tested with Cobbler 2.0.11. Changelog: - 2015-06-21 dmccue: Modified to support run-once _meta retrieval, results in higher performance at ansible startup. Groups are determined by owner rather than default mgmt_classes. DNS name determined from hostname. cobbler values are written to a 'cobbler' fact namespace - 2013-09-01 pgehres: Refactored implementation to make use of caching and to limit the number of connections to external cobbler server for performance. Added use of cobbler.ini file to configure settings. Tested with Cobbler 2.4.0 """ # (c) 2012, Michael DeHaan # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . ###################################################################### import argparse import ConfigParser import os import re from time import time import xmlrpclib try: import json except ImportError: import simplejson as json from six import iteritems # NOTE -- this file assumes Ansible is being accessed FROM the cobbler # server, so it does not attempt to login with a username and password. # this will be addressed in a future version of this script. 
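# A rough sketch of what remote authentication could look like if added later
# (hypothetical code, not part of this script; Cobbler's XML-RPC API exposes a
# login() call that returns a token for use with privileged calls):
#
#   conn = xmlrpclib.Server("http://cobbler.example.com/cobbler_api")  # assumed URL
#   token = conn.login("cobbler_user", "cobbler_password")             # assumed credentials
#   conn.get_systems()  # read-only calls like this need no token on open servers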
orderby_keyname = 'owners' # alternatively 'mgmt_classes' class CobblerInventory(object): def __init__(self): """ Main execution path """ self.conn = None self.inventory = dict() # A list of groups and the hosts in that group self.cache = dict() # Details about hosts in the inventory # Read settings and parse CLI arguments self.read_settings() self.parse_cli_args() # Cache if self.args.refresh_cache: self.update_cache() elif not self.is_cache_valid(): self.update_cache() else: self.load_inventory_from_cache() self.load_cache_from_cache() data_to_print = "" # Data to print if self.args.host: data_to_print += self.get_host_info() else: self.inventory['_meta'] = { 'hostvars': {} } for hostname in self.cache: self.inventory['_meta']['hostvars'][hostname] = {'cobbler': self.cache[hostname] } data_to_print += self.json_format_dict(self.inventory, True) print(data_to_print) def _connect(self): if not self.conn: self.conn = xmlrpclib.Server(self.cobbler_host, allow_none=True) def is_cache_valid(self): """ Determines if the cache files have expired, or if it is still valid """ if os.path.isfile(self.cache_path_cache): mod_time = os.path.getmtime(self.cache_path_cache) current_time = time() if (mod_time + self.cache_max_age) > current_time: if os.path.isfile(self.cache_path_inventory): return True return False def read_settings(self): """ Reads the settings from the cobbler.ini file """ config = ConfigParser.SafeConfigParser() config.read(os.path.dirname(os.path.realpath(__file__)) + '/cobbler.ini') self.cobbler_host = config.get('cobbler', 'host') # Cache related cache_path = config.get('cobbler', 'cache_path') self.cache_path_cache = cache_path + "/ansible-cobbler.cache" self.cache_path_inventory = cache_path + "/ansible-cobbler.index" self.cache_max_age = config.getint('cobbler', 'cache_max_age') def parse_cli_args(self): """ Command line argument processing """ parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Cobbler') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests to cobbler (default: False - use cache files)') self.args = parser.parse_args() def update_cache(self): """ Make calls to cobbler and save the output in a cache """ self._connect() self.groups = dict() self.hosts = dict() data = self.conn.get_systems() for host in data: # Get the FQDN for the host and add it to the right groups dns_name = host['hostname'] # may be empty ksmeta = None interfaces = host['interfaces'] # hostname is often empty for non-static IP hosts if dns_name == '': for (iname, ivalue) in iteritems(interfaces): if ivalue['management'] or not ivalue['static']: this_dns_name = ivalue.get('dns_name', None) if this_dns_name is not None and this_dns_name != "": dns_name = this_dns_name if dns_name == '': continue status = host['status'] profile = host['profile'] classes = host[orderby_keyname] if status not in self.inventory: self.inventory[status] = [] self.inventory[status].append(dns_name) if profile not in self.inventory: self.inventory[profile] = [] self.inventory[profile].append(dns_name) for cls in classes: if cls not in self.inventory: self.inventory[cls] = [] self.inventory[cls].append(dns_name) # Since we already have all of the data for the host, update the host details as well # The old way was
ksmeta only -- provide backwards compatibility self.cache[dns_name] = host if "ks_meta" in host: for key, value in iteritems(host["ks_meta"]): self.cache[dns_name][key] = value self.write_to_cache(self.cache, self.cache_path_cache) self.write_to_cache(self.inventory, self.cache_path_inventory) def get_host_info(self): """ Get variables about a specific host """ if not self.cache or len(self.cache) == 0: # Need to load index from cache self.load_cache_from_cache() if not self.args.host in self.cache: # try updating the cache self.update_cache() if not self.args.host in self.cache: # host might not exist anymore return self.json_format_dict({}, True) return self.json_format_dict(self.cache[self.args.host], True) def push(self, my_dict, key, element): """ Pushed an element onto an array that may not have been defined in the dict """ if key in my_dict: my_dict[key].append(element) else: my_dict[key] = [element] def load_inventory_from_cache(self): """ Reads the index from the cache file sets self.index """ cache = open(self.cache_path_inventory, 'r') json_inventory = cache.read() self.inventory = json.loads(json_inventory) def load_cache_from_cache(self): """ Reads the cache from the cache file sets self.cache """ cache = open(self.cache_path_cache, 'r') json_cache = cache.read() self.cache = json.loads(json_cache) def write_to_cache(self, data, filename): """ Writes data in JSON format to a file """ json_data = self.json_format_dict(data, True) cache = open(filename, 'w') cache.write(json_data) cache.close() def to_safe(self, word): """ Converts 'bad' characters in a string to underscores so they can be used as Ansible groups """ return re.sub("[^A-Za-z0-9\-]", "_", word) def json_format_dict(self, data, pretty=False): """ Converts a dict to a JSON object and dumps it as a formatted string """ if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) CobblerInventory() ansible-2.1.1.0/contrib/inventory/collins.ini0000664000175400017540000000366612746444466022321 0ustar jenkinsjenkins00000000000000# Ansible Collins external inventory script settings # [collins] host = http://localhost:9000 username = blake password = admin:first # Specifies a timeout for all HTTP requests to Collins. timeout_secs = 120 # Specifies a maximum number of retries per Collins request. max_retries = 5 # Specifies the number of results to return per paginated query as specified in # the Pagination section of the Collins API docs: # http://tumblr.github.io/collins/api.html results_per_query = 100 # Specifies the Collins asset type which will be queried for; most typically # you'll want to leave this at the default of SERVER_NODE. asset_type = SERVER_NODE # Collins assets can optionally be assigned hostnames; this option will preference # the selection of an asset's hostname over an IP address as the primary identifier # in the Ansible inventory. Typically, this value should be set to true if assets # are assigned hostnames. prefer_hostnames = true # Within Collins, assets can be granted multiple IP addresses; this configuration # value specifies the index within the 'ADDRESSES' array as returned by the # following API endpoint: # http://tumblr.github.io/collins/api.html#api-ipam-asset-addresses-section ip_address_index = 0 # Sets whether Collins instances in multiple datacenters will be queried. query_remote_dcs = false # API calls to Collins can involve large, substantial queries. For this reason, # we cache the results of an API call. 
Set this to the path you want cache files # to be written to. Two files will be written to this directory: # - ansible-collins.cache # - ansible-collins.index cache_path = /tmp # If errors occur while querying inventory, logging messages will be written # to a logfile in the specified directory: # - ansible-collins.log log_path = /tmp # The number of seconds that a cache file is considered valid. After this many # seconds, a new API call will be made, and the cache file will be updated. cache_max_age = 600 ansible-2.1.1.0/contrib/inventory/collins.py0000775000175400017540000004306212746444466022167 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python """ Collins external inventory script ================================= Ansible has a feature where instead of reading from /etc/ansible/hosts as a text file, it can query external programs to obtain the list of hosts, groups the hosts are in, and even variables to assign to each host. Collins is a hardware asset management system originally developed by Tumblr for tracking new hardware as it built out its own datacenters. It exposes a rich API for manipulating and querying one's hardware inventory, which makes it an ideal 'single point of truth' for driving systems automation like Ansible. Extensive documentation on Collins, including a quickstart, API docs, and a full reference manual, can be found here: http://tumblr.github.io/collins This script adds support to Ansible for obtaining a dynamic inventory of assets in your infrastructure, grouping them in Ansible by their useful attributes, and binding all facts provided by Collins to each host so that they can be used to drive automation. Some parts of this script were cribbed shamelessly from mdehaan's Cobbler inventory script. To use it, copy it to your repo and pass -i to the ansible or ansible-playbook command; if you'd like to use it by default, simply copy collins.ini to /etc/ansible and this script to /etc/ansible/hosts. Alongside the options set in collins.ini, there are several environment variables that will be used instead of the configured values if they are set: - COLLINS_USERNAME - specifies a username to use for Collins authentication - COLLINS_PASSWORD - specifies a password to use for Collins authentication - COLLINS_ASSET_TYPE - specifies a Collins asset type to use during querying; this can be used to run Ansible automation against different asset classes than server nodes, such as network switches and PDUs - COLLINS_CONFIG - specifies an alternative location for collins.ini, defaults to /collins.ini If errors are encountered during operation, this script will return an exit code of 255; otherwise, it will return an exit code of 0. Collins attributes are accessible as variables in ansible via the COLLINS['attribute_name']. Tested against Ansible 1.8.2 and Collins 1.3.0. """ # (c) 2014, Steve Salevan # # This file is part of Ansible. # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
###################################################################### import argparse import ConfigParser import logging import os import re import sys from time import time import traceback import urllib try: import json except ImportError: import simplejson as json from six import iteritems from ansible.module_utils.urls import open_url class CollinsDefaults(object): ASSETS_API_ENDPOINT = '%s/api/assets' SPECIAL_ATTRIBUTES = set([ 'CREATED', 'DELETED', 'UPDATED', 'STATE', ]) LOG_FORMAT = '%(asctime)-15s %(message)s' class Error(Exception): pass class MaxRetriesError(Error): pass class CollinsInventory(object): def __init__(self): """ Constructs CollinsInventory object and reads all configuration. """ self.inventory = dict() # A list of groups and the hosts in that group self.cache = dict() # Details about hosts in the inventory # Read settings and parse CLI arguments self.read_settings() self.parse_cli_args() logging.basicConfig(format=CollinsDefaults.LOG_FORMAT, filename=self.log_location) self.log = logging.getLogger('CollinsInventory') def _asset_get_attribute(self, asset, attrib): """ Returns a user-defined attribute from an asset if it exists; otherwise, returns None. """ if 'ATTRIBS' in asset: for attrib_block in asset['ATTRIBS'].keys(): if attrib in asset['ATTRIBS'][attrib_block]: return asset['ATTRIBS'][attrib_block][attrib] return None def _asset_has_attribute(self, asset, attrib): """ Returns whether a user-defined attribute is present on an asset. """ if 'ATTRIBS' in asset: for attrib_block in asset['ATTRIBS'].keys(): if attrib in asset['ATTRIBS'][attrib_block]: return True return False def run(self): """ Main execution path """ # Updates cache if cache is not present or has expired. successful = True if self.args.refresh_cache: successful = self.update_cache() elif not self.is_cache_valid(): successful = self.update_cache() else: successful = self.load_inventory_from_cache() successful &= self.load_cache_from_cache() data_to_print = "" # Data to print if self.args.host: data_to_print = self.get_host_info() elif self.args.list: # Display list of instances for inventory data_to_print = self.json_format_dict(self.inventory, self.args.pretty) else: # default action with no options data_to_print = self.json_format_dict(self.inventory, self.args.pretty) print(data_to_print) return successful def find_assets(self, attributes = {}, operation = 'AND'): """ Obtains Collins assets matching the provided attributes. """ # Formats asset search query to locate assets matching attributes, using # the CQL search feature as described here: # http://tumblr.github.io/collins/recipes.html attributes_query = [ '='.join(attr_pair) for attr_pair in iteritems(attributes) ] query_parameters = { 'details': ['True'], 'operation': [operation], 'query': attributes_query, 'remoteLookup': [str(self.query_remote_dcs)], 'size': [self.results_per_query], 'type': [self.collins_asset_type], } assets = [] cur_page = 0 num_retries = 0 # Locates all assets matching the provided query, exhausting pagination. 
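        # For reference, one page of this query resolves to a URL of the
        # following shape (host and attribute values are illustrative only):
        #   https://collins.example.com/api/assets?details=True&operation=AND
        #     &page=0&query=PRIMARY_ROLE%3DWEB&remoteLookup=False&size=100
        #     &type=SERVER_NODE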
while True: if num_retries == self.collins_max_retries: raise MaxRetriesError("Maximum of %s retries reached; giving up" % \ self.collins_max_retries) query_parameters['page'] = cur_page query_url = "%s?%s" % ( (CollinsDefaults.ASSETS_API_ENDPOINT % self.collins_host), urllib.urlencode(query_parameters, doseq=True) ) try: response = open_url(query_url, timeout=self.collins_timeout_secs, url_username=self.collins_username, url_password=self.collins_password) json_response = json.loads(response.read()) # Adds any assets found to the array of assets. assets += json_response['data']['Data'] # If we've retrieved all of our assets, breaks out of the loop. if len(json_response['data']['Data']) == 0: break cur_page += 1 num_retries = 0 except: self.log.error("Error while communicating with Collins, retrying:\n%s", traceback.format_exc()) num_retries += 1 return assets def is_cache_valid(self): """ Determines if the cache files have expired, or if it is still valid """ if os.path.isfile(self.cache_path_cache): mod_time = os.path.getmtime(self.cache_path_cache) current_time = time() if (mod_time + self.cache_max_age) > current_time: if os.path.isfile(self.cache_path_inventory): return True return False def read_settings(self): """ Reads the settings from the collins.ini file """ config_loc = os.getenv('COLLINS_CONFIG', os.path.dirname(os.path.realpath(__file__)) + '/collins.ini') config = ConfigParser.SafeConfigParser() config.read(os.path.dirname(os.path.realpath(__file__)) + '/collins.ini') self.collins_host = config.get('collins', 'host') self.collins_username = os.getenv('COLLINS_USERNAME', config.get('collins', 'username')) self.collins_password = os.getenv('COLLINS_PASSWORD', config.get('collins', 'password')) self.collins_asset_type = os.getenv('COLLINS_ASSET_TYPE', config.get('collins', 'asset_type')) self.collins_timeout_secs = config.getint('collins', 'timeout_secs') self.collins_max_retries = config.getint('collins', 'max_retries') self.results_per_query = config.getint('collins', 'results_per_query') self.ip_address_index = config.getint('collins', 'ip_address_index') self.query_remote_dcs = config.getboolean('collins', 'query_remote_dcs') self.prefer_hostnames = config.getboolean('collins', 'prefer_hostnames') cache_path = config.get('collins', 'cache_path') self.cache_path_cache = cache_path + \ '/ansible-collins-%s.cache' % self.collins_asset_type self.cache_path_inventory = cache_path + \ '/ansible-collins-%s.index' % self.collins_asset_type self.cache_max_age = config.getint('collins', 'cache_max_age') log_path = config.get('collins', 'log_path') self.log_location = log_path + '/ansible-collins.log' def parse_cli_args(self): """ Command line argument processing """ parser = argparse.ArgumentParser( description='Produces an Ansible Inventory file based on Collins') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests to Collins ' \ '(default: False - use cache files)') parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print all JSON output') self.args = parser.parse_args() def update_cache(self): """ Make calls to Collins and saves the output in a cache """ self.cache = dict() self.inventory = dict() # Locates all server assets from Collins. 
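        # find_assets() is called without attribute filters here, so every
        # asset of the configured type is returned; a narrower CQL query
        # could pass filters instead, e.g. (illustrative):
        #   server_assets = self.find_assets(attributes={'PRIMARY_ROLE': 'WEB'})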
try: server_assets = self.find_assets() except: self.log.error("Error while locating assets from Collins:\n%s", traceback.format_exc()) return False for asset in server_assets: # Determines the index to retrieve the asset's IP address either by an # attribute set on the Collins asset or the pre-configured value. if self._asset_has_attribute(asset, 'ANSIBLE_IP_INDEX'): ip_index = self._asset_get_attribute(asset, 'ANSIBLE_IP_INDEX') try: ip_index = int(ip_index) except: self.log.error( "ANSIBLE_IP_INDEX attribute on asset %s not an integer: %s", asset, ip_index) else: ip_index = self.ip_address_index asset['COLLINS'] = {} # Attempts to locate the asset's primary identifier (hostname or IP address), # which will be used to index the asset throughout the Ansible inventory. if self.prefer_hostnames and self._asset_has_attribute(asset, 'HOSTNAME'): asset_identifier = self._asset_get_attribute(asset, 'HOSTNAME') elif 'ADDRESSES' not in asset: self.log.warning("No IP addresses found for asset '%s', skipping", asset) continue elif len(asset['ADDRESSES']) < ip_index + 1: self.log.warning( "No IP address found at index %s for asset '%s', skipping", ip_index, asset) continue else: asset_identifier = asset['ADDRESSES'][ip_index]['ADDRESS'] # Adds an asset index to the Ansible inventory based upon unpacking # the name of the asset's current STATE from its dictionary. if 'STATE' in asset['ASSET'] and asset['ASSET']['STATE']: state_inventory_key = self.to_safe( 'STATE-%s' % asset['ASSET']['STATE']['NAME']) self.push(self.inventory, state_inventory_key, asset_identifier) # Indexes asset by all user-defined Collins attributes. if 'ATTRIBS' in asset: for attrib_block in asset['ATTRIBS'].keys(): for attrib in asset['ATTRIBS'][attrib_block].keys(): asset['COLLINS'][attrib] = asset['ATTRIBS'][attrib_block][attrib] attrib_key = self.to_safe('%s-%s' % (attrib, asset['ATTRIBS'][attrib_block][attrib])) self.push(self.inventory, attrib_key, asset_identifier) # Indexes asset by all built-in Collins attributes. for attribute in asset['ASSET'].keys(): if attribute not in CollinsDefaults.SPECIAL_ATTRIBUTES: attribute_val = asset['ASSET'][attribute] if attribute_val is not None: attrib_key = self.to_safe('%s-%s' % (attribute, attribute_val)) self.push(self.inventory, attrib_key, asset_identifier) # Indexes asset by hardware product information. if 'HARDWARE' in asset: if 'PRODUCT' in asset['HARDWARE']['BASE']: product = asset['HARDWARE']['BASE']['PRODUCT'] if product: product_key = self.to_safe( 'HARDWARE-PRODUCT-%s' % asset['HARDWARE']['BASE']['PRODUCT']) self.push(self.inventory, product_key, asset_identifier) # Indexing now complete, adds the host details to the asset cache. self.cache[asset_identifier] = asset try: self.write_to_cache(self.cache, self.cache_path_cache) self.write_to_cache(self.inventory, self.cache_path_inventory) except: self.log.error("Error while writing to cache:\n%s", traceback.format_exc()) return False return True def push(self, dictionary, key, value): """ Adds a value to a list at a dictionary key, creating the list if it doesn't exist. """ if key not in dictionary: dictionary[key] = [] dictionary[key].append(value) def get_host_info(self): """ Get variables about a specific host. 
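        Returns '{}' (as JSON) when the host is absent from both the cache and
        a freshly refreshed cache. Example invocation (illustrative):

            ./collins.py --host 192.0.2.10 --pretty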
""" if not self.cache or len(self.cache) == 0: # Need to load index from cache self.load_cache_from_cache() if not self.args.host in self.cache: # try updating the cache self.update_cache() if not self.args.host in self.cache: # host might not exist anymore return self.json_format_dict({}, self.args.pretty) return self.json_format_dict(self.cache[self.args.host], self.args.pretty) def load_inventory_from_cache(self): """ Reads the index from the cache file sets self.index """ try: cache = open(self.cache_path_inventory, 'r') json_inventory = cache.read() self.inventory = json.loads(json_inventory) return True except: self.log.error("Error while loading inventory:\n%s", traceback.format_exc()) self.inventory = {} return False def load_cache_from_cache(self): """ Reads the cache from the cache file sets self.cache """ try: cache = open(self.cache_path_cache, 'r') json_cache = cache.read() self.cache = json.loads(json_cache) return True except: self.log.error("Error while loading host cache:\n%s", traceback.format_exc()) self.cache = {} return False def write_to_cache(self, data, filename): """ Writes data in JSON format to a specified file. """ json_data = self.json_format_dict(data, self.args.pretty) cache = open(filename, 'w') cache.write(json_data) cache.close() def to_safe(self, word): """ Converts 'bad' characters in a string to underscores so they can be used as Ansible groups """ return re.sub("[^A-Za-z0-9\-]", "_", word) def json_format_dict(self, data, pretty=False): """ Converts a dict to a JSON object and dumps it as a formatted string """ if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) if __name__ in '__main__': inventory = CollinsInventory() if inventory.run(): sys.exit(0) else: sys.exit(-1) ansible-2.1.1.0/contrib/inventory/consul.ini0000664000175400017540000000233612746444466022152 0ustar jenkinsjenkins00000000000000# Ansible Consul external inventory script settings. [consul] # restrict included nodes to those from this datacenter #datacenter = nyc1 # url of the the consul cluster to query #url = http://demo.consul.io url = http://localhost:8500 # suffix added to each service to create a group name e.g Service of 'redis' and # a suffix of '_servers' will add each address to the group name 'redis_servers' servers_suffix = _servers # if specified then the inventory will generate domain names that will resolve # via Consul's inbuilt DNS. #domain=consul # make groups from service tags. the name of the group is derived from the # service name and the tag name e.g. a service named nginx with tags ['master', 'v1'] # will create groups nginx_master and nginx_v1 tags = true # looks up the node name at the given path for a list of groups to which the # node should be added. 
kv_groups=ansible/groups # looks up the node name at the given path for a json dictionary of metadata that # should be attached as metadata for the node kv_metadata=ansible/metadata # looks up the health of each service and adds the node to 'up' and 'down' groups # based on the service availability availability = true available_suffix = _up unavailable_suffix = _down ansible-2.1.1.0/contrib/inventory/consul_io.py0000775000175400017540000004075112746444466022510 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # # (c) 2015, Steve Gargan # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ###################################################################### ''' Consul.io inventory script (http://consul.io) ====================================== Generates Ansible inventory from nodes in a Consul cluster. This script will group nodes by: - datacenter, - registered service - service tags - service status - values from the k/v store This script can be run with the switches --list, which, as expected, groups all the nodes in all datacenters --datacenter, to restrict the nodes to a single datacenter --host to restrict the inventory to a single named node. (requires datacenter config) The configuration for this plugin is read from a consul.ini file located in the same directory as this inventory script. All config options in the config file are optional except the host and port, which must point to a valid agent or server running the http api. For more information on enabling the endpoint see: http://www.consul.io/docs/agent/options.html Other options include: 'datacenter': which restricts the included nodes to those from the given datacenter 'domain': if specified then the inventory will generate domain names that will resolve via Consul's inbuilt DNS. The name is derived from the node name, datacenter and domain as <node_name>.node.<datacenter>.<domain>. Note that you will need to have consul hooked into your DNS server for these to resolve. See the consul DNS docs for more info. 'servers_suffix': defining a suffix to add to the service name when creating the service group. e.g. Service name of 'redis' and a suffix of '_servers' will add each node's address to the group name 'redis_servers'. No suffix is added if this is not set 'tags': boolean flag defining if service tags should be used to create Inventory groups e.g. an nginx service with the tags ['master', 'v1'] will create groups nginx_master and nginx_v1 to which the node running the service will be added. No tag groups are created if this is missing. 'token': ACL token to use to authorize access to the key value store. May be required to retrieve the kv_groups and kv_metadata based on your consul configuration. 'kv_groups': This is used to look up groups for a node in the key value store. It specifies a path to which each discovered node's name will be added to create a key to query the key/value store.
There it expects to find a comma separated list of group names to which the node should be added e.g. if the inventory contains 'nyc-web-1' and kv_groups = 'ansible/groups' then, for a node in datacenter 'dc1', the key 'v1/kv/ansible/groups/dc1/nyc-web-1' will be queried for a group list. If this query returned 'test,honeypot' then the node address would be added to both groups. 'kv_metadata': kv_metadata is used to look up metadata for each discovered node. Like kv_groups above it is used to build a path to look up in the kv store where it expects to find a json dictionary of metadata entries. If found, each key/value pair in the dictionary is added to the metadata for the node. 'availability': if true then availability groups will be created for each service. The node will be added to one of the groups based on the health status of the service. The group name is derived from the service name and the configurable availability suffixes 'available_suffix': suffix that should be appended to the service availability groups for available services e.g. if the suffix is '_up' and the service is nginx, then nodes with healthy nginx services will be added to the nginx_up group. Defaults to '_available' 'unavailable_suffix': as above but for unhealthy services, defaults to '_unavailable' Note that if the inventory discovers an 'ssh' service running on a node it will register the port as ansible_ssh_port in the node's metadata and this port will be used to access the machine. ''' import os import re import argparse from time import time import sys import ConfigParser import urllib, urllib2, base64 def get_log_filename(): tty_filename = '/dev/tty' stdout_filename = '/dev/stdout' if not os.path.exists(tty_filename): return stdout_filename if not os.access(tty_filename, os.W_OK): return stdout_filename if os.getenv('TEAMCITY_VERSION'): return stdout_filename return tty_filename def setup_logging(): filename = get_log_filename() import logging.config logging.config.dictConfig({ 'version': 1, 'formatters': { 'simple': { 'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s', }, }, 'root': { 'level': os.getenv('ANSIBLE_INVENTORY_CONSUL_IO_LOG_LEVEL', 'WARN'), 'handlers': ['console'], }, 'handlers': { 'console': { 'class': 'logging.FileHandler', 'filename': filename, 'formatter': 'simple', }, }, 'loggers': { 'iso8601': { 'qualname': 'iso8601', 'level': 'INFO', }, }, }) logger = logging.getLogger('consul_io.py') logger.debug('Invoked with %r', sys.argv) if os.getenv('ANSIBLE_INVENTORY_CONSUL_IO_LOG_ENABLED'): setup_logging() try: import json except ImportError: import simplejson as json try: import consul except ImportError as e: print("""failed=True msg='python-consul required for this module.
see http://python-consul.readthedocs.org/en/latest/#installation'""") sys.exit(1) from six import iteritems class ConsulInventory(object): def __init__(self): ''' Create an inventory based on the catalog of nodes and services registered in a consul cluster''' self.node_metadata = {} self.nodes = {} self.nodes_by_service = {} self.nodes_by_tag = {} self.nodes_by_datacenter = {} self.nodes_by_kv = {} self.nodes_by_availability = {} self.current_dc = None config = ConsulConfig() self.config = config self.consul_api = config.get_consul_api() if config.has_config('datacenter'): if config.has_config('host'): self.load_data_for_node(config.host, config.datacenter) else: self.load_data_for_datacenter(config.datacenter) else: self.load_all_data_consul() self.combine_all_results() print(json.dumps(self.inventory, sort_keys=True, indent=2)) def load_all_data_consul(self): ''' cycle through each of the datacenters in the consul catalog and process the nodes in each ''' self.datacenters = self.consul_api.catalog.datacenters() for datacenter in self.datacenters: self.current_dc = datacenter self.load_data_for_datacenter(datacenter) def load_availability_groups(self, node, datacenter): '''check the health of each service on a node and add the node to either an 'available' or 'unavailable' grouping. The suffix for each group can be controlled from the config''' if self.config.has_config('availability'): for service_name, service in iteritems(node['Services']): for node in self.consul_api.health.service(service_name)[1]: for check in node['Checks']: if check['ServiceName'] == service_name: ok = 'passing' == check['Status'] if ok: suffix = self.config.get_availability_suffix( 'available_suffix', '_available') else: suffix = self.config.get_availability_suffix( 'unavailable_suffix', '_unavailable') self.add_node_to_map(self.nodes_by_availability, service_name + suffix, node['Node']) def load_data_for_datacenter(self, datacenter): '''processes all the nodes in a particular datacenter''' index, nodes = self.consul_api.catalog.nodes(dc=datacenter) for node in nodes: self.add_node_to_map(self.nodes_by_datacenter, datacenter, node) self.load_data_for_node(node['Node'], datacenter) def load_data_for_node(self, node, datacenter): '''loads the data for a single node, adding it to various groups based on metadata retrieved from the kv store and service availability''' index, node_data = self.consul_api.catalog.node(node, dc=datacenter) node = node_data['Node'] self.add_node_to_map(self.nodes, 'all', node) self.add_metadata(node_data, "consul_datacenter", datacenter) self.add_metadata(node_data, "consul_nodename", node['Node']) self.load_groups_from_kv(node_data) self.load_node_metadata_from_kv(node_data) self.load_availability_groups(node_data, datacenter) for name, service in node_data['Services'].items(): self.load_data_from_service(name, service, node_data) def load_node_metadata_from_kv(self, node_data): ''' load the json dict at the metadata path defined by the kv_metadata value and the node name, and add each entry in the dictionary to the node's metadata ''' node = node_data['Node'] if self.config.has_config('kv_metadata'): key = "%s/%s/%s" % (self.config.kv_metadata, self.current_dc, node['Node']) index, metadata = self.consul_api.kv.get(key) if metadata and metadata['Value']: try: metadata = json.loads(metadata['Value']) for k, v in metadata.items(): self.add_metadata(node_data, k, v) except: pass def load_groups_from_kv(self, node_data): ''' load the comma separated list of groups at the path defined by the
kv_groups config value and the node name add the node address to each group found ''' node = node_data['Node'] if self.config.has_config('kv_groups'): key = "%s/%s/%s" % (self.config.kv_groups, self.current_dc, node['Node']) index, groups = self.consul_api.kv.get(key) if groups and groups['Value']: for group in groups['Value'].split(','): self.add_node_to_map(self.nodes_by_kv, group.strip(), node) def load_data_from_service(self, service_name, service, node_data): '''process a service registered on a node, adding the node to a group with the service name. Each service tag is extracted and the node is added to a tag grouping also''' self.add_metadata(node_data, "consul_services", service_name, True) if self.is_service("ssh", service_name): self.add_metadata(node_data, "ansible_ssh_port", service['Port']) if self.config.has_config('servers_suffix'): service_name = service_name + self.config.servers_suffix self.add_node_to_map(self.nodes_by_service, service_name, node_data['Node']) self.extract_groups_from_tags(service_name, service, node_data) def is_service(self, target, name): return name and (name.lower() == target.lower()) def extract_groups_from_tags(self, service_name, service, node_data): '''iterates each service tag and adds the node to groups derived from the service and tag names e.g. nginx_master''' if self.config.has_config('tags') and service['Tags']: tags = service['Tags'] self.add_metadata(node_data, "consul_%s_tags" % service_name, tags) for tag in service['Tags']: tagname = service_name +'_'+tag self.add_node_to_map(self.nodes_by_tag, tagname, node_data['Node']) def combine_all_results(self): '''prunes and sorts all groupings for combination into the final map''' self.inventory = {"_meta": { "hostvars" : self.node_metadata}} groupings = [self.nodes, self.nodes_by_datacenter, self.nodes_by_service, self.nodes_by_tag, self.nodes_by_kv, self.nodes_by_availability] for grouping in groupings: for name, addresses in grouping.items(): self.inventory[name] = sorted(list(set(addresses))) def add_metadata(self, node_data, key, value, is_list = False): ''' Pushed an element onto a metadata dict for the node, creating the dict if it doesn't exist ''' key = self.to_safe(key) node = self.get_inventory_name(node_data['Node']) if node in self.node_metadata: metadata = self.node_metadata[node] else: metadata = {} self.node_metadata[node] = metadata if is_list: self.push(metadata, key, value) else: metadata[key] = value def get_inventory_name(self, node_data): '''return the ip or a node name that can be looked up in consul's dns''' domain = self.config.domain if domain: node_name = node_data['Node'] if self.current_dc: return '%s.node.%s.%s' % ( node_name, self.current_dc, domain) else: return '%s.node.%s' % ( node_name, domain) else: return node_data['Address'] def add_node_to_map(self, map, name, node): self.push(map, name, self.get_inventory_name(node)) def push(self, my_dict, key, element): ''' Pushed an element onto an array that may not have been defined in the dict ''' key = self.to_safe(key) if key in my_dict: my_dict[key].append(element) else: my_dict[key] = [element] def to_safe(self, word): ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' return re.sub('[^A-Za-z0-9\-\.]', '_', word) def sanitize_dict(self, d): new_dict = {} for k, v in d.items(): if v != None: new_dict[self.to_safe(str(k))] = self.to_safe(str(v)) return new_dict def sanitize_list(self, seq): new_seq = [] for d in seq: new_seq.append(self.sanitize_dict(d)) return 
new_seq class ConsulConfig(dict): def __init__(self): self.read_settings() self.read_cli_args() def has_config(self, name): if hasattr(self, name): return getattr(self, name) else: return False def read_settings(self): ''' Reads the settings from the consul.ini file ''' config = ConfigParser.SafeConfigParser() config.read(os.path.dirname(os.path.realpath(__file__)) + '/consul.ini') config_options = ['host', 'token', 'datacenter', 'servers_suffix', 'tags', 'kv_metadata', 'kv_groups', 'availability', 'unavailable_suffix', 'available_suffix', 'url', 'domain'] for option in config_options: value = None if config.has_option('consul', option): value = config.get('consul', option) setattr(self, option, value) def read_cli_args(self): ''' Command line argument processing ''' parser = argparse.ArgumentParser(description= 'Produce an Ansible Inventory file based nodes in a Consul cluster') parser.add_argument('--list', action='store_true', help='Get all inventory variables from all nodes in the consul cluster') parser.add_argument('--host', action='store', help='Get all inventory variables about a specific consul node, \ requires datacenter set in consul.ini.') parser.add_argument('--datacenter', action='store', help='Get all inventory about a specific consul datacenter') args = parser.parse_args() arg_names = ['host', 'datacenter'] for arg in arg_names: if getattr(args, arg): setattr(self, arg, getattr(args, arg)) def get_availability_suffix(self, suffix, default): if self.has_config(suffix): return self.has_config(suffix) return default def get_consul_api(self): '''get an instance of the api based on the supplied configuration''' host = 'localhost' port = 8500 token = None if hasattr(self, 'url'): from urlparse import urlparse o = urlparse(self.url) if o.hostname: host = o.hostname if o.port: port = o.port if hasattr(self, 'token'): token = self.token if not token: token = 'anonymous' return consul.Consul(host=host, port=port, token=token) ConsulInventory() ansible-2.1.1.0/contrib/inventory/digital_ocean.ini0000664000175400017540000000164512746444466023433 0ustar jenkinsjenkins00000000000000# Ansible DigitalOcean external inventory script settings # [digital_ocean] # The module needs your DigitalOcean API Token. # It may also be specified on the command line via --api-token # or via the environment variables DO_API_TOKEN or DO_API_KEY # #api_token = 123456abcdefg # API calls to DigitalOcean may be slow. For this reason, we cache the results # of an API call. Set this to the path you want cache files to be written to. # One file will be written to this directory: # - ansible-digital_ocean.cache # cache_path = /tmp # The number of seconds a cache file is considered valid. After this many # seconds, a new API call will be made, and the cache file will be updated. # cache_max_age = 300 # Use the private network IP address instead of the public when available. # use_private_network = False # Pass variables to every group, e.g.: # # group_variables = { 'ansible_user': 'root' } # group_variables = {} ansible-2.1.1.0/contrib/inventory/digital_ocean.py0000775000175400017540000004266612746444466023317 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python ''' DigitalOcean external inventory script ====================================== Generates Ansible inventory of DigitalOcean Droplets. In addition to the --list and --host options used by Ansible, there are options for generating JSON of other DigitalOcean data. This is useful when creating droplets. 
For example, --regions will return all the DigitalOcean Regions. This information can also be easily found in the cache file, whose default location is /tmp/ansible-digital_ocean.cache. The --pretty (-p) option pretty-prints the output for better human readability. ---- Although the cache stores all the information received from DigitalOcean, the cache is not used for current droplet information (in --list, --host, --all, and --droplets). This is so that accurate droplet information is always found. You can force this script to use the cache with --force-cache. ---- Configuration is read from `digital_ocean.ini`, then from environment variables, and finally from command-line arguments. Most notably, the DigitalOcean API Token must be specified. It can be specified in the INI file or with the following environment variables: export DO_API_TOKEN='abc123' or export DO_API_KEY='abc123' Alternatively, it can be passed on the command-line with --api-token. If you specify DigitalOcean credentials in the INI file, a handy way to get them into your environment (e.g., to use the digital_ocean module) is to use the output of the --env option with export: export $(digital_ocean.py --env) ---- The following groups are generated from --list: - ID (droplet ID) - NAME (droplet NAME) - image_ID - image_NAME - distro_NAME (distribution NAME from image) - region_NAME - size_NAME - status_STATUS When run against a specific host, this script returns the following variables: - do_backup_ids - do_created_at - do_disk - do_features - list - do_id - do_image - object - do_ip_address - do_private_ip_address - do_kernel - object - do_locked - do_memory - do_name - do_networks - object - do_next_backup_window - do_region - object - do_size - object - do_size_slug - do_snapshot_ids - list - do_status - do_vcpus ----- ``` usage: digital_ocean.py [-h] [--list] [--host HOST] [--all] [--droplets] [--regions] [--images] [--sizes] [--ssh-keys] [--domains] [--pretty] [--cache-path CACHE_PATH] [--cache-max_age CACHE_MAX_AGE] [--force-cache] [--refresh-cache] [--api-token API_TOKEN] Produce an Ansible Inventory file based on DigitalOcean credentials optional arguments: -h, --help show this help message and exit --list List all active Droplets as Ansible inventory (default: True) --host HOST Get all Ansible inventory variables about a specific Droplet --all List all DigitalOcean information as JSON --droplets List Droplets as JSON --regions List Regions as JSON --images List Images as JSON --sizes List Sizes as JSON --ssh-keys List SSH keys as JSON --domains List Domains as JSON --pretty, -p Pretty-print results --cache-path CACHE_PATH Path to the cache files (default: .) --cache-max_age CACHE_MAX_AGE Maximum age of the cached items (default: 0) --force-cache Only use data from the cache --refresh-cache Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files) --api-token API_TOKEN, -a API_TOKEN DigitalOcean API Token ``` ''' # (c) 2013, Evan Wies # # Inspired by the EC2 inventory plugin: # https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version.
# # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ###################################################################### import os import sys import re import argparse from time import time import ConfigParser import ast try: import json except ImportError: import simplejson as json try: from dopy.manager import DoError, DoManager except ImportError as e: print("failed=True msg='`dopy` library required for this script'") sys.exit(1) class DigitalOceanInventory(object): ########################################################################### # Main execution path ########################################################################### def __init__(self): ''' Main execution path ''' # DigitalOceanInventory data self.data = {} # All DigitalOcean data self.inventory = {} # Ansible Inventory # Define defaults self.cache_path = '.' self.cache_max_age = 0 self.use_private_network = False self.group_variables = {} # Read settings, environment variables, and CLI arguments self.read_settings() self.read_environment() self.read_cli_args() # Verify credentials were set if not hasattr(self, 'api_token'): print('''Could not find values for DigitalOcean api_token. They must be specified via either ini file, command line argument (--api-token), or environment variables (DO_API_TOKEN)''') sys.exit(-1) # env command, show DigitalOcean credentials if self.args.env: print("DO_API_TOKEN=%s" % self.api_token) sys.exit(0) # Manage cache self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache" self.cache_refreshed = False if self.is_cache_valid(): self.load_from_cache() if len(self.data) == 0: if self.args.force_cache: print('''Cache is empty and --force-cache was specified''') sys.exit(-1) self.manager = DoManager(None, self.api_token, api_version=2) # Pick the json_data to print based on the CLI command if self.args.droplets: self.load_from_digital_ocean('droplets') json_data = {'droplets': self.data['droplets']} elif self.args.regions: self.load_from_digital_ocean('regions') json_data = {'regions': self.data['regions']} elif self.args.images: self.load_from_digital_ocean('images') json_data = {'images': self.data['images']} elif self.args.sizes: self.load_from_digital_ocean('sizes') json_data = {'sizes': self.data['sizes']} elif self.args.ssh_keys: self.load_from_digital_ocean('ssh_keys') json_data = {'ssh_keys': self.data['ssh_keys']} elif self.args.domains: self.load_from_digital_ocean('domains') json_data = {'domains': self.data['domains']} elif self.args.all: self.load_from_digital_ocean() json_data = self.data elif self.args.host: json_data = self.load_droplet_variables_for_host() else: # '--list' this is last to make it default self.load_from_digital_ocean('droplets') self.build_inventory() json_data = self.inventory if self.cache_refreshed: self.write_to_cache() if self.args.pretty: print(json.dumps(json_data, sort_keys=True, indent=2)) else: print(json.dumps(json_data)) # That's all she wrote...
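    # Typical invocations, per the usage text in the module docstring (values
    # illustrative):
    #   ./digital_ocean.py --list --pretty
    #   ./digital_ocean.py --host 12345 --api-token abc123
    #   export $(./digital_ocean.py --env)   # exports DO_API_TOKEN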
    ###########################################################################
    # Script configuration
    ###########################################################################

    def read_settings(self):
        ''' Reads the settings from the digital_ocean.ini file '''
        config = ConfigParser.SafeConfigParser()
        config.read(os.path.dirname(os.path.realpath(__file__)) + '/digital_ocean.ini')

        # Credentials
        if config.has_option('digital_ocean', 'api_token'):
            self.api_token = config.get('digital_ocean', 'api_token')

        # Cache related
        if config.has_option('digital_ocean', 'cache_path'):
            self.cache_path = config.get('digital_ocean', 'cache_path')
        if config.has_option('digital_ocean', 'cache_max_age'):
            self.cache_max_age = config.getint('digital_ocean', 'cache_max_age')

        # Private IP Address
        # (use getboolean so the string 'False' is not treated as truthy)
        if config.has_option('digital_ocean', 'use_private_network'):
            self.use_private_network = config.getboolean('digital_ocean', 'use_private_network')

        # Group variables
        if config.has_option('digital_ocean', 'group_variables'):
            self.group_variables = ast.literal_eval(config.get('digital_ocean', 'group_variables'))

    def read_environment(self):
        ''' Reads the settings from environment variables '''
        # Setup credentials
        if os.getenv("DO_API_TOKEN"):
            self.api_token = os.getenv("DO_API_TOKEN")
        if os.getenv("DO_API_KEY"):
            self.api_token = os.getenv("DO_API_KEY")

    def read_cli_args(self):
        ''' Command line argument processing '''
        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on DigitalOcean credentials')

        parser.add_argument('--list', action='store_true', help='List all active Droplets as Ansible inventory (default: True)')
        parser.add_argument('--host', action='store', help='Get all Ansible inventory variables about a specific Droplet')

        parser.add_argument('--all', action='store_true', help='List all DigitalOcean information as JSON')
        parser.add_argument('--droplets', '-d', action='store_true', help='List Droplets as JSON')
        parser.add_argument('--regions', action='store_true', help='List Regions as JSON')
        parser.add_argument('--images', action='store_true', help='List Images as JSON')
        parser.add_argument('--sizes', action='store_true', help='List Sizes as JSON')
        parser.add_argument('--ssh-keys', action='store_true', help='List SSH keys as JSON')
        parser.add_argument('--domains', action='store_true', help='List Domains as JSON')

        parser.add_argument('--pretty', '-p', action='store_true', help='Pretty-print results')

        parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)')
        parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)')
        parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache')
        parser.add_argument('--refresh-cache', '-r', action='store_true', default=False,
                            help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)')

        parser.add_argument('--env', '-e', action='store_true', help='Display DO_API_TOKEN')
        parser.add_argument('--api-token', '-a', action='store', help='DigitalOcean API Token')

        self.args = parser.parse_args()

        if self.args.api_token:
            self.api_token = self.args.api_token

        # Make --list default if none of the other commands are specified
        if (not self.args.droplets and not self.args.regions and
                not self.args.images and not self.args.sizes and
                not self.args.ssh_keys and not self.args.domains and
                not self.args.all and not self.args.host):
            self.args.list = True
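    # A minimal digital_ocean.ini exercising the options read by read_settings
    # above (all values are illustrative placeholders, not real credentials):
    #
    #   [digital_ocean]
    #   api_token = abc123
    #   cache_path = /tmp
    #   cache_max_age = 300
    #   use_private_network = False
    #   group_variables = {'ansible_user': 'root'}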
    ###########################################################################
    # Data Management
    ###########################################################################

    def load_from_digital_ocean(self, resource=None):
        '''Get JSON from DigitalOcean API'''
        if self.args.force_cache:
            return
        # We always get fresh droplets
        if self.is_cache_valid() and not (resource == 'droplets' or resource is None):
            return
        if self.args.refresh_cache:
            resource = None

        if resource == 'droplets' or resource is None:
            self.data['droplets'] = self.manager.all_active_droplets()
            self.cache_refreshed = True
        if resource == 'regions' or resource is None:
            self.data['regions'] = self.manager.all_regions()
            self.cache_refreshed = True
        if resource == 'images' or resource is None:
            self.data['images'] = self.manager.all_images(filter=None)
            self.cache_refreshed = True
        if resource == 'sizes' or resource is None:
            self.data['sizes'] = self.manager.sizes()
            self.cache_refreshed = True
        if resource == 'ssh_keys' or resource is None:
            self.data['ssh_keys'] = self.manager.all_ssh_keys()
            self.cache_refreshed = True
        if resource == 'domains' or resource is None:
            self.data['domains'] = self.manager.all_domains()
            self.cache_refreshed = True

    def build_inventory(self):
        '''Build Ansible inventory of droplets'''
        self.inventory = {}

        # add all droplets by id and name
        for droplet in self.data['droplets']:
            # when using private_networking, the API reports the private one in "ip_address";
            # initialize dest so a droplet with no public IPv4 network cannot
            # reuse the previous iteration's address (or raise a NameError)
            dest = None
            if 'private_networking' in droplet['features'] and not self.use_private_network:
                for net in droplet['networks']['v4']:
                    if net['type'] == 'public':
                        dest = net['ip_address']
            else:
                dest = droplet['ip_address']
            if dest is None:
                # no usable address was found for this droplet; skip it
                continue

            dest = {'hosts': [dest], 'vars': self.group_variables}

            self.inventory[droplet['id']] = dest
            self.inventory[droplet['name']] = dest
            self.inventory['region_' + droplet['region']['slug']] = dest
            self.inventory['image_' + str(droplet['image']['id'])] = dest
            self.inventory['size_' + droplet['size']['slug']] = dest

            image_slug = droplet['image']['slug']
            if image_slug:
                self.inventory['image_' + self.to_safe(image_slug)] = dest
            else:
                image_name = droplet['image']['name']
                if image_name:
                    self.inventory['image_' + self.to_safe(image_name)] = dest

            self.inventory['distro_' + self.to_safe(droplet['image']['distribution'])] = dest
            self.inventory['status_' + droplet['status']] = dest

    def load_droplet_variables_for_host(self):
        '''Generate a JSON response to a --host call'''
        host = int(self.args.host)
        droplet = self.manager.show_droplet(host)

        # Put all the information in a 'do_' namespace
        info = {}
        for k, v in droplet.items():
            info['do_' + k] = v

        return {'droplet': info}

    ###########################################################################
    # Cache Management
    ###########################################################################

    def is_cache_valid(self):
        ''' Determines if the cache file has expired or if it is still valid '''
        if os.path.isfile(self.cache_filename):
            mod_time = os.path.getmtime(self.cache_filename)
            current_time = time()
            if (mod_time + self.cache_max_age) > current_time:
                return True
        return False

    def load_from_cache(self):
        ''' Reads the data from the cache file and assigns it to member variables as Python Objects '''
        try:
            cache = open(self.cache_filename, 'r')
            json_data = cache.read()
            cache.close()
            data = json.loads(json_data)
        except IOError:
            data = {'data': {}, 'inventory': {}}

        self.data = data['data']
        self.inventory = data['inventory']

    def write_to_cache(self):
        ''' Writes data in JSON format to a file '''
        data = {'data': self.data, 'inventory': self.inventory}
        json_data = json.dumps(data, sort_keys=True, indent=2)

        cache = open(self.cache_filename, 'w')
        cache.write(json_data)
        cache.close()

    ###########################################################################
    # Utilities
    ###########################################################################

    def push(self, my_dict, key, element):
        ''' Pushes an element onto an array that may not have been defined in the dict '''
        if key in my_dict:
            my_dict[key].append(element)
        else:
            my_dict[key] = [element]

    def to_safe(self, word):
        ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
        return re.sub(r"[^A-Za-z0-9\-\.]", "_", word)


###########################################################################
# Run the script
DigitalOceanInventory()
ansible-2.1.1.0/contrib/inventory/docker.py0000775000175400017540000010147412746444466021775 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python
#
# (c) 2016 Paul Durivage
#     Chris Houseknecht
#     James Tanner
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#

DOCUMENTATION = '''

Docker Inventory Script
=======================
The inventory script generates dynamic inventory by making API requests to one or more Docker APIs. It's
dynamic because the inventory is generated at run-time rather than being read from a static file. The script
generates the inventory by connecting to one or many Docker APIs and inspecting the containers it finds at
each API. Which APIs the script contacts can be defined using environment variables or a configuration file.

Requirements
------------
Using this inventory script requires having docker-py installed on the host running Ansible.
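At its core, the script does the equivalent of the following docker-py calls
(a simplified, illustrative sketch; the socket path shown is docker-py's
default, and the inspected keys are a subset of those documented below):

    from docker import Client

    client = Client(base_url='unix://var/run/docker.sock')
    for container in client.containers(all=True):
        details = client.inspect_container(container['Id'])
        print(details['Name'], details['State']['Status'])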
To install docker-py: pip install docker-py Run for Specific Host --------------------- When run for a specific container using the --host option this script returns the following hostvars: { "ansible_ssh_host": "", "ansible_ssh_port": 0, "docker_apparmorprofile": "", "docker_args": [], "docker_config": { "AttachStderr": false, "AttachStdin": false, "AttachStdout": false, "Cmd": [ "/hello" ], "Domainname": "", "Entrypoint": null, "Env": null, "Hostname": "9f2f80b0a702", "Image": "hello-world", "Labels": {}, "OnBuild": null, "OpenStdin": false, "StdinOnce": false, "Tty": false, "User": "", "Volumes": null, "WorkingDir": "" }, "docker_created": "2016-04-18T02:05:59.659599249Z", "docker_driver": "aufs", "docker_execdriver": "native-0.2", "docker_execids": null, "docker_graphdriver": { "Data": null, "Name": "aufs" }, "docker_hostconfig": { "Binds": null, "BlkioWeight": 0, "CapAdd": null, "CapDrop": null, "CgroupParent": "", "ConsoleSize": [ 0, 0 ], "ContainerIDFile": "", "CpuPeriod": 0, "CpuQuota": 0, "CpuShares": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": null, "Dns": null, "DnsOptions": null, "DnsSearch": null, "ExtraHosts": null, "GroupAdd": null, "IpcMode": "", "KernelMemory": 0, "Links": null, "LogConfig": { "Config": {}, "Type": "json-file" }, "LxcConf": null, "Memory": 0, "MemoryReservation": 0, "MemorySwap": 0, "MemorySwappiness": null, "NetworkMode": "default", "OomKillDisable": false, "PidMode": "host", "PortBindings": null, "Privileged": false, "PublishAllPorts": false, "ReadonlyRootfs": false, "RestartPolicy": { "MaximumRetryCount": 0, "Name": "" }, "SecurityOpt": [ "label:disable" ], "UTSMode": "", "Ulimits": null, "VolumeDriver": "", "VolumesFrom": null }, "docker_hostnamepath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/hostname", "docker_hostspath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/hosts", "docker_id": "9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14", "docker_image": "0a6ba66e537a53a5ea94f7c6a99c534c6adb12e3ed09326d4bf3b38f7c3ba4e7", "docker_logpath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14-json.log", "docker_mountlabel": "", "docker_mounts": [], "docker_name": "/hello-world", "docker_networksettings": { "Bridge": "", "EndpointID": "", "Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "HairpinMode": false, "IPAddress": "", "IPPrefixLen": 0, "IPv6Gateway": "", "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "MacAddress": "", "Networks": { "bridge": { "EndpointID": "", "Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "", "IPPrefixLen": 0, "IPv6Gateway": "", "MacAddress": "" } }, "Ports": null, "SandboxID": "", "SandboxKey": "", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null }, "docker_path": "/hello", "docker_processlabel": "", "docker_resolvconfpath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/resolv.conf", "docker_restartcount": 0, "docker_short_id": "9f2f80b0a7023", "docker_state": { "Dead": false, "Error": "", "ExitCode": 0, "FinishedAt": "2016-04-18T02:06:00.296619369Z", "OOMKilled": false, "Paused": false, "Pid": 0, "Restarting": false, "Running": false, "StartedAt": "2016-04-18T02:06:00.272065041Z", "Status": "exited" } } Groups ------ When run in --list mode (the 
default), container instances are grouped by:

 - container id
 - container name
 - container short id
 - image_name  (image_<image name>)
 - docker_host
 - running
 - stopped

Configuration:
--------------
You can control the behavior of the inventory script by passing arguments, defining environment variables, or
creating a configuration file named docker.yml (sample provided in ansible/contrib/inventory). The order of
precedence is command line args, then the docker.yml file, and finally environment variables.

Environment variables:
......................

To connect to a single Docker API the following variables can be defined in the environment to control the
connection options. These are the same environment variables used by the Docker modules.

    DOCKER_HOST:
        The URL or Unix socket path used to connect to the Docker API. Defaults to unix://var/run/docker.sock.

    DOCKER_API_VERSION:
        The version of the Docker API running on the Docker Host. Defaults to the latest version of the API
        supported by docker-py.

    DOCKER_TIMEOUT:
        The maximum amount of time in seconds to wait on a response from the API. Defaults to 60 seconds.

    DOCKER_TLS:
        Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
        server. Defaults to False.

    DOCKER_TLS_VERIFY:
        Secure the connection to the API by using TLS and verifying the authenticity of the Docker host
        server. Defaults to False.

    DOCKER_TLS_HOSTNAME:
        When verifying the authenticity of the Docker Host server, provide the expected name of the server.
        Defaults to localhost.

    DOCKER_CERT_PATH:
        Path to the directory containing the client certificate, client key and CA certificate.

    DOCKER_SSL_VERSION:
        Provide a valid SSL version number. The default value is determined by docker-py, which at the time
        of this writing was 1.0.

In addition to the connection variables there are a couple of variables used to control the execution and
output of the script:

    DOCKER_CONFIG_FILE:
        Path to the configuration file. Defaults to ./docker.yml.

    DOCKER_PRIVATE_SSH_PORT:
        The private port (container port) on which SSH is listening for connections. Defaults to 22.

    DOCKER_DEFAULT_IP:
        The IP address to assign to ansible_host when the container's SSH port is mapped to interface
        '0.0.0.0'.

Configuration File
..................

Using a configuration file provides a means for defining a set of Docker APIs from which to build an
inventory.

The default name of the file is derived from the name of the inventory script. By default the script will
look for the basename of the script (i.e. docker) with an extension of '.yml'.

You can also override the default name of the config file by defining DOCKER_CONFIG_FILE in the environment.

Here's what you can define in docker.yml:

    defaults
        Defines a default connection. Defaults will be taken from this and applied to any values not provided
        for a host defined in the hosts list.

    hosts
        If you wish to get inventory from more than one Docker host, define a hosts list.

For the default host and each host in the hosts list define the following attributes:

    host:
        description: The URL or Unix socket path used to connect to the Docker API.
        required: yes

    tls:
        description: Connect using TLS without verifying the authenticity of the Docker host server.
        default: false
        required: false

    tls_verify:
        description: Connect using TLS and verify the authenticity of the Docker host server.
        default: false
        required: false

    cert_path:
        description: Path to the client's TLS certificate file.
        default: null
        required: false

    cacert_path:
        description: Use a CA certificate when performing server verification by providing the path to a CA
                     certificate file.
        default: null
        required: false

    key_path:
        description: Path to the client's TLS key file.
        default: null
        required: false

    version:
        description: The Docker API version.
        required: false
        default: will be supplied by the docker-py module.

    timeout:
        description: The amount of time in seconds to wait on an API response.
        required: false
        default: 60

    default_ip:
        description: The IP address to assign to ansible_host when the container's SSH port is mapped to
                     interface '0.0.0.0'.
        required: false
        default: 127.0.0.1

    private_ssh_port:
        description: The port containers use for SSH.
        required: false
        default: 22

Examples
--------

# Connect to the Docker API on localhost port 4243 and format the JSON output
DOCKER_HOST=tcp://localhost:4243 ./docker.py --pretty

# Any container's ssh port exposed on 0.0.0.0 will be mapped to
# another IP address (where Ansible will attempt to connect via SSH)
DOCKER_DEFAULT_IP=1.2.3.4 ./docker.py --pretty

# Run as input to a playbook:
ansible-playbook -i ~/projects/ansible/contrib/inventory/docker.py docker_inventory_test.yml

# Simple playbook to invoke with the above example:

    - name: Test docker_inventory
      hosts: all
      connection: local
      gather_facts: no
      tasks:
        - debug: msg="Container - {{ inventory_hostname }}"
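The --list output has the standard dynamic-inventory shape; abridged and
illustrative (container name, address, and port are placeholders):

    {
        "docker_hosts": ["unix://var/run/docker.sock"],
        "_meta": {
            "hostvars": {
                "web_container": {
                    "ansible_ssh_host": "127.0.0.1",
                    "ansible_ssh_port": "8022"
                }
            }
        },
        "running": ["web_container"],
        "web_container": ["web_container"]
    }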
'''

import os
import sys
import json
import argparse
import re
import yaml

from collections import defaultdict

# Manipulation of the path is needed because the docker-py
# module is imported by the name docker, and because this file
# is also named docker
for path in [os.getcwd(), '', os.path.dirname(os.path.abspath(__file__))]:
    try:
        del sys.path[sys.path.index(path)]
    except ValueError:
        # path was not on sys.path; nothing to remove
        pass

HAS_DOCKER_PY = True
HAS_DOCKER_ERROR = False

try:
    from docker import Client
    from docker.errors import APIError, TLSParameterError
    from docker.tls import TLSConfig
    from docker.constants import DEFAULT_TIMEOUT_SECONDS, DEFAULT_DOCKER_API_VERSION
except ImportError as exc:
    HAS_DOCKER_ERROR = str(exc)
    HAS_DOCKER_PY = False

DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
DEFAULT_TLS = False
DEFAULT_TLS_VERIFY = False
DEFAULT_IP = '127.0.0.1'
DEFAULT_SSH_PORT = '22'
BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1, True]
BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0, False]

DOCKER_ENV_ARGS = dict(
    config_file='DOCKER_CONFIG_FILE',
    docker_host='DOCKER_HOST',
    api_version='DOCKER_API_VERSION',
    cert_path='DOCKER_CERT_PATH',
    ssl_version='DOCKER_SSL_VERSION',
    tls='DOCKER_TLS',
    tls_verify='DOCKER_TLS_VERIFY',
    timeout='DOCKER_TIMEOUT',
    private_ssh_port='DOCKER_DEFAULT_SSH_PORT',
    default_ip='DOCKER_DEFAULT_IP',
)


def fail(msg):
    sys.stderr.write("%s\n" % msg)
    sys.exit(1)


def log(msg, pretty_print=False):
    if pretty_print:
        print(json.dumps(msg, sort_keys=True, indent=2))
    else:
        print(msg + u'\n')


class AnsibleDockerClient(Client):
    def __init__(self, auth_params, debug):

        self.auth_params = auth_params
        self.debug = debug
        self._connect_params = self._get_connect_params()

        try:
            super(AnsibleDockerClient, self).__init__(**self._connect_params)
        except APIError as exc:
            self.fail("Docker API error: %s" % exc)
        except Exception as exc:
            self.fail("Error connecting: %s" % exc)

    def fail(self, msg):
        fail(msg)

    def log(self, msg, pretty_print=False):
        if self.debug:
            log(msg, pretty_print)

    def _get_tls_config(self, **kwargs):
        self.log("get_tls_config:")
        for key in kwargs:
            self.log("  %s: %s" % (key, kwargs[key]))
        try:
            tls_config = TLSConfig(**kwargs)
            return tls_config
        except TLSParameterError as exc:
            self.fail("TLS config error: %s" % exc)

    def _get_connect_params(self):
        auth = self.auth_params

        self.log("auth params:")
        for key in auth:
            self.log("  %s: %s" % (key, auth[key]))

        if auth['tls'] or auth['tls_verify']:
            auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')

        if auth['tls'] and auth['cert_path'] and auth['key_path']:
            # TLS with certs and no host verification
            tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
                                              verify=False,
                                              ssl_version=auth['ssl_version'])
            return dict(base_url=auth['docker_host'],
                        tls=tls_config,
                        version=auth['api_version'],
                        timeout=auth['timeout'])

        if auth['tls']:
            # TLS with no certs and no host verification
            tls_config = self._get_tls_config(verify=False,
                                              ssl_version=auth['ssl_version'])
            return dict(base_url=auth['docker_host'],
                        tls=tls_config,
                        version=auth['api_version'],
                        timeout=auth['timeout'])

        if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
            # TLS with certs and host verification
            if auth['cacert_path']:
                tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
                                                  ca_cert=auth['cacert_path'],
                                                  verify=True,
                                                  assert_hostname=auth['tls_hostname'],
                                                  ssl_version=auth['ssl_version'])
            else:
                tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
                                                  verify=True,
                                                  assert_hostname=auth['tls_hostname'],
                                                  ssl_version=auth['ssl_version'])
            return dict(base_url=auth['docker_host'],
                        tls=tls_config,
                        version=auth['api_version'],
                        timeout=auth['timeout'])

        if auth['tls_verify'] and auth['cacert_path']:
            # TLS with cacert only
            tls_config = self._get_tls_config(ca_cert=auth['cacert_path'],
                                              assert_hostname=auth['tls_hostname'],
                                              verify=True,
                                              ssl_version=auth['ssl_version'])
            return dict(base_url=auth['docker_host'],
                        tls=tls_config,
                        version=auth['api_version'],
                        timeout=auth['timeout'])

        if auth['tls_verify']:
            # TLS with verify and no certs
            tls_config = self._get_tls_config(verify=True,
                                              assert_hostname=auth['tls_hostname'],
                                              ssl_version=auth['ssl_version'])
            return dict(base_url=auth['docker_host'],
                        tls=tls_config,
                        version=auth['api_version'],
                        timeout=auth['timeout'])

        # No TLS
        return dict(base_url=auth['docker_host'],
                    version=auth['api_version'],
                    timeout=auth['timeout'])

    def _handle_ssl_error(self, error):
        match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
        if match:
            msg = "You asked for verification that Docker host name matches %s. The actual hostname is %s. " \
                "Most likely you need to set DOCKER_TLS_HOSTNAME or pass tls_hostname with a value of %s. " \
                "You may also use TLS without verification by setting the tls parameter to true." \
                % (self.auth_params['tls_hostname'], match.group(1), match.group(1))
            # (three %s placeholders, so the matched hostname is passed twice)
            self.fail(msg)
        self.fail("SSL Exception: %s" % (error))


class EnvArgs(object):
    def __init__(self):
        self.config_file = None
        self.docker_host = None
        self.api_version = None
        self.cert_path = None
        self.ssl_version = None
        self.tls = None
        self.tls_verify = None
        self.tls_hostname = None
        self.timeout = None
        self.default_ssh_port = None
        self.default_ip = None


class DockerInventory(object):

    def __init__(self):
        self._args = self._parse_cli_args()
        self._env_args = self._parse_env_args()
        self.groups = defaultdict(list)
        self.hostvars = defaultdict(dict)

    def run(self):
        config_from_file = self._parse_config_file()
        if not config_from_file:
            config_from_file = dict()
        docker_hosts = self.get_hosts(config_from_file)

        for host in docker_hosts:
            client = AnsibleDockerClient(host, self._args.debug)
            self.get_inventory(client, host)

        if not self._args.host:
            self.groups['docker_hosts'] = [host.get('docker_host') for host in docker_hosts]
            self.groups['_meta'] = dict(
                hostvars=self.hostvars
            )
            print(self._json_format_dict(self.groups, pretty_print=self._args.pretty))
        else:
            print(self._json_format_dict(self.hostvars.get(self._args.host, dict()), pretty_print=self._args.pretty))

        sys.exit(0)

    def get_inventory(self, client, host):

        ssh_port = host.get('default_ssh_port')
        default_ip = host.get('default_ip')
        hostname = host.get('docker_host')

        try:
            containers = client.containers(all=True)
        except Exception as exc:
            self.fail("Error fetching containers for host %s - %s" % (hostname, str(exc)))

        for container in containers:
            id = container.get('Id')
            short_id = id[:13]

            try:
                name = container.get('Names', list()).pop(0).lstrip('/')
            except IndexError:
                name = short_id

            if not self._args.host or (self._args.host and self._args.host in [name, id, short_id]):
                try:
                    inspect = client.inspect_container(id)
                except Exception as exc:
                    self.fail("Error inspecting container %s - %s" % (name, str(exc)))

                running = inspect.get('State', dict()).get('Running')

                # Add container to groups
                image_name = inspect.get('Config', dict()).get('Image')
                if image_name:
                    self.groups["image_%s" % (image_name)].append(name)

                self.groups[id].append(name)
                self.groups[name].append(name)
                if short_id not in self.groups.keys():
                    self.groups[short_id].append(name)
                self.groups[hostname].append(name)

                if running is True:
                    self.groups['running'].append(name)
                else:
                    self.groups['stopped'].append(name)

                # Figure out SSH IP and port
                try:
                    # Look up the public-facing port NAT'ed to the SSH port.
                    port = client.port(container, ssh_port)[0]
                except (IndexError, AttributeError, TypeError):
                    port = dict()

                try:
                    ip = default_ip if port['HostIp'] == '0.0.0.0' else port['HostIp']
                except KeyError:
                    ip = ''

                facts = dict(
                    ansible_ssh_host=ip,
                    ansible_ssh_port=port.get('HostPort', int()),
                    docker_name=name,
                    docker_short_id=short_id
                )

                for key in inspect:
                    fact_key = self._slugify(key)
                    facts[fact_key] = inspect.get(key)

                self.hostvars[name].update(facts)

    def _slugify(self, value):
        return 'docker_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))

    def get_hosts(self, config):
        '''
        Determine the list of docker hosts we need to talk to.

        :param config: dictionary read from config file. Can be empty.
:return: list of connection dictionaries ''' hosts = list() hosts_list = config.get('hosts') defaults = config.get('defaults', dict()) self.log('defaults:') self.log(defaults, pretty_print=True) def_host = defaults.get('host') def_tls = defaults.get('tls') def_tls_verify = defaults.get('tls_verify') def_tls_hostname = defaults.get('tls_hostname') def_ssl_version = defaults.get('ssl_version') def_cert_path = defaults.get('cert_path') def_cacert_path = defaults.get('cacert_path') def_key_path = defaults.get('key_path') def_version = defaults.get('version') def_timeout = defaults.get('timeout') def_ip = defaults.get('default_ip') def_ssh_port = defaults.get('private_ssh_port') if hosts_list: # use hosts from config file for host in hosts_list: docker_host = host.get('host') or def_host or self._args.docker_host or \ self._env_args.docker_host or DEFAULT_DOCKER_HOST api_version = host.get('version') or def_version or self._args.api_version or \ self._env_args.api_version or DEFAULT_DOCKER_API_VERSION tls_hostname = host.get('tls_hostname') or def_tls_hostname or self._args.tls_hostname or \ self._env_args.tls_hostname tls_verify = host.get('tls_verify') or def_tls_verify or self._args.tls_verify or \ self._env_args.tls_verify or DEFAULT_TLS_VERIFY tls = host.get('tls') or def_tls or self._args.tls or self._env_args.tls or DEFAULT_TLS ssl_version = host.get('ssl_version') or def_ssl_version or self._args.ssl_version or \ self._env_args.ssl_version cert_path = host.get('cert_path') or def_cert_path or self._args.cert_path or \ self._env_args.cert_path if cert_path and cert_path == self._env_args.cert_path: cert_path = os.path.join(cert_path, 'cert.pem') cacert_path = host.get('cacert_path') or def_cacert_path or self._args.cacert_path or \ self._env_args.cert_path if cacert_path and cacert_path == self._env_args.cert_path: cacert_path = os.path.join(cacert_path, 'ca.pem') key_path = host.get('key_path') or def_key_path or self._args.key_path or \ self._env_args.cert_path if key_path and key_path == self._env_args.cert_path: key_path = os.path.join(key_path, 'key.pem') timeout = host.get('timeout') or def_timeout or self._args.timeout or self._env_args.timeout or \ DEFAULT_TIMEOUT_SECONDS default_ip = host.get('default_ip') or def_ip or self._args.default_ip_address or \ DEFAULT_IP default_ssh_port = host.get('private_ssh_port') or def_ssh_port or self._args.private_ssh_port or \ DEFAULT_SSH_PORT host_dict = dict( docker_host=docker_host, api_version=api_version, tls=tls, tls_verify=tls_verify, tls_hostname=tls_hostname, cert_path=cert_path, cacert_path=cacert_path, key_path=key_path, ssl_version=ssl_version, timeout=timeout, default_ip=default_ip, default_ssh_port=default_ssh_port, ) hosts.append(host_dict) else: # use default definition docker_host = def_host or self._args.docker_host or self._env_args.docker_host or DEFAULT_DOCKER_HOST api_version = def_version or self._args.api_version or self._env_args.api_version or \ DEFAULT_DOCKER_API_VERSION tls_hostname = def_tls_hostname or self._args.tls_hostname or self._env_args.tls_hostname tls_verify = def_tls_verify or self._args.tls_verify or self._env_args.tls_verify or DEFAULT_TLS_VERIFY tls = def_tls or self._args.tls or self._env_args.tls or DEFAULT_TLS ssl_version = def_ssl_version or self._args.ssl_version or self._env_args.ssl_version cert_path = def_cert_path or self._args.cert_path or self._env_args.cert_path if cert_path and cert_path == self._env_args.cert_path: cert_path = os.path.join(cert_path, 'cert.pem') cacert_path = 
def_cacert_path or self._args.cacert_path or self._env_args.cert_path
            if cacert_path and cacert_path == self._env_args.cert_path:
                cacert_path = os.path.join(cacert_path, 'ca.pem')

            key_path = def_key_path or self._args.key_path or self._env_args.cert_path
            if key_path and key_path == self._env_args.cert_path:
                key_path = os.path.join(key_path, 'key.pem')

            timeout = def_timeout or self._args.timeout or self._env_args.timeout or DEFAULT_TIMEOUT_SECONDS
            default_ip = def_ip or self._args.default_ip_address or DEFAULT_IP
            default_ssh_port = def_ssh_port or self._args.private_ssh_port or DEFAULT_SSH_PORT

            host_dict = dict(
                docker_host=docker_host,
                api_version=api_version,
                tls=tls,
                tls_verify=tls_verify,
                tls_hostname=tls_hostname,
                cert_path=cert_path,
                cacert_path=cacert_path,
                key_path=key_path,
                ssl_version=ssl_version,
                timeout=timeout,
                default_ip=default_ip,
                default_ssh_port=default_ssh_port,
            )

            hosts.append(host_dict)

        self.log("hosts: ")
        self.log(hosts, pretty_print=True)
        return hosts

    def _parse_config_file(self):
        config = dict()
        config_path = None

        if self._args.config_file:
            config_path = self._args.config_file
        elif self._env_args.config_file:
            config_path = self._env_args.config_file

        if config_path:
            try:
                config_file = os.path.abspath(config_path)
            except Exception:
                # unable to resolve the path; fall through with no config file
                config_file = None

            if config_file and os.path.exists(config_file):
                with open(config_file) as f:
                    try:
                        config = yaml.safe_load(f.read())
                    except Exception as exc:
                        self.fail("Error: parsing %s - %s" % (config_path, str(exc)))
        return config

    def log(self, msg, pretty_print=False):
        if self._args.debug:
            log(msg, pretty_print)

    def fail(self, msg):
        fail(msg)

    def _parse_env_args(self):
        args = EnvArgs()
        for key, value in DOCKER_ENV_ARGS.items():
            if os.environ.get(value):
                val = os.environ.get(value)
                if val in BOOLEANS_TRUE:
                    val = True
                if val in BOOLEANS_FALSE:
                    val = False
                setattr(args, key, val)
        return args
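    # Example (illustrative): with DOCKER_HOST=tcp://localhost:4243 and
    # DOCKER_TLS=1 in the environment, _parse_env_args returns an EnvArgs
    # object with docker_host='tcp://localhost:4243' and tls=True
    # ('1' is coerced to a boolean via BOOLEANS_TRUE above).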
Defaults to %s" % (DEFAULT_TIMEOUT_SECONDS)) parser.add_argument('--cacert-path', action='store', default=None, help="Path to the TLS certificate authority pem file.") parser.add_argument('--cert-path', action='store', default=None, help="Path to the TLS certificate pem file.") parser.add_argument('--key-path', action='store', default=None, help="Path to the TLS encryption key pem file.") parser.add_argument('--ssl-version', action='store', default=None, help="TLS version number") parser.add_argument('--tls', action='store_true', default=None, help="Use TLS. Defaults to %s" % (DEFAULT_TLS)) parser.add_argument('--tls-verify', action='store_true', default=None, help="Verify TLS certificates. Defaults to %s" % (DEFAULT_TLS_VERIFY)) parser.add_argument('--private-ssh-port', action='store', default=None, help="Default private container SSH Port. Defaults to %s" % (DEFAULT_SSH_PORT)) parser.add_argument('--default-ip-address', action='store', default=None, help="Default container SSH IP address. Defaults to %s" % (DEFAULT_IP)) return parser.parse_args() def _json_format_dict(self, data, pretty_print=False): # format inventory data for output if pretty_print: return json.dumps(data, sort_keys=True, indent=4) else: return json.dumps(data) def main(): if not HAS_DOCKER_PY: fail("Failed to import docker-py. Try `pip install docker-py` - %s" % (HAS_DOCKER_ERROR)) DockerInventory().run() main() ansible-2.1.1.0/contrib/inventory/docker.yml0000664000175400017540000000417612746444466022144 0ustar jenkinsjenkins00000000000000# This is the configuration file for the Docker inventory script: docker_inventory.py. # # You can define the following in this file: # # defaults # Defines a default connection. Defaults will be taken from this and applied to any values not provided # for a host defined in the hosts list. # # hosts # If you wish to get inventory from more than one Docker host, define a hosts list. # # For the default host and each host in the hosts list define the following attributes: # # host: # description: The URL or Unix socket path used to connect to the Docker API. # required: yes # # tls: # description: Connect using TLS without verifying the authenticity of the Docker host server. # default: false # required: false # # tls_verify: # description: Connect using TLS without verifying the authenticity of the Docker host server. # default: false # required: false # # cert_path: # description: Path to the client's TLS certificate file. # default: null # required: false # # cacert_path: # description: Use a CA certificate when performing server verification by providing the path to a CA certificate file. # default: null # required: false # # key_path: # description: Path to the client's TLS key file. # default: null # required: false # # version: # description: The Docker API version. # required: false # default: will be supplied by the docker-py module. # # timeout: # description: The amount of time in seconds to wait on an API response. # required: false # default: 60 # # default_ip: # description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface # '0.0.0.0'. 
#     required: false
#     default: 127.0.0.1
#
#  private_ssh_port:
#     description: The port containers use for SSH
#     required: false
#     default: 22

#defaults:
#   host: unix:///var/run/docker.sock
#   private_ssh_port: 22
#   default_ip: 127.0.0.1

#hosts:
#  - host: tcp://10.45.5.16:4243
#    private_ssh_port: 2022
#    default_ip: 172.16.3.45
#  - host: tcp://localhost:4243
#    private_ssh_port: 2029
ansible-2.1.1.0/contrib/inventory/ec2.ini0000664000175400017540000001625712746444466021317 0ustar jenkinsjenkins00000000000000# Ansible EC2 external inventory script settings
#

[ec2]

# to talk to a private eucalyptus instance uncomment these lines
# and edit eucalyptus_host to be the host name of your cloud controller
#eucalyptus = True
#eucalyptus_host = clc.cloud.domain.org

# AWS regions to make calls to. Set this to 'all' to make requests to all regions
# in AWS and merge the results together. Alternatively, set this to a comma-
# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'
regions = all
regions_exclude = us-gov-west-1,cn-north-1

# When generating inventory, Ansible needs to know how to address a server.
# Each EC2 instance has a lot of variables associated with it. Here is the list:
#   http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance
# Below are two variables that are used as the address of a server:
#   - destination_variable
#   - vpc_destination_variable

# This is the normal destination variable to use. If you are running Ansible
# from outside EC2, then 'public_dns_name' makes the most sense. If you are
# running Ansible from within EC2, then perhaps you want to use the internal
# address, and should set this to 'private_dns_name'. The key of an EC2 tag
# may optionally be used; however, the boto instance variables take precedence
# in the event of a collision.
destination_variable = public_dns_name

# This allows you to override the inventory_name with an ec2 variable, instead
# of using the destination_variable above. Addressing (aka ansible_ssh_host)
# will still use destination_variable. Tags should be written as 'tag_TAGNAME'.
#hostname_variable = tag_Name

# For servers inside a VPC, using DNS names may not make sense. When an instance
# has 'subnet_id' set, this variable is used. If the subnet is public, setting
# this to 'ip_address' will return the public IP address. For instances in a
# private subnet, this should be set to 'private_ip_address', and Ansible must
# be run from within EC2. The key of an EC2 tag may optionally be used; however,
# the boto instance variables take precedence in the event of a collision.
# WARNING: instances that are in a private VPC _without_ a public IP address
# will not be listed in the inventory until you set:
#   vpc_destination_variable = private_ip_address
vpc_destination_variable = ip_address

# The following two settings allow flexible ansible host naming based on a
# python format string and a comma-separated list of ec2 tags.  Note that:
#
# 1) If the tags referenced are not present for some instances, empty strings
#    will be substituted in the format string.
# 2) This overrides both destination_variable and vpc_destination_variable.
#
#destination_format = {0}.{1}.example.com
#destination_format_tags = Name,environment

# To tag instances on EC2 with the resource records that point to them from
# Route53, uncomment and set 'route53' to True.
route53 = False

# To exclude RDS instances from the inventory, uncomment and set to False.
#rds = False

# To exclude ElastiCache instances from the inventory, uncomment and set to False.
#elasticache = False

# Additionally, you can specify the list of zones to exclude looking up in
# 'route53_excluded_zones' as a comma-separated list.
# route53_excluded_zones = samplezone1.com, samplezone2.com

# By default, only EC2 instances in the 'running' state are returned. Set
# 'all_instances' to True to return all instances regardless of state.
all_instances = False

# By default, only EC2 instances in the 'running' state are returned. Specify
# EC2 instance states to return as a comma-separated list. This
# option is overridden when 'all_instances' is True.
# instance_states = pending, running, shutting-down, terminated, stopping, stopped

# By default, only RDS instances in the 'available' state are returned. Set
# 'all_rds_instances' to True to return all RDS instances regardless of state.
all_rds_instances = False

# By default, only ElastiCache clusters and nodes in the 'available' state
# are returned. Set 'all_elasticache_clusters' and/or 'all_elasticache_nodes'
# to True to return all ElastiCache clusters and nodes, regardless of state.
#
# Note that all_elasticache_nodes only applies to listed clusters. That means
# if you set all_elasticache_clusters to False, no nodes will be returned from
# unavailable clusters, regardless of their state and of what you set for
# all_elasticache_nodes.
all_elasticache_replication_groups = False
all_elasticache_clusters = False
all_elasticache_nodes = False

# API calls to EC2 are slow. For this reason, we cache the results of an API
# call. Set this to the path you want cache files to be written to. Two files
# will be written to this directory:
#   - ansible-ec2.cache
#   - ansible-ec2.index
cache_path = ~/.ansible/tmp

# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
# To disable the cache, set this value to 0
cache_max_age = 300

# Organize groups into a nested hierarchy instead of a flat namespace.
nested_groups = False

# Replace dashes in group names to avoid issues with ansible.
replace_dash_in_groups = True

# If set to true, any tag of the form "a,b,c" is expanded into a list
# and the results are used to create additional tag_* inventory groups.
expand_csv_tags = False

# The EC2 inventory output can become very large. To manage its size,
# configure which groups should be created.
group_by_instance_id = True
group_by_region = True
group_by_availability_zone = True
group_by_ami_id = True
group_by_instance_type = True
group_by_key_pair = True
group_by_vpc_id = True
group_by_security_group = True
group_by_tag_keys = True
group_by_tag_none = True
group_by_route53_names = True
group_by_rds_engine = True
group_by_rds_parameter_group = True
group_by_elasticache_engine = True
group_by_elasticache_cluster = True
group_by_elasticache_parameter_group = True
group_by_elasticache_replication_group = True

# If you only want to include hosts that match a certain regular expression
# pattern_include = staging-*

# If you want to exclude any hosts that match a certain regular expression
# pattern_exclude = staging-*

# Instance filters can be used to control which instances are retrieved for
# inventory.
# For the full list of possible filters, please read the EC2 API
# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters
# Filters are key/value pairs separated by '='; to apply multiple filters,
# use a comma-separated list. See examples below.

# Retrieve only instances with (key=value) env=staging tag
# instance_filters = tag:env=staging

# Retrieve only instances with role=webservers OR role=dbservers tag
# instance_filters = tag:role=webservers,tag:role=dbservers

# Retrieve only t1.micro instances OR instances with tag env=staging
# instance_filters = instance-type=t1.micro,tag:env=staging

# You can also use wildcards in filter values. The example below lists
# instances whose tag Name value matches webservers1*
# (ex. webservers15, webservers1a, webservers123 etc)
# instance_filters = tag:Name=webservers1*

# A boto configuration profile may be used to separate out credentials
# see http://boto.readthedocs.org/en/latest/boto_config_tut.html
# boto_profile = some-boto-profile-name
ansible-2.1.1.0/contrib/inventory/ec2.py0000775000175400017540000016125612746444466021173 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python

'''
EC2 external inventory script
=================================

Generates inventory that Ansible can understand by making API requests to
AWS EC2 using the Boto library.

NOTE: This script assumes Ansible is being executed where the environment
variables needed for Boto have already been set:
    export AWS_ACCESS_KEY_ID='AK123'
    export AWS_SECRET_ACCESS_KEY='abc123'

This script also assumes there is an ec2.ini file alongside it. To specify a
different path to ec2.ini, define the EC2_INI_PATH environment variable:
    export EC2_INI_PATH=/path/to/my_ec2.ini

If you're using eucalyptus you need to set the above variables and
you need to define:
    export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus

If you're using boto profiles (requires boto>=2.24.0) you can choose a profile
using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using
the AWS_PROFILE variable:
    AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml

For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html

When run against a specific host, this script returns the following variables:
 - ec2_ami_launch_index
 - ec2_architecture
 - ec2_association
 - ec2_attachTime
 - ec2_attachment
 - ec2_attachmentId
 - ec2_client_token
 - ec2_deleteOnTermination
 - ec2_description
 - ec2_deviceIndex
 - ec2_dns_name
 - ec2_eventsSet
 - ec2_group_name
 - ec2_hypervisor
 - ec2_id
 - ec2_image_id
 - ec2_instanceState
 - ec2_instance_type
 - ec2_ipOwnerId
 - ec2_ip_address
 - ec2_item
 - ec2_kernel
 - ec2_key_name
 - ec2_launch_time
 - ec2_monitored
 - ec2_monitoring
 - ec2_networkInterfaceId
 - ec2_ownerId
 - ec2_persistent
 - ec2_placement
 - ec2_platform
 - ec2_previous_state
 - ec2_private_dns_name
 - ec2_private_ip_address
 - ec2_publicIp
 - ec2_public_dns_name
 - ec2_ramdisk
 - ec2_reason
 - ec2_region
 - ec2_requester_id
 - ec2_root_device_name
 - ec2_root_device_type
 - ec2_security_group_ids
 - ec2_security_group_names
 - ec2_shutdown_state
 - ec2_sourceDestCheck
 - ec2_spot_instance_request_id
 - ec2_state
 - ec2_state_code
 - ec2_state_reason
 - ec2_status
 - ec2_subnet_id
 - ec2_tenancy
 - ec2_virtualization_type
 - ec2_vpc_id

These variables are pulled out of a boto.ec2.instance object. There is a lack of
consistency with variable spellings (camelCase and underscores) since this
just loops through all variables the object exposes.
It is preferred to use the ones with underscores when multiple exist. In addition, if an instance has AWS Tags associated with it, each tag is a new variable named: - ec2_tag_[Key] = [Value] Security groups are comma-separated in 'ec2_security_group_ids' and 'ec2_security_group_names'. ''' # (c) 2012, Peter Sankauskas # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . ###################################################################### import sys import os import argparse import re from time import time import boto from boto import ec2 from boto import rds from boto import elasticache from boto import route53 import six from six.moves import configparser from collections import defaultdict try: import json except ImportError: import simplejson as json class Ec2Inventory(object): def _empty_inventory(self): return {"_meta" : {"hostvars" : {}}} def __init__(self): ''' Main execution path ''' # Inventory grouped by instance IDs, tags, security groups, regions, # and availability zones self.inventory = self._empty_inventory() # Index of hostname (address) to instance ID self.index = {} # Boto profile to use (if any) self.boto_profile = None # Read settings and parse CLI arguments self.parse_cli_args() self.read_settings() # Make sure that profile_name is not passed at all if not set # as pre 2.24 boto will fall over otherwise if self.boto_profile: if not hasattr(boto.ec2.EC2Connection, 'profile_name'): self.fail_with_error("boto version must be >= 2.24 to use profile") # Cache if self.args.refresh_cache: self.do_api_calls_update_cache() elif not self.is_cache_valid(): self.do_api_calls_update_cache() # Data to print if self.args.host: data_to_print = self.get_host_info() elif self.args.list: # Display list of instances for inventory if self.inventory == self._empty_inventory(): data_to_print = self.get_inventory_from_cache() else: data_to_print = self.json_format_dict(self.inventory, True) print(data_to_print) def is_cache_valid(self): ''' Determines if the cache files have expired, or if it is still valid ''' if os.path.isfile(self.cache_path_cache): mod_time = os.path.getmtime(self.cache_path_cache) current_time = time() if (mod_time + self.cache_max_age) > current_time: if os.path.isfile(self.cache_path_index): return True return False def read_settings(self): ''' Reads the settings from the ec2.ini file ''' if six.PY3: config = configparser.ConfigParser() else: config = configparser.SafeConfigParser() ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini') ec2_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('EC2_INI_PATH', ec2_default_ini_path))) config.read(ec2_ini_path) # is eucalyptus? 
self.eucalyptus_host = None self.eucalyptus = False if config.has_option('ec2', 'eucalyptus'): self.eucalyptus = config.getboolean('ec2', 'eucalyptus') if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'): self.eucalyptus_host = config.get('ec2', 'eucalyptus_host') # Regions self.regions = [] configRegions = config.get('ec2', 'regions') configRegions_exclude = config.get('ec2', 'regions_exclude') if (configRegions == 'all'): if self.eucalyptus_host: self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name) else: for regionInfo in ec2.regions(): if regionInfo.name not in configRegions_exclude: self.regions.append(regionInfo.name) else: self.regions = configRegions.split(",") # Destination addresses self.destination_variable = config.get('ec2', 'destination_variable') self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') if config.has_option('ec2', 'hostname_variable'): self.hostname_variable = config.get('ec2', 'hostname_variable') else: self.hostname_variable = None if config.has_option('ec2', 'destination_format') and \ config.has_option('ec2', 'destination_format_tags'): self.destination_format = config.get('ec2', 'destination_format') self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',') else: self.destination_format = None self.destination_format_tags = None # Route53 self.route53_enabled = config.getboolean('ec2', 'route53') self.route53_excluded_zones = [] if config.has_option('ec2', 'route53_excluded_zones'): self.route53_excluded_zones.extend( config.get('ec2', 'route53_excluded_zones', '').split(',')) # Include RDS instances? self.rds_enabled = True if config.has_option('ec2', 'rds'): self.rds_enabled = config.getboolean('ec2', 'rds') # Include ElastiCache instances? self.elasticache_enabled = True if config.has_option('ec2', 'elasticache'): self.elasticache_enabled = config.getboolean('ec2', 'elasticache') # Return all EC2 instances? if config.has_option('ec2', 'all_instances'): self.all_instances = config.getboolean('ec2', 'all_instances') else: self.all_instances = False # Instance states to be gathered in inventory. Default is 'running'. # Setting 'all_instances' to 'yes' overrides this option. ec2_valid_instance_states = [ 'pending', 'running', 'shutting-down', 'terminated', 'stopping', 'stopped' ] self.ec2_instance_states = [] if self.all_instances: self.ec2_instance_states = ec2_valid_instance_states elif config.has_option('ec2', 'instance_states'): for instance_state in config.get('ec2', 'instance_states').split(','): instance_state = instance_state.strip() if instance_state not in ec2_valid_instance_states: continue self.ec2_instance_states.append(instance_state) else: self.ec2_instance_states = ['running'] # Return all RDS instances? (if RDS is enabled) if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled: self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances') else: self.all_rds_instances = False # Return all ElastiCache replication groups? (if ElastiCache is enabled) if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled: self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups') else: self.all_elasticache_replication_groups = False # Return all ElastiCache clusters? 
(if ElastiCache is enabled) if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled: self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters') else: self.all_elasticache_clusters = False # Return all ElastiCache nodes? (if ElastiCache is enabled) if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled: self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes') else: self.all_elasticache_nodes = False # boto configuration profile (prefer CLI argument) self.boto_profile = self.args.boto_profile if config.has_option('ec2', 'boto_profile') and not self.boto_profile: self.boto_profile = config.get('ec2', 'boto_profile') # Cache related cache_dir = os.path.expanduser(config.get('ec2', 'cache_path')) if self.boto_profile: cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile) if not os.path.exists(cache_dir): os.makedirs(cache_dir) cache_name = 'ansible-ec2' aws_profile = lambda: (self.boto_profile or os.environ.get('AWS_PROFILE') or os.environ.get('AWS_ACCESS_KEY_ID')) if aws_profile(): cache_name = '%s-%s' % (cache_name, aws_profile()) self.cache_path_cache = cache_dir + "/%s.cache" % cache_name self.cache_path_index = cache_dir + "/%s.index" % cache_name self.cache_max_age = config.getint('ec2', 'cache_max_age') if config.has_option('ec2', 'expand_csv_tags'): self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags') else: self.expand_csv_tags = False # Configure nested groups instead of flat namespace. if config.has_option('ec2', 'nested_groups'): self.nested_groups = config.getboolean('ec2', 'nested_groups') else: self.nested_groups = False # Replace dash or not in group names if config.has_option('ec2', 'replace_dash_in_groups'): self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups') else: self.replace_dash_in_groups = True # Configure which groups should be created. group_by_options = [ 'group_by_instance_id', 'group_by_region', 'group_by_availability_zone', 'group_by_ami_id', 'group_by_instance_type', 'group_by_key_pair', 'group_by_vpc_id', 'group_by_security_group', 'group_by_tag_keys', 'group_by_tag_none', 'group_by_route53_names', 'group_by_rds_engine', 'group_by_rds_parameter_group', 'group_by_elasticache_engine', 'group_by_elasticache_cluster', 'group_by_elasticache_parameter_group', 'group_by_elasticache_replication_group', ] for option in group_by_options: if config.has_option('ec2', option): setattr(self, option, config.getboolean('ec2', option)) else: setattr(self, option, True) # Do we need to just include hosts that match a pattern? try: pattern_include = config.get('ec2', 'pattern_include') if pattern_include and len(pattern_include) > 0: self.pattern_include = re.compile(pattern_include) else: self.pattern_include = None except configparser.NoOptionError: self.pattern_include = None # Do we need to exclude hosts that match a pattern? try: pattern_exclude = config.get('ec2', 'pattern_exclude'); if pattern_exclude and len(pattern_exclude) > 0: self.pattern_exclude = re.compile(pattern_exclude) else: self.pattern_exclude = None except configparser.NoOptionError: self.pattern_exclude = None # Instance filters (see boto and EC2 API docs). Ignore invalid filters. 
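        # For example (illustrative values), ec2.ini might contain
        #   instance_filters = tag:env=staging,instance-type=t1.micro
        # which the loop below parses into
        #   {'tag:env': ['staging'], 'instance-type': ['t1.micro']}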
self.ec2_instance_filters = defaultdict(list) if config.has_option('ec2', 'instance_filters'): filters = [f for f in config.get('ec2', 'instance_filters').split(',') if f] for instance_filter in filters: instance_filter = instance_filter.strip() if not instance_filter or '=' not in instance_filter: continue filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)] if not filter_key: continue self.ec2_instance_filters[filter_key].append(filter_value) def parse_cli_args(self): ''' Command line argument processing ''' parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)') parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile', help='Use boto profile for connections to EC2') self.args = parser.parse_args() def do_api_calls_update_cache(self): ''' Do API calls to each region, and save data in cache files ''' if self.route53_enabled: self.get_route53_records() for region in self.regions: self.get_instances_by_region(region) if self.rds_enabled: self.get_rds_instances_by_region(region) if self.elasticache_enabled: self.get_elasticache_clusters_by_region(region) self.get_elasticache_replication_groups_by_region(region) self.write_to_cache(self.inventory, self.cache_path_cache) self.write_to_cache(self.index, self.cache_path_index) def connect(self, region): ''' create connection to api server''' if self.eucalyptus: conn = boto.connect_euca(host=self.eucalyptus_host) conn.APIVersion = '2010-08-31' else: conn = self.connect_to_aws(ec2, region) return conn def boto_fix_security_token_in_profile(self, connect_args): ''' monkey patch for boto issue boto/boto#2100 ''' profile = 'profile ' + self.boto_profile if boto.config.has_option(profile, 'aws_security_token'): connect_args['security_token'] = boto.config.get(profile, 'aws_security_token') return connect_args def connect_to_aws(self, module, region): connect_args = {} # only pass the profile name if it's set (as it is not supported by older boto versions) if self.boto_profile: connect_args['profile_name'] = self.boto_profile self.boto_fix_security_token_in_profile(connect_args) conn = module.connect_to_region(region, **connect_args) # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported if conn is None: self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." 
% region) return conn def get_instances_by_region(self, region): ''' Makes an AWS EC2 API call to get the list of instances in a particular region ''' try: conn = self.connect(region) reservations = [] if self.ec2_instance_filters: for filter_key, filter_values in self.ec2_instance_filters.items(): reservations.extend(conn.get_all_instances(filters={filter_key: filter_values})) else: reservations = conn.get_all_instances() for reservation in reservations: for instance in reservation.instances: self.add_instance(instance, region) except boto.exception.BotoServerError as e: if e.error_code == 'AuthFailure': error = self.get_auth_error_message() else: backend = 'Eucalyptus' if self.eucalyptus else 'AWS' error = "Error connecting to %s backend.\n%s" % (backend, e.message) self.fail_with_error(error, 'getting EC2 instances') def get_rds_instances_by_region(self, region): ''' Makes an AWS API call to get the list of RDS instances in a particular region ''' try: conn = self.connect_to_aws(rds, region) if conn: marker = None while True: instances = conn.get_all_dbinstances(marker=marker) marker = instances.marker for instance in instances: self.add_rds_instance(instance, region) if not marker: break except boto.exception.BotoServerError as e: error = e.reason if e.error_code == 'AuthFailure': error = self.get_auth_error_message() if e.reason != "Forbidden": error = "Looks like AWS RDS is down:\n%s" % e.message self.fail_with_error(error, 'getting RDS instances') def get_elasticache_clusters_by_region(self, region): ''' Makes an AWS API call to get the list of ElastiCache clusters (with nodes' info) in a particular region.''' # ElastiCache boto module doesn't provide a get_all_instances method, # that's why we need to call describe directly (it would be called by # the shorthand method anyway...) try: conn = self.connect_to_aws(elasticache, region) if conn: # show_cache_node_info = True # because we also want nodes' information response = conn.describe_cache_clusters(None, None, None, True) except boto.exception.BotoServerError as e: error = e.reason if e.error_code == 'AuthFailure': error = self.get_auth_error_message() if e.reason != "Forbidden": error = "Looks like AWS ElastiCache is down:\n%s" % e.message self.fail_with_error(error, 'getting ElastiCache clusters') try: # Boto also doesn't provide wrapper classes for CacheClusters or # CacheNodes. Because of that we can't make use of the get_list # method in the AWSQueryConnection. Let's do the work manually clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters'] except KeyError as e: error = "ElastiCache query to AWS failed (unexpected format)." self.fail_with_error(error, 'getting ElastiCache clusters') for cluster in clusters: self.add_elasticache_cluster(cluster, region) def get_elasticache_replication_groups_by_region(self, region): ''' Makes an AWS API call to get the list of ElastiCache replication groups in a particular region.''' # ElastiCache boto module doesn't provide a get_all_instances method, # that's why we need to call describe directly (it would be called by # the shorthand method anyway...)
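# ------------------------------------------------------------------
# Editor's illustrative sketch (assumed shape, inferred only from the keys
# accessed below; identifiers are hypothetical):
#   response = {
#       'DescribeReplicationGroupsResponse': {
#           'DescribeReplicationGroupsResult': {
#               'ReplicationGroups': [
#                   {'ReplicationGroupId': 'my-redis-rg',
#                    'Status': 'available', ...}
#               ]
#           }
#       }
#   }
# ------------------------------------------------------------------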
try: conn = self.connect_to_aws(elasticache, region) if conn: response = conn.describe_replication_groups() except boto.exception.BotoServerError as e: error = e.reason if e.error_code == 'AuthFailure': error = self.get_auth_error_message() if e.reason != "Forbidden": error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message self.fail_with_error(error, 'getting ElastiCache clusters') try: # Boto also doesn't provide wrapper classes for ReplicationGroups. # Because of that we can't make use of the get_list method in the # AWSQueryConnection. Let's do the work manually replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups'] except KeyError as e: error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)." self.fail_with_error(error, 'getting ElastiCache clusters') for replication_group in replication_groups: self.add_elasticache_replication_group(replication_group, region) def get_auth_error_message(self): ''' Create an informative error message if there is an issue authenticating''' errors = ["Authentication error retrieving ec2 inventory."] if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]: errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found') else: errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct') boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials'] boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p))) if len(boto_config_found) > 0: errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found)) else: errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths)) return '\n'.join(errors) def fail_with_error(self, err_msg, err_operation=None): ''' Log an error to stderr for ansible-playbook to consume and exit ''' if err_operation: err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format( err_msg=err_msg, err_operation=err_operation) sys.stderr.write(err_msg) sys.exit(1) def get_instance(self, region, instance_id): conn = self.connect(region) reservations = conn.get_all_instances([instance_id]) for reservation in reservations: for instance in reservation.instances: return instance def add_instance(self, instance, region): ''' Adds an instance to the inventory and index, as long as it is addressable ''' # Only return instances with desired instance states if instance.state not in self.ec2_instance_states: return # Select the best destination address if self.destination_format and self.destination_format_tags: dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, '') for tag in self.destination_format_tags ]) elif instance.subnet_id: dest = getattr(instance, self.vpc_destination_variable, None) if dest is None: dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None) else: dest = getattr(instance, self.destination_variable, None) if dest is None: dest = getattr(instance, 'tags').get(self.destination_variable, None) if not dest: # Skip instances we cannot address (e.g.
private VPC subnet) return # Set the inventory name hostname = None if self.hostname_variable: if self.hostname_variable.startswith('tag_'): hostname = instance.tags.get(self.hostname_variable[4:], None) else: hostname = getattr(instance, self.hostname_variable) # If we can't get a nice hostname, use the destination address if not hostname: hostname = dest else: hostname = self.to_safe(hostname).lower() # if we only want to include hosts that match a pattern, skip those that don't if self.pattern_include and not self.pattern_include.match(hostname): return # if we need to exclude hosts that match a pattern, skip those if self.pattern_exclude and self.pattern_exclude.match(hostname): return # Add to index self.index[hostname] = [region, instance.id] # Inventory: Group by instance ID (always a group of 1) if self.group_by_instance_id: self.inventory[instance.id] = [hostname] if self.nested_groups: self.push_group(self.inventory, 'instances', instance.id) # Inventory: Group by region if self.group_by_region: self.push(self.inventory, region, hostname) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone if self.group_by_availability_zone: self.push(self.inventory, instance.placement, hostname) if self.nested_groups: if self.group_by_region: self.push_group(self.inventory, region, instance.placement) self.push_group(self.inventory, 'zones', instance.placement) # Inventory: Group by Amazon Machine Image (AMI) ID if self.group_by_ami_id: ami_id = self.to_safe(instance.image_id) self.push(self.inventory, ami_id, hostname) if self.nested_groups: self.push_group(self.inventory, 'images', ami_id) # Inventory: Group by instance type if self.group_by_instance_type: type_name = self.to_safe('type_' + instance.instance_type) self.push(self.inventory, type_name, hostname) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by key pair if self.group_by_key_pair and instance.key_name: key_name = self.to_safe('key_' + instance.key_name) self.push(self.inventory, key_name, hostname) if self.nested_groups: self.push_group(self.inventory, 'keys', key_name) # Inventory: Group by VPC if self.group_by_vpc_id and instance.vpc_id: vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id) self.push(self.inventory, vpc_id_name, hostname) if self.nested_groups: self.push_group(self.inventory, 'vpcs', vpc_id_name) # Inventory: Group by security group if self.group_by_security_group: try: for group in instance.groups: key = self.to_safe("security_group_" + group.name) self.push(self.inventory, key, hostname) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) except AttributeError: self.fail_with_error('\n'.join(['Package boto seems a bit older.', 'Please upgrade boto >= 2.3.0.'])) # Inventory: Group by tag keys if self.group_by_tag_keys: for k, v in instance.tags.items(): if self.expand_csv_tags and v and ',' in v: values = map(lambda x: x.strip(), v.split(',')) else: values = [v] for v in values: if v: key = self.to_safe("tag_" + k + "=" + v) else: key = self.to_safe("tag_" + k) self.push(self.inventory, key, hostname) if self.nested_groups: self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) if v: self.push_group(self.inventory, self.to_safe("tag_" + k), key) # Inventory: Group by Route53 domain names if enabled if self.route53_enabled and self.group_by_route53_names: route53_names = self.get_instance_route53_names(instance) for name in route53_names: self.push(self.inventory, 
name, hostname) if self.nested_groups: self.push_group(self.inventory, 'route53', name) # Global Tag: instances without tags if self.group_by_tag_none and len(instance.tags) == 0: self.push(self.inventory, 'tag_none', hostname) if self.nested_groups: self.push_group(self.inventory, 'tags', 'tag_none') # Global Tag: tag all EC2 instances self.push(self.inventory, 'ec2', hostname) self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance) self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest def add_rds_instance(self, instance, region): ''' Adds an RDS instance to the inventory and index, as long as it is addressable ''' # Only want available instances unless all_rds_instances is True if not self.all_rds_instances and instance.status != 'available': return # Select the best destination address dest = instance.endpoint[0] if not dest: # Skip instances we cannot address (e.g. private VPC subnet) return # Set the inventory name hostname = None if self.hostname_variable: if self.hostname_variable.startswith('tag_'): hostname = instance.tags.get(self.hostname_variable[4:], None) else: hostname = getattr(instance, self.hostname_variable) # If we can't get a nice hostname, use the destination address if not hostname: hostname = dest hostname = self.to_safe(hostname).lower() # Add to index self.index[hostname] = [region, instance.id] # Inventory: Group by instance ID (always a group of 1) if self.group_by_instance_id: self.inventory[instance.id] = [hostname] if self.nested_groups: self.push_group(self.inventory, 'instances', instance.id) # Inventory: Group by region if self.group_by_region: self.push(self.inventory, region, hostname) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone if self.group_by_availability_zone: self.push(self.inventory, instance.availability_zone, hostname) if self.nested_groups: if self.group_by_region: self.push_group(self.inventory, region, instance.availability_zone) self.push_group(self.inventory, 'zones', instance.availability_zone) # Inventory: Group by instance type if self.group_by_instance_type: type_name = self.to_safe('type_' + instance.instance_class) self.push(self.inventory, type_name, hostname) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by VPC if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id: vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id) self.push(self.inventory, vpc_id_name, hostname) if self.nested_groups: self.push_group(self.inventory, 'vpcs', vpc_id_name) # Inventory: Group by security group if self.group_by_security_group: try: if instance.security_group: key = self.to_safe("security_group_" + instance.security_group.name) self.push(self.inventory, key, hostname) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) except AttributeError: self.fail_with_error('\n'.join(['Package boto seems a bit older.', 'Please upgrade boto >= 2.3.0.'])) # Inventory: Group by engine if self.group_by_rds_engine: self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname) if self.nested_groups: self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine)) # Inventory: Group by parameter group if self.group_by_rds_parameter_group: self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname) if self.nested_groups: 
self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name)) # Global Tag: all RDS instances self.push(self.inventory, 'rds', hostname) self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance) self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest def add_elasticache_cluster(self, cluster, region): ''' Adds an ElastiCache cluster to the inventory and index, as long as its nodes are addressable ''' # Only want available clusters unless all_elasticache_clusters is True if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available': return # Select the best destination address if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']: # Memcached cluster dest = cluster['ConfigurationEndpoint']['Address'] is_redis = False else: # Redis single-node cluster # Because all Redis clusters are single nodes, we'll merge the # info from the cluster with info about the node dest = cluster['CacheNodes'][0]['Endpoint']['Address'] is_redis = True if not dest: # Skip clusters we cannot address (e.g. private VPC subnet) return # Add to index self.index[dest] = [region, cluster['CacheClusterId']] # Inventory: Group by instance ID (always a group of 1) if self.group_by_instance_id: self.inventory[cluster['CacheClusterId']] = [dest] if self.nested_groups: self.push_group(self.inventory, 'instances', cluster['CacheClusterId']) # Inventory: Group by region if self.group_by_region and not is_redis: self.push(self.inventory, region, dest) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone if self.group_by_availability_zone and not is_redis: self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) if self.nested_groups: if self.group_by_region: self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) # Inventory: Group by node type if self.group_by_instance_type and not is_redis: type_name = self.to_safe('type_' + cluster['CacheNodeType']) self.push(self.inventory, type_name, dest) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by VPC (information not available in the current # AWS API version for ElastiCache) # Inventory: Group by security group if self.group_by_security_group and not is_redis: # Check for the existence of the 'SecurityGroups' key and also if # this key has some value. When the cluster is not placed in a SG # the query can return None here and cause an error.
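# ------------------------------------------------------------------
# Editor's illustrative note (hypothetical values, not part of the original
# script): the describe output may carry an explicit null, e.g.
#   {'CacheClusterId': 'cache-01', 'SecurityGroups': None, ...}
# so the membership-and-not-None check below guards against both a missing
# key and a None value; a bare `for sg in cluster['SecurityGroups']` would
# raise TypeError on the None case.
# ------------------------------------------------------------------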
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: for security_group in cluster['SecurityGroups']: key = self.to_safe("security_group_" + security_group['SecurityGroupId']) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) # Inventory: Group by engine if self.group_by_elasticache_engine and not is_redis: self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine'])) # Inventory: Group by parameter group if self.group_by_elasticache_parameter_group: self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName'])) # Inventory: Group by replication group if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']: self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId'])) # Global Tag: all ElastiCache clusters self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId']) host_info = self.get_host_info_dict_from_describe_dict(cluster) self.inventory["_meta"]["hostvars"][dest] = host_info # Add the nodes for node in cluster['CacheNodes']: self.add_elasticache_node(node, cluster, region) def add_elasticache_node(self, node, cluster, region): ''' Adds an ElastiCache node to the inventory and index, as long as it is addressable ''' # Only want available nodes unless all_elasticache_nodes is True if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available': return # Select the best destination address dest = node['Endpoint']['Address'] if not dest: # Skip nodes we cannot address (e.g. private VPC subnet) return node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId']) # Add to index self.index[dest] = [region, node_id] # Inventory: Group by node ID (always a group of 1) if self.group_by_instance_id: self.inventory[node_id] = [dest] if self.nested_groups: self.push_group(self.inventory, 'instances', node_id) # Inventory: Group by region if self.group_by_region: self.push(self.inventory, region, dest) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone if self.group_by_availability_zone: self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) if self.nested_groups: if self.group_by_region: self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) # Inventory: Group by node type if self.group_by_instance_type: type_name = self.to_safe('type_' + cluster['CacheNodeType']) self.push(self.inventory, type_name, dest) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by VPC (information not available in the current # AWS API version for ElastiCache) # Inventory: Group by security group if self.group_by_security_group: # Check for the existence of the 'SecurityGroups' key and also if # this key has some value. 
When the cluster is not placed in a SG # the query can return None here and cause an error. if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: for security_group in cluster['SecurityGroups']: key = self.to_safe("security_group_" + security_group['SecurityGroupId']) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) # Inventory: Group by engine if self.group_by_elasticache_engine: self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine'])) # Inventory: Group by parameter group (done at cluster level) # Inventory: Group by replication group (done at cluster level) # Inventory: Group by ElastiCache Cluster if self.group_by_elasticache_cluster: self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest) # Global Tag: all ElastiCache nodes self.push(self.inventory, 'elasticache_nodes', dest) host_info = self.get_host_info_dict_from_describe_dict(node) if dest in self.inventory["_meta"]["hostvars"]: self.inventory["_meta"]["hostvars"][dest].update(host_info) else: self.inventory["_meta"]["hostvars"][dest] = host_info def add_elasticache_replication_group(self, replication_group, region): ''' Adds an ElastiCache replication group to the inventory and index ''' # Only want available clusters unless all_elasticache_replication_groups is True if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available': return # Select the best destination address (PrimaryEndpoint) dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] if not dest: # Skip clusters we cannot address (e.g. private VPC subnet) return # Add to index self.index[dest] = [region, replication_group['ReplicationGroupId']] # Inventory: Group by ID (always a group of 1) if self.group_by_instance_id: self.inventory[replication_group['ReplicationGroupId']] = [dest] if self.nested_groups: self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId']) # Inventory: Group by region if self.group_by_region: self.push(self.inventory, region, dest) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone (doesn't apply to replication groups) # Inventory: Group by node type (doesn't apply to replication groups) # Inventory: Group by VPC (information not available in the current # AWS API version for replication groups # Inventory: Group by security group (doesn't apply to replication groups) # Check this value in cluster level # Inventory: Group by engine (replication groups are always Redis) if self.group_by_elasticache_engine: self.push(self.inventory, 'elasticache_redis', dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_engines', 'redis') # Global Tag: all ElastiCache clusters self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId']) host_info = self.get_host_info_dict_from_describe_dict(replication_group) self.inventory["_meta"]["hostvars"][dest] = host_info def get_route53_records(self): ''' Get and store the map of resource records to domain names that point to them. 
''' r53_conn = route53.Route53Connection() all_zones = r53_conn.get_zones() route53_zones = [ zone for zone in all_zones if zone.name[:-1] not in self.route53_excluded_zones ] self.route53_records = {} for zone in route53_zones: rrsets = r53_conn.get_all_rrsets(zone.id) for record_set in rrsets: record_name = record_set.name if record_name.endswith('.'): record_name = record_name[:-1] for resource in record_set.resource_records: self.route53_records.setdefault(resource, set()) self.route53_records[resource].add(record_name) def get_instance_route53_names(self, instance): ''' Check if an instance is referenced in the records we have from Route53. If it is, return the list of domain names pointing to said instance. If nothing points to it, return an empty list. ''' instance_attributes = [ 'public_dns_name', 'private_dns_name', 'ip_address', 'private_ip_address' ] name_list = set() for attrib in instance_attributes: try: value = getattr(instance, attrib) except AttributeError: continue if value in self.route53_records: name_list.update(self.route53_records[value]) return list(name_list) def get_host_info_dict_from_instance(self, instance): instance_vars = {} for key in vars(instance): value = getattr(instance, key) key = self.to_safe('ec2_' + key) # Handle complex types # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518 if key == 'ec2__state': instance_vars['ec2_state'] = instance.state or '' instance_vars['ec2_state_code'] = instance.state_code elif key == 'ec2__previous_state': instance_vars['ec2_previous_state'] = instance.previous_state or '' instance_vars['ec2_previous_state_code'] = instance.previous_state_code elif type(value) in [int, bool]: instance_vars[key] = value elif isinstance(value, six.string_types): instance_vars[key] = value.strip() elif type(value) == type(None): instance_vars[key] = '' elif key == 'ec2_region': instance_vars[key] = value.name elif key == 'ec2__placement': instance_vars['ec2_placement'] = value.zone elif key == 'ec2_tags': for k, v in value.items(): if self.expand_csv_tags and ',' in v: v = map(lambda x: x.strip(), v.split(',')) key = self.to_safe('ec2_tag_' + k) instance_vars[key] = v elif key == 'ec2_groups': group_ids = [] group_names = [] for group in value: group_ids.append(group.id) group_names.append(group.name) instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids]) instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names]) else: pass # TODO Product codes if someone finds them useful #print key #print type(value) #print value return instance_vars def get_host_info_dict_from_describe_dict(self, describe_dict): ''' Parses the dictionary returned by the API call into a flat list of parameters. This method should be used only when 'describe' is used directly because Boto doesn't provide specific classes. ''' # I really don't agree with prefixing everything with 'ec2' # because EC2, RDS and ElastiCache are different services. # I'm just following the pattern used until now to not break any # compatibility. 
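# ------------------------------------------------------------------
# Editor's illustrative note (not part of the original script): examples of
# the key flattening applied below, assuming the helpers defined later in
# this class:
#   self.uncammelize('CacheClusterId')   -> 'cache_cluster_id'
#   self.to_safe('ec2_cache_cluster_id') -> 'ec2_cache_cluster_id'
# so a describe key such as 'CacheClusterId' surfaces as the host variable
# 'ec2_cache_cluster_id'.
# ------------------------------------------------------------------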
host_info = {} for key in describe_dict: value = describe_dict[key] key = self.to_safe('ec2_' + self.uncammelize(key)) # Handle complex types # Target: Memcached Cache Clusters if key == 'ec2_configuration_endpoint' and value: host_info['ec2_configuration_endpoint_address'] = value['Address'] host_info['ec2_configuration_endpoint_port'] = value['Port'] # Target: Cache Nodes and Redis Cache Clusters (single node) if key == 'ec2_endpoint' and value: host_info['ec2_endpoint_address'] = value['Address'] host_info['ec2_endpoint_port'] = value['Port'] # Target: Redis Replication Groups if key == 'ec2_node_groups' and value: host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address'] host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port'] replica_count = 0 for node in value[0]['NodeGroupMembers']: if node['CurrentRole'] == 'primary': host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address'] host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port'] host_info['ec2_primary_cluster_id'] = node['CacheClusterId'] elif node['CurrentRole'] == 'replica': host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address'] host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port'] host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId'] replica_count += 1 # Target: Redis Replication Groups if key == 'ec2_member_clusters' and value: host_info['ec2_member_clusters'] = ','.join([str(i) for i in value]) # Target: All Cache Clusters elif key == 'ec2_cache_parameter_group': host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']]) host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName'] host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus'] # Target: Almost everything elif key == 'ec2_security_groups': # Skip if SecurityGroups is None # (it is possible to have the key defined but no value in it). if value is not None: sg_ids = [] for sg in value: sg_ids.append(sg['SecurityGroupId']) host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids]) # Target: Everything # Preserve booleans and integers elif type(value) in [int, bool]: host_info[key] = value # Target: Everything # Sanitize string values elif isinstance(value, six.string_types): host_info[key] = value.strip() # Target: Everything # Replace None by an empty string elif type(value) == type(None): host_info[key] = '' else: # Remove non-processed complex types pass return host_info def get_host_info(self): ''' Get variables about a specific host ''' if len(self.index) == 0: # Need to load index from cache self.load_index_from_cache() if not self.args.host in self.index: # try updating the cache self.do_api_calls_update_cache() if not self.args.host in self.index: # host might not exist anymore return self.json_format_dict({}, True) (region, instance_id) = self.index[self.args.host] instance = self.get_instance(region, instance_id) return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True) def push(self, my_dict, key, element): ''' Push an element onto an array that may not have been defined in the dict ''' group_info = my_dict.setdefault(key, []) if isinstance(group_info, dict): host_list = group_info.setdefault('hosts', []) host_list.append(element) else: group_info.append(element) def push_group(self, my_dict, key, element): ''' Push a group as a child of another group. 
''' parent_group = my_dict.setdefault(key, {}) if not isinstance(parent_group, dict): parent_group = my_dict[key] = {'hosts': parent_group} child_groups = parent_group.setdefault('children', []) if element not in child_groups: child_groups.append(element) def get_inventory_from_cache(self): ''' Reads the inventory from the cache file and returns it as a JSON object ''' cache = open(self.cache_path_cache, 'r') json_inventory = cache.read() return json_inventory def load_index_from_cache(self): ''' Reads the index from the cache file sets self.index ''' cache = open(self.cache_path_index, 'r') json_index = cache.read() self.index = json.loads(json_index) def write_to_cache(self, data, filename): ''' Writes data in JSON format to a file ''' json_data = self.json_format_dict(data, True) cache = open(filename, 'w') cache.write(json_data) cache.close() def uncammelize(self, key): temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower() def to_safe(self, word): ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' regex = "[^A-Za-z0-9\_" if not self.replace_dash_in_groups: regex += "\-" return re.sub(regex + "]", "_", word) def json_format_dict(self, data, pretty=False): ''' Converts a dict to a JSON object and dumps it as a formatted string ''' if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) # Run the script Ec2Inventory() ansible-2.1.1.0/contrib/inventory/fleet.py0000775000175400017540000000600112746444466021613 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python """ fleetctl base external inventory script. Automatically finds the IPs of the booted coreos instances and returns it under the host group 'coreos' """ # Copyright (C) 2014 Andrew Rothstein # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # # Thanks to the vagrant.py inventory script for giving me the basic structure # of this. 
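# ------------------------------------------------------------------
# Editor's illustrative note: `fleetctl list-machines` output is assumed to
# look roughly like (machine IDs and IPs are hypothetical):
#   MACHINE          IP          METADATA
#   148a18ff-6e95    10.10.0.1   -
# list_running_boxes() below extracts the second whitespace-delimited
# column and skips the header row by ignoring the literal value "IP".
# ------------------------------------------------------------------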
# import sys import subprocess import re import string from optparse import OptionParser try: import json except ImportError: import simplejson as json # Options #------------------------------ parser = OptionParser(usage="%prog [options] --list | --host ") parser.add_option('--list', default=False, dest="list", action="store_true", help="Produce a JSON consumable grouping of servers in your fleet") parser.add_option('--host', default=None, dest="host", help="Generate additional host specific details for given host for Ansible") (options, args) = parser.parse_args() # # helper functions # def get_ssh_config(): configs = [] for box in list_running_boxes(): config = get_a_ssh_config(box) configs.append(config) return configs # list all the running instances in the fleet def list_running_boxes(): boxes = [] for line in subprocess.check_output(["fleetctl", "list-machines"]).split('\n'): matcher = re.search("[^\s]+[\s]+([^\s]+).+", line) if matcher and matcher.group(1) != "IP": boxes.append(matcher.group(1)) return boxes def get_a_ssh_config(box_name): config = {} config['Host'] = box_name config['ansible_ssh_user'] = 'core' config['ansible_python_interpreter'] = '/opt/bin/python' return config # List out servers that fleet has running #------------------------------ if options.list: ssh_config = get_ssh_config() hosts = { 'coreos': []} for data in ssh_config: hosts['coreos'].append(data['Host']) print(json.dumps(hosts)) sys.exit(0) # Get out the host details #------------------------------ elif options.host: result = {} ssh_config = get_ssh_config() details = list(filter(lambda x: (x['Host'] == options.host), ssh_config)) if len(details) > 0: # pass through the port, in case it's non-standard. result = details[0] print(json.dumps(result)) sys.exit(0) # Print out help #------------------------------ else: parser.print_help() sys.exit(1) ansible-2.1.1.0/contrib/inventory/freeipa.py0000775000175400017540000000423312746444466022134 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python import argparse from ipalib import api import json def initialize(): ''' This function initializes the FreeIPA/IPA API. This function requires no arguments. A Kerberos key must be present in the user's keyring in order for this to work. ''' api.bootstrap(context='cli') api.finalize() try: api.Backend.rpcclient.connect() except AttributeError: # FreeIPA < 4.0 compatibility api.Backend.xmlclient.connect() return api def list_groups(api): ''' This function returns a list of all host groups. This function requires one argument, the FreeIPA/IPA API object. ''' inventory = {} hostvars = {} meta = {} result = api.Command.hostgroup_find()['result'] for hostgroup in result: inventory[hostgroup['cn'][0]] = { 'hosts': [host for host in hostgroup['member_host']]} for host in hostgroup['member_host']: hostvars[host] = {} inventory['_meta'] = {'hostvars': hostvars} inv_string = json.dumps(inventory, indent=1, sort_keys=True) print(inv_string) return None def parse_args(): ''' This function parses the arguments that were passed in via the command line. This function expects no arguments. ''' parser = argparse.ArgumentParser(description='Ansible FreeIPA/IPA ' 'inventory module') group = parser.add_mutually_exclusive_group(required=True) group.add_argument('--list', action='store_true', help='List active servers') group.add_argument('--host', help='List details about the specified host') return parser.parse_args() def print_host(host): ''' This function is really a stub; it could return variables to be used in a playbook.
However, at this point there are no variables stored in FreeIPA/IPA. This function expects one string, the hostname to look up variables for. ''' print(json.dumps({})) return None if __name__ == '__main__': args = parse_args() if args.host: print_host(args.host) elif args.list: api = initialize() list_groups(api) ansible-2.1.1.0/contrib/inventory/gce.ini0000664000175400017540000000464212746444466021407 0ustar jenkinsjenkins00000000000000#!/usr/bin/python # Copyright 2013 Google Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # The GCE inventory script has the following dependencies: # 1. A valid Google Cloud Platform account with Google Compute Engine # enabled. See https://cloud.google.com # 2. An OAuth2 Service Account flow should be enabled. This will generate # a private key file that the inventory script will use for API request # authorization. See https://developers.google.com/accounts/docs/OAuth2 # 3. Convert the private key from PKCS12 to PEM format # $ openssl pkcs12 -in pkey.pkcs12 -passin pass:notasecret \ # > -nodes -nocerts | openssl rsa -out pkey.pem # 4. The libcloud (>=0.13.3) Python library. See http://libcloud.apache.org # # (See ansible/test/gce_tests.py comments for full install instructions) # # Author: Eric Johnson [gce] # GCE Service Account configuration information can be stored in the # libcloud 'secrets.py' file. Ideally, the 'secrets.py' file will already # exist in your PYTHONPATH and be picked up automatically with an import # statement in the inventory script. However, you can specify an absolute # path to the secrets.py file with the 'libcloud_secrets' parameter. libcloud_secrets = # If you are not going to use a 'secrets.py' file, you can set the necessary # authorization parameters here. gce_service_account_email_address = gce_service_account_pem_file_path = gce_project_id = [inventory] # The 'inventory_ip_type' parameter specifies whether 'ansible_ssh_host' should # contain the instance internal or external address. Values may be either # 'internal' or 'external'. If 'external' is specified but no external instance # address exists, the internal address will be used. # The INVENTORY_IP_TYPE environment variable will override this value. inventory_ip_type =
# # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . ''' GCE external inventory script ================================= Generates inventory that Ansible can understand by making API requests to Google Compute Engine via the libcloud library. Full install/configuration instructions for the gce* modules can be found in the comments of ansible/test/gce_tests.py. When run against a specific host, this script returns the following variables based on the data obtained from the libcloud Node object: - gce_uuid - gce_id - gce_image - gce_machine_type - gce_private_ip - gce_public_ip - gce_name - gce_description - gce_status - gce_zone - gce_tags - gce_metadata - gce_network When run in --list mode, instances are grouped by the following categories: - zone: zone group name examples are us-central1-b, europe-west1-a, etc. - instance tags: An entry is created for each tag. For example, if you have two instances with a common tag called 'foo', they will both be grouped together under the 'tag_foo' name. - network name: the name of the network is appended to 'network_' (e.g. the 'default' network will result in a group named 'network_default') - machine type: types follow a pattern like n1-standard-4, g1-small, etc. - running status: group name prefixed with 'status_' (e.g. status_running, status_stopped,..) - image: when using an ephemeral/scratch disk, this will be set to the image name used when creating the instance (e.g. debian-7-wheezy-v20130816). When your instance was created with a root persistent disk it will be set to 'persistent_disk' since there is no current way to determine the image. Examples: Execute uname on all instances in the us-central1-a zone $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a" Use the GCE inventory script to print out instance-specific information $ contrib/inventory/gce.py --host my_instance Author: Eric Johnson Contributors: Matt Hite Version: 0.0.2 ''' __requires__ = ['pycrypto>=2.6'] try: import pkg_resources except ImportError: # Use pkg_resources to find the correct versions of libraries and set # sys.path appropriately when there are multiversion installs. We don't # fail here as there is code that better expresses the errors where the # library is used. pass USER_AGENT_PRODUCT = "Ansible-gce_inventory_plugin" USER_AGENT_VERSION = "v2" import sys import os import argparse import ConfigParser import logging logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler()) try: import json except ImportError: import simplejson as json try: from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver _ = Provider.GCE except: print("GCE inventory script requires libcloud >= 0.13") sys.exit(1) class GceInventory(object): def __init__(self): # Read settings and parse CLI arguments self.parse_cli_args() self.config = self.get_config() self.driver = self.get_gce_driver() self.ip_type = self.get_inventory_options() if self.ip_type: self.ip_type = self.ip_type.lower() # Just display data for specific host if self.args.host: print(self.json_format_dict(self.node_to_dict( self.get_instance(self.args.host)), pretty=self.args.pretty)) sys.exit(0) # Otherwise, assume user wants all instances grouped print(self.json_format_dict(self.group_instances(), pretty=self.args.pretty)) sys.exit(0) def get_config(self): """ Populates a SafeConfigParser object with defaults and attempts to read an .ini-style configuration from the filename specified in GCE_INI_PATH.
If the environment variable is not present, the filename defaults to gce.ini in the current working directory. """ gce_ini_default_path = os.path.join( os.path.dirname(os.path.realpath(__file__)), "gce.ini") gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path) # Create a ConfigParser. # This provides empty defaults to each key, so that environment # variable configuration (as opposed to INI configuration) is able # to work. config = ConfigParser.SafeConfigParser(defaults={ 'gce_service_account_email_address': '', 'gce_service_account_pem_file_path': '', 'gce_project_id': '', 'libcloud_secrets': '', 'inventory_ip_type': '', }) if 'gce' not in config.sections(): config.add_section('gce') if 'inventory' not in config.sections(): config.add_section('inventory') config.read(gce_ini_path) return config def get_inventory_options(self): """Determine inventory options. Environment variables always take precedence over configuration files.""" ip_type = self.config.get('inventory', 'inventory_ip_type') # If the appropriate environment variables are set, they override # other configuration ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type) return ip_type def get_gce_driver(self): """Determine the GCE authorization settings and return a libcloud driver. """ # Attempt to get GCE params from a configuration file, if one # exists. secrets_path = self.config.get('gce', 'libcloud_secrets') secrets_found = False try: import secrets args = list(getattr(secrets, 'GCE_PARAMS', [])) kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) secrets_found = True except: pass if not secrets_found and secrets_path: if not secrets_path.endswith('secrets.py'): err = "Must specify libcloud secrets file as " err += "/absolute/path/to/secrets.py" print(err) sys.exit(1) sys.path.append(os.path.dirname(secrets_path)) try: import secrets args = list(getattr(secrets, 'GCE_PARAMS', [])) kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) secrets_found = True except: pass if not secrets_found: args = [ self.config.get('gce','gce_service_account_email_address'), self.config.get('gce','gce_service_account_pem_file_path') ] kwargs = {'project': self.config.get('gce', 'gce_project_id')} # If the appropriate environment variables are set, they override # other configuration; process those into our args and kwargs. args[0] = os.environ.get('GCE_EMAIL', args[0]) args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1]) kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project']) # Retrieve and return the GCE driver. 
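# ------------------------------------------------------------------
# Editor's illustrative note (hypothetical values, not part of the original
# script): with the environment set as
#   GCE_EMAIL=svc-account@my-project.iam.gserviceaccount.com
#   GCE_PEM_FILE_PATH=/etc/gce/pkey.pem
#   GCE_PROJECT=my-project
# the overrides above leave args == [GCE_EMAIL, GCE_PEM_FILE_PATH] and
# kwargs == {'project': 'my-project'}, regardless of what secrets.py or
# gce.ini contained.
# ------------------------------------------------------------------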
gce = get_driver(Provider.GCE)(*args, **kwargs) gce.connection.user_agent_append( '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION), ) return gce def parse_cli_args(self): ''' Command line argument processing ''' parser = argparse.ArgumentParser( description='Produce an Ansible Inventory file based on GCE') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all information about an instance') parser.add_argument('--pretty', action='store_true', default=False, help='Pretty format (default: False)') self.args = parser.parse_args() def node_to_dict(self, inst): md = {} if inst is None: return {} if inst.extra['metadata'].has_key('items'): for entry in inst.extra['metadata']['items']: md[entry['key']] = entry['value'] net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1] # default to external IP unless user has specified they prefer internal if self.ip_type == 'internal': ssh_host = inst.private_ips[0] else: ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0] return { 'gce_uuid': inst.uuid, 'gce_id': inst.id, 'gce_image': inst.image, 'gce_machine_type': inst.size, 'gce_private_ip': inst.private_ips[0], 'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None, 'gce_name': inst.name, 'gce_description': inst.extra['description'], 'gce_status': inst.extra['status'], 'gce_zone': inst.extra['zone'].name, 'gce_tags': inst.extra['tags'], 'gce_metadata': md, 'gce_network': net, # Hosts don't have a public name, so we add an IP 'ansible_ssh_host': ssh_host } def get_instance(self, instance_name): '''Gets details about a specific instance ''' try: return self.driver.ex_get_node(instance_name) except Exception as e: return None def group_instances(self): '''Group all instances''' groups = {} meta = {} meta["hostvars"] = {} for node in self.driver.list_nodes(): name = node.name meta["hostvars"][name] = self.node_to_dict(node) zone = node.extra['zone'].name if groups.has_key(zone): groups[zone].append(name) else: groups[zone] = [name] tags = node.extra['tags'] for t in tags: if t.startswith('group-'): tag = t[6:] else: tag = 'tag_%s' % t if groups.has_key(tag): groups[tag].append(name) else: groups[tag] = [name] net = node.extra['networkInterfaces'][0]['network'].split('/')[-1] net = 'network_%s' % net if groups.has_key(net): groups[net].append(name) else: groups[net] = [name] machine_type = node.size if groups.has_key(machine_type): groups[machine_type].append(name) else: groups[machine_type] = [name] image = node.image and node.image or 'persistent_disk' if groups.has_key(image): groups[image].append(name) else: groups[image] = [name] status = node.extra['status'] stat = 'status_%s' % status.lower() if groups.has_key(stat): groups[stat].append(name) else: groups[stat] = [name] groups["_meta"] = meta return groups def json_format_dict(self, data, pretty=False): ''' Converts a dict to a JSON object and dumps it as a formatted string ''' if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) # Run the script GceInventory() ansible-2.1.1.0/contrib/inventory/jail.py0000775000175400017540000000241312746444466021436 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # (c) 2013, Michael Scherer # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either
version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . from subprocess import Popen,PIPE import sys import json result = {} result['all'] = {} pipe = Popen(['jls', '-q', 'name'], stdout=PIPE, universal_newlines=True) result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()] result['all']['vars'] = {} result['all']['vars']['ansible_connection'] = 'jail' if len(sys.argv) == 2 and sys.argv[1] == '--list': print(json.dumps(result)) elif len(sys.argv) == 3 and sys.argv[1] == '--host': print(json.dumps({'ansible_connection': 'jail'})) else: print("Need an argument, either --list or --host ") ansible-2.1.1.0/contrib/inventory/landscape.py0000775000175400017540000000661312746444466022457 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # (c) 2015, Marc Abramowitz # # This file is part of Ansible. # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Dynamic inventory script which lets you use nodes discovered by Canonical's # Landscape (http://www.ubuntu.com/management/landscape-features). 
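# ------------------------------------------------------------------
# Editor's illustrative sketch of the --list output this script produces
# (hostnames and tags are hypothetical):
#   {"landscape": ["web1", "db1"],
#    "monitoring": ["web1"],
#    "_meta": {"hostvars": {"web1": {"tags": ["monitoring"]},
#                           "db1": {"tags": []}}}}
# Tag groups come from each computer's 'tags' field; the 'landscape' group
# always contains every discovered hostname.
# ------------------------------------------------------------------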
# # Requires the `landscape_api` Python module # See: # - https://landscape.canonical.com/static/doc/api/api-client-package.html # - https://landscape.canonical.com/static/doc/api/python-api.html # # Environment variables # --------------------- # - `LANDSCAPE_API_URI` # - `LANDSCAPE_API_KEY` # - `LANDSCAPE_API_SECRET` # - `LANDSCAPE_API_SSL_CA_FILE` (optional) import argparse import collections import os import sys from landscape_api.base import API, HTTPError try: import json except ImportError: import simplejson as json _key = 'landscape' class EnvironmentConfig(object): uri = os.getenv('LANDSCAPE_API_URI') access_key = os.getenv('LANDSCAPE_API_KEY') secret_key = os.getenv('LANDSCAPE_API_SECRET') ssl_ca_file = os.getenv('LANDSCAPE_API_SSL_CA_FILE') def _landscape_client(): env = EnvironmentConfig() return API( uri=env.uri, access_key=env.access_key, secret_key=env.secret_key, ssl_ca_file=env.ssl_ca_file) def get_landscape_members_data(): return _landscape_client().get_computers() def get_nodes(data): return [node['hostname'] for node in data] def get_groups(data): groups = collections.defaultdict(list) for node in data: for value in node['tags']: groups[value].append(node['hostname']) return groups def get_meta(data): meta = {'hostvars': {}} for node in data: meta['hostvars'][node['hostname']] = {'tags': node['tags']} return meta def print_list(): data = get_landscape_members_data() nodes = get_nodes(data) groups = get_groups(data) meta = get_meta(data) inventory_data = {_key: nodes, '_meta': meta} inventory_data.update(groups) print(json.dumps(inventory_data)) def print_host(host): data = get_landscape_members_data() meta = get_meta(data) print(json.dumps(meta['hostvars'][host])) def get_args(args_list): parser = argparse.ArgumentParser( description='ansible inventory script reading from landscape cluster') mutex_group = parser.add_mutually_exclusive_group(required=True) help_list = 'list all hosts from landscape cluster' mutex_group.add_argument('--list', action='store_true', help=help_list) help_host = 'display variables for a host' mutex_group.add_argument('--host', help=help_host) return parser.parse_args(args_list) def main(args_list): args = get_args(args_list) if args.list: print_list() if args.host: print_host(args.host) if __name__ == '__main__': main(sys.argv[1:]) ansible-2.1.1.0/contrib/inventory/libcloud.ini0000664000175400017540000000032312746444466022436 0ustar jenkinsjenkins00000000000000# Ansible Apache Libcloud Generic inventory script [driver] provider = CLOUDSTACK host = path = secure = True verify_ssl_cert = True key = secret = [cache] cache_path=/path/to/your/cache cache_max_age=60 ansible-2.1.1.0/contrib/inventory/libvirt_lxc.py0000775000175400017540000000247712746444466023052 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # (c) 2013, Michael Scherer # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
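# ------------------------------------------------------------------
# Editor's illustrative note: the script below shells out to
#   virsh -q -c lxc:/// list --name --all
# which is assumed to print one container name per line, e.g. (hypothetical):
#   container1
#   container2
# Every name is placed in the 'all' group with ansible_connection set to
# 'libvirt_lxc'.
# ------------------------------------------------------------------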
from subprocess import Popen,PIPE import sys import json result = {} result['all'] = {} pipe = Popen(['virsh', '-q', '-c', 'lxc:///', 'list', '--name', '--all'], stdout=PIPE, universal_newlines=True) result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()] result['all']['vars'] = {} result['all']['vars']['ansible_connection'] = 'libvirt_lxc' if len(sys.argv) == 2 and sys.argv[1] == '--list': print(json.dumps(result)) elif len(sys.argv) == 3 and sys.argv[1] == '--host': print(json.dumps({'ansible_connection': 'libvirt_lxc'})) else: print("Need an argument, either --list or --host ") ansible-2.1.1.0/contrib/inventory/linode.ini0000664000175400017540000000076712746444466022117 0ustar jenkinsjenkins00000000000000# Ansible Linode external inventory script settings # [linode] # API calls to Linode are slow. For this reason, we cache the results of an API # call. Set this to the path you want cache files to be written to. Two files # will be written to this directory: # - ansible-linode.cache # - ansible-linode.index cache_path = /tmp # The number of seconds a cache file is considered valid. After this many # seconds, a new API call will be made, and the cache file will be updated. cache_max_age = 300 ansible-2.1.1.0/contrib/inventory/linode.py0000775000175400017540000002634512746444466021773 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python ''' Linode external inventory script ================================= Generates inventory that Ansible can understand by making API requests to Linode using the Chube library. NOTE: This script assumes Ansible is being executed where Chube is already installed and has a valid config at ~/.chube. If not, run: pip install chube echo -e "---\napi_key: " > ~/.chube For more details, see: https://github.com/exosite/chube NOTE: This script also assumes that the Linodes in your account all have labels that correspond to hostnames that are in your resolver search path. Your resolver search path resides in /etc/hosts. When run against a specific host, this script returns the following variables: - api_id - datacenter_id - datacenter_city (lowercase city name of data center, e.g. 'tokyo') - label - display_group - create_dt - total_hd - total_xfer - total_ram - status - public_ip (The first public IP found) - private_ip (The first private IP found, or empty string if none) - alert_cpu_enabled - alert_cpu_threshold - alert_diskio_enabled - alert_diskio_threshold - alert_bwin_enabled - alert_bwin_threshold - alert_bwout_enabled - alert_bwout_threshold - alert_bwquota_enabled - alert_bwquota_threshold - backup_weekly_daily - backup_window - watchdog Peter Sankauskas did most of the legwork here with his ec2 plugin; I just adapted that for Linode. ''' # (c) 2013, Dan Slimmon # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see .
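# ------------------------------------------------------------------
# Editor's illustrative note (hypothetical label and ID): once the cache is
# built, self.index maps Linode labels to numeric API IDs, e.g.
#   {'web01': 1234567}
# and `linode.py --host web01` resolves the label through this index before
# calling Linode.find(api_id=1234567).
# ------------------------------------------------------------------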
###################################################################### # Standard imports import os import re import sys import argparse from time import time try: import json except ImportError: import simplejson as json try: from chube import load_chube_config from chube import api as chube_api from chube.datacenter import Datacenter from chube.linode_obj import Linode except: try: # remove local paths and other stuff that may # cause an import conflict, as chube is sensitive # to name collisions on importing old_path = sys.path sys.path = [d for d in sys.path if d not in ('', os.getcwd(), os.path.dirname(os.path.realpath(__file__)))] from chube import load_chube_config from chube import api as chube_api from chube.datacenter import Datacenter from chube.linode_obj import Linode sys.path = old_path except Exception as e: raise Exception("could not import chube") load_chube_config() # Imports for ansible import ConfigParser class LinodeInventory(object): def __init__(self): """Main execution path.""" # Inventory grouped by display group self.inventory = {} # Index of label to Linode ID self.index = {} # Local cache of Datacenter objects populated by populate_datacenter_cache() self._datacenter_cache = None # Read settings and parse CLI arguments self.read_settings() self.parse_cli_args() # Cache if self.args.refresh_cache: self.do_api_calls_update_cache() elif not self.is_cache_valid(): self.do_api_calls_update_cache() # Data to print if self.args.host: data_to_print = self.get_host_info() elif self.args.list: # Display list of nodes for inventory if len(self.inventory) == 0: data_to_print = self.get_inventory_from_cache() else: data_to_print = self.json_format_dict(self.inventory, True) print(data_to_print) def is_cache_valid(self): """Determines if the cache file has expired, or if it is still valid.""" if os.path.isfile(self.cache_path_cache): mod_time = os.path.getmtime(self.cache_path_cache) current_time = time() if (mod_time + self.cache_max_age) > current_time: if os.path.isfile(self.cache_path_index): return True return False def read_settings(self): """Reads the settings from the .ini file.""" config = ConfigParser.SafeConfigParser() config.read(os.path.dirname(os.path.realpath(__file__)) + '/linode.ini') # Cache related cache_path = config.get('linode', 'cache_path') self.cache_path_cache = cache_path + "/ansible-linode.cache" self.cache_path_index = cache_path + "/ansible-linode.index" self.cache_max_age = config.getint('linode', 'cache_max_age') def parse_cli_args(self): """Command line argument processing""" parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Linode') parser.add_argument('--list', action='store_true', default=True, help='List nodes (default: True)') parser.add_argument('--host', action='store', help='Get all the variables about a specific node') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests to Linode (default: False - use cache files)') self.args = parser.parse_args() def do_api_calls_update_cache(self): """Do API calls, and save data in cache files.""" self.get_nodes() self.write_to_cache(self.inventory, self.cache_path_cache) self.write_to_cache(self.index, self.cache_path_index) def get_nodes(self): """Makes an Linode API call to get the list of nodes.""" try: for node in Linode.search(status=Linode.STATUS_RUNNING): self.add_node(node) except chube_api.linode_api.ApiError as e: print("Looks like Linode's API is down:") print("") print(e) 
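            # Without the node list there is nothing worth caching, so exit
            # non-zero instead of writing an empty cache file.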
            sys.exit(1)

    def get_node(self, linode_id):
        """Gets details about a specific node."""
        try:
            return Linode.find(api_id=linode_id)
        except chube_api.linode_api.ApiError as e:
            print("Looks like Linode's API is down:")
            print("")
            print(e)
            sys.exit(1)

    def populate_datacenter_cache(self):
        """Creates self._datacenter_cache, containing all Datacenters indexed by ID."""
        self._datacenter_cache = {}
        dcs = Datacenter.search()
        for dc in dcs:
            self._datacenter_cache[dc.api_id] = dc

    def get_datacenter_city(self, node):
        """Returns the lowercase city name of the node's data center."""
        if self._datacenter_cache is None:
            self.populate_datacenter_cache()
        location = self._datacenter_cache[node.datacenter_id].location
        location = location.lower()
        location = location.split(",")[0]
        return location

    def add_node(self, node):
        """Adds a node to the inventory and index."""
        dest = node.label

        # Add to index
        self.index[dest] = node.api_id

        # Inventory: Group by node ID (always a group of 1)
        self.inventory[node.api_id] = [dest]

        # Inventory: Group by datacenter city
        self.push(self.inventory, self.get_datacenter_city(node), dest)

        # Inventory: Group by display group
        self.push(self.inventory, node.display_group, dest)

    def get_host_info(self):
        """Get variables about a specific host."""

        if len(self.index) == 0:
            # Need to load index from cache
            self.load_index_from_cache()

        if self.args.host not in self.index:
            # try updating the cache
            self.do_api_calls_update_cache()
            if self.args.host not in self.index:
                # host might not exist anymore
                return self.json_format_dict({}, True)

        node_id = self.index[self.args.host]
        node = self.get_node(node_id)
        node_vars = {}
        for direct_attr in [
            "api_id",
            "datacenter_id",
            "label",
            "display_group",
            "create_dt",
            "total_hd",
            "total_xfer",
            "total_ram",
            "status",
            "alert_cpu_enabled",
            "alert_cpu_threshold",
            "alert_diskio_enabled",
            "alert_diskio_threshold",
            "alert_bwin_enabled",
            "alert_bwin_threshold",
            "alert_bwout_enabled",
            "alert_bwout_threshold",
            "alert_bwquota_enabled",
            "alert_bwquota_threshold",
            "backup_weekly_daily",
            "backup_window",
            "watchdog"
        ]:
            node_vars[direct_attr] = getattr(node, direct_attr)

        node_vars["datacenter_city"] = self.get_datacenter_city(node)
        node_vars["public_ip"] = [addr.address for addr in node.ipaddresses if addr.is_public][0]

        # Set the SSH host information, so these inventory items can be used if
        # their labels aren't FQDNs
        node_vars['ansible_ssh_host'] = node_vars["public_ip"]
        node_vars['ansible_host'] = node_vars["public_ip"]

        private_ips = [addr.address for addr in node.ipaddresses if not addr.is_public]

        if private_ips:
            node_vars["private_ip"] = private_ips[0]

        return self.json_format_dict(node_vars, True)

    def push(self, my_dict, key, element):
        """Pushes an element onto a list that may not yet be defined in the dict."""
        if key in my_dict:
            my_dict[key].append(element)
        else:
            my_dict[key] = [element]

    def get_inventory_from_cache(self):
        """Reads the inventory from the cache file and returns it as a JSON object."""
        cache = open(self.cache_path_cache, 'r')
        json_inventory = cache.read()
        return json_inventory

    def load_index_from_cache(self):
        """Reads the index from the cache file and sets self.index."""
        cache = open(self.cache_path_index, 'r')
        json_index = cache.read()
        self.index = json.loads(json_index)

    def write_to_cache(self, data, filename):
        """Writes data in JSON format to a file."""
        json_data = self.json_format_dict(data, True)
        cache = open(filename, 'w')
        cache.write(json_data)
        cache.close()

    def to_safe(self, word):
        """Escapes any characters that would be invalid in an ansible
group name.""" return re.sub("[^A-Za-z0-9\-]", "_", word) def json_format_dict(self, data, pretty=False): """Converts a dict to a JSON object and dumps it as a formatted string.""" if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) LinodeInventory() ansible-2.1.1.0/contrib/inventory/nagios_ndo.ini0000664000175400017540000000057212746444466022767 0ustar jenkinsjenkins00000000000000# Ansible Nagios external inventory script settings # [ndo] # NDO database URI # Make sure that data is returned as strings and not bytes if using python 3. # See http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html # for supported databases and URI format. # Example for mysqlclient module : database_uri=mysql+mysqldb://user:passwd@hostname/ndo?charset=utf8&use_unicode=1 ansible-2.1.1.0/contrib/inventory/nagios_ndo.py0000775000175400017540000000740212746444466022642 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # (c) 2014, Jonathan Lestrelin # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . """ Nagios NDO external inventory script. ======================================== Returns hosts and hostgroups from Nagios NDO. Configuration is read from `nagios_ndo.ini`. """ import os import argparse try: import configparser except ImportError: import ConfigParser configparser = ConfigParser import json try: from sqlalchemy import text from sqlalchemy.engine import create_engine except ImportError: print("Error: SQLAlchemy is needed. 
Try something like: pip install sqlalchemy") exit(1) class NagiosNDOInventory(object): def read_settings(self): config = configparser.SafeConfigParser() config.read(os.path.dirname(os.path.realpath(__file__)) + '/nagios_ndo.ini') if config.has_option('ndo', 'database_uri'): self.ndo_database_uri = config.get('ndo', 'database_uri') def read_cli(self): parser = argparse.ArgumentParser() parser.add_argument('--host', nargs=1) parser.add_argument('--list', action='store_true') self.options = parser.parse_args() def get_hosts(self): engine = create_engine(self.ndo_database_uri) connection = engine.connect() select_hosts = text("SELECT display_name \ FROM nagios_hosts") select_hostgroups = text("SELECT alias \ FROM nagios_hostgroups") select_hostgroup_hosts = text("SELECT h.display_name \ FROM nagios_hostgroup_members hgm, nagios_hosts h, nagios_hostgroups hg \ WHERE hgm.hostgroup_id = hg.hostgroup_id \ AND hgm.host_object_id = h.host_object_id \ AND hg.alias =:hostgroup_alias") hosts = connection.execute(select_hosts) self.result['all']['hosts'] = [host['display_name'] for host in hosts] for hostgroup in connection.execute(select_hostgroups): hostgroup_alias = hostgroup['alias'] self.result[hostgroup_alias] = {} hosts = connection.execute(select_hostgroup_hosts, hostgroup_alias=hostgroup_alias) self.result[hostgroup_alias]['hosts'] = [host['display_name'] for host in hosts] def __init__(self): self.defaultgroup = 'group_all' self.ndo_database_uri = None self.options = None self.read_settings() self.read_cli() self.result = {} self.result['all'] = {} self.result['all']['hosts'] = [] self.result['_meta'] = {} self.result['_meta']['hostvars'] = {} if self.ndo_database_uri: self.get_hosts() if self.options.host: print(json.dumps({})) elif self.options.list: print(json.dumps(self.result)) else: print("usage: --list or --host HOSTNAME") exit(1) else: print("Error: Database configuration is missing. 
See nagios_ndo.ini.") exit(1) NagiosNDOInventory() ansible-2.1.1.0/contrib/inventory/nova.ini0000664000175400017540000000225212746444466021607 0ustar jenkinsjenkins00000000000000# Ansible OpenStack external inventory script # DEPRECATED: please use openstack.py inventory which is configured for # auth using the os-client-config library and either clouds.yaml or standard # openstack environment variables [openstack] #------------------------------------------------------------------------- # Required settings #------------------------------------------------------------------------- # API version version = 2 # OpenStack nova username username = # OpenStack nova api_key or password api_key = # OpenStack nova auth_url auth_url = # OpenStack nova project_id or tenant name project_id = #------------------------------------------------------------------------- # Optional settings #------------------------------------------------------------------------- # Authentication system # auth_system = keystone # Serverarm region name to use # region_name = # Specify a preference for public or private IPs (public is default) # prefer_private = False # What service type (required for newer nova client) # service_type = compute # TODO: Some other options # insecure = # endpoint_type = # extensions = # service_name = ansible-2.1.1.0/contrib/inventory/nova.py0000775000175400017540000001556612746444466021477 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # (c) 2012, Marco Vito Moscaritolo # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # WARNING: This file is deprecated. New work should focus on the openstack.py # inventory module, which properly handles multiple clouds as well as keystone # v3 and keystone auth plugins import sys import re import os import ConfigParser from novaclient import client as nova_client from six import iteritems try: import json except ImportError: import simplejson as json sys.stderr.write("WARNING: this inventory module is deprecated. 
please migrate usage to openstack.py\n") ################################################### # executed with no parameters, return the list of # all groups and hosts NOVA_CONFIG_FILES = [os.getcwd() + "/nova.ini", os.path.expanduser(os.environ.get('ANSIBLE_CONFIG', "~/nova.ini")), "/etc/ansible/nova.ini"] NOVA_DEFAULTS = { 'auth_system': None, 'region_name': None, 'service_type': 'compute', } def nova_load_config_file(): p = ConfigParser.SafeConfigParser(NOVA_DEFAULTS) for path in NOVA_CONFIG_FILES: if os.path.exists(path): p.read(path) return p return None def get_fallback(config, value, section="openstack"): """ Get value from config object and return the value or false """ try: return config.get(section, value) except ConfigParser.NoOptionError: return False def push(data, key, element): """ Assist in items to a dictionary of lists """ if (not element) or (not key): return if key in data: data[key].append(element) else: data[key] = [element] def to_safe(word): ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' return re.sub(r"[^A-Za-z0-9\-]", "_", word) def get_ips(server, access_ip=True): """ Returns a list of the server's IPs, or the preferred access IP """ private = [] public = [] address_list = [] # Iterate through each servers network(s), get addresses and get type addresses = getattr(server, 'addresses', {}) if len(addresses) > 0: for network in addresses.itervalues(): for address in network: if address.get('OS-EXT-IPS:type', False) == 'fixed': private.append(address['addr']) elif address.get('OS-EXT-IPS:type', False) == 'floating': public.append(address['addr']) if not access_ip: address_list.append(server.accessIPv4) address_list.extend(private) address_list.extend(public) return address_list access_ip = None # Append group to list if server.accessIPv4: access_ip = server.accessIPv4 if (not access_ip) and public and not (private and prefer_private): access_ip = public[0] if private and not access_ip: access_ip = private[0] return access_ip def get_metadata(server): """Returns dictionary of all host metadata""" get_ips(server, False) results = {} for key in vars(server): # Extract value value = getattr(server, key) # Generate sanitized key key = 'os_' + re.sub(r"[^A-Za-z0-9\-]", "_", key).lower() # Att value to instance result (exclude manager class) #TODO: maybe use value.__class__ or similar inside of key_name if key != 'os_manager': results[key] = value return results config = nova_load_config_file() if not config: sys.exit('Unable to find configfile in %s' % ', '.join(NOVA_CONFIG_FILES)) # Load up connections info based on config and then environment # variables username = (get_fallback(config, 'username') or os.environ.get('OS_USERNAME', None)) api_key = (get_fallback(config, 'api_key') or os.environ.get('OS_PASSWORD', None)) auth_url = (get_fallback(config, 'auth_url') or os.environ.get('OS_AUTH_URL', None)) project_id = (get_fallback(config, 'project_id') or os.environ.get('OS_TENANT_NAME', None)) region_name = (get_fallback(config, 'region_name') or os.environ.get('OS_REGION_NAME', None)) auth_system = (get_fallback(config, 'auth_system') or os.environ.get('OS_AUTH_SYSTEM', None)) # Determine what type of IP is preferred to return prefer_private = False try: prefer_private = config.getboolean('openstack', 'prefer_private') except ConfigParser.NoOptionError: pass client = nova_client.Client( version=config.get('openstack', 'version'), username=username, api_key=api_key, auth_url=auth_url, region_name=region_name, 
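    # Note: project_id here is the keystone v2 tenant name (populated from
    # the project_id setting or OS_TENANT_NAME above), despite the
    # parameter's name.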
project_id=project_id, auth_system=auth_system, service_type=config.get('openstack', 'service_type'), ) # Default or added list option if (len(sys.argv) == 2 and sys.argv[1] == '--list') or len(sys.argv) == 1: groups = {'_meta': {'hostvars': {}}} # Cycle on servers for server in client.servers.list(): access_ip = get_ips(server) # Push to name group of 1 push(groups, server.name, access_ip) # Run through each metadata item and add instance to it for key, value in iteritems(server.metadata): composed_key = to_safe('tag_{0}_{1}'.format(key, value)) push(groups, composed_key, access_ip) # Do special handling of group for backwards compat # inventory groups group = server.metadata['group'] if 'group' in server.metadata else 'undefined' push(groups, group, access_ip) # Add vars to _meta key for performance optimization in # Ansible 1.3+ groups['_meta']['hostvars'][access_ip] = get_metadata(server) # Return server list print(json.dumps(groups, sort_keys=True, indent=2)) sys.exit(0) ##################################################### # executed with a hostname as a parameter, return the # variables for that host elif len(sys.argv) == 3 and (sys.argv[1] == '--host'): results = {} ips = [] for server in client.servers.list(): if sys.argv[2] in (get_ips(server) or []): results = get_metadata(server) print(json.dumps(results, sort_keys=True, indent=2)) sys.exit(0) else: print("usage: --list ..OR.. --host ") sys.exit(1) ansible-2.1.1.0/contrib/inventory/nsot.py0000664000175400017540000002315312746444466021503 0ustar jenkinsjenkins00000000000000#!/bin/env python ''' nsot ==== Ansible Dynamic Inventory to pull hosts from NSoT, a flexible CMDB by Dropbox Features -------- * Define host groups in form of NSoT device attribute criteria * All parameters defined by the spec as of 2015-09-05 are supported. + ``--list``: Returns JSON hash of host groups -> hosts and top-level ``_meta`` -> ``hostvars`` which correspond to all device attributes. Group vars can be specified in the YAML configuration, noted below. + ``--host ``: Returns JSON hash where every item is a device attribute. * In addition to all attributes assigned to resource being returned, script will also append ``site_id`` and ``id`` as facts to utilize. Confguration ------------ Since it'd be annoying and failure prone to guess where you're configuration file is, use ``NSOT_INVENTORY_CONFIG`` to specify the path to it. This file should adhere to the YAML spec. All top-level variable must be desired Ansible group-name hashed with single 'query' item to define the NSoT attribute query. Queries follow the normal NSoT query syntax, `shown here`_ .. _shown here: https://github.com/dropbox/pynsot#set-queries .. code:: yaml routers: query: 'deviceType=ROUTER' vars: a: b c: d juniper_fw: query: 'deviceType=FIREWALL manufacturer=JUNIPER' not_f10: query: '-manufacturer=FORCE10' The inventory will automatically use your ``.pynsotrc`` like normal pynsot from cli would, so make sure that's configured appropriately. .. note:: Attributes I'm showing above are influenced from ones that the Trigger project likes. As is the spirit of NSoT, use whichever attributes work best for your workflow. If config file is blank or absent, the following default groups will be created: * ``routers``: deviceType=ROUTER * ``switches``: deviceType=SWITCH * ``firewalls``: deviceType=FIREWALL These are likely not useful for everyone so please use the configuration. :) .. 
note:: By default, resources will only be returned for what your default site is set for in your ``~/.pynsotrc``. If you want to specify, add an extra key under the group for ``site: n``. Output Examples --------------- Here are some examples shown from just calling the command directly:: $ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --list | jq '.' { "routers": { "hosts": [ "test1.example.com" ], "vars": { "cool_level": "very", "group": "routers" } }, "firewalls": { "hosts": [ "test2.example.com" ], "vars": { "cool_level": "enough", "group": "firewalls" } }, "_meta": { "hostvars": { "test2.example.com": { "make": "SRX", "site_id": 1, "id": 108 }, "test1.example.com": { "make": "MX80", "site_id": 1, "id": 107 } } }, "rtr_and_fw": { "hosts": [ "test1.example.com", "test2.example.com" ], "vars": {} } } $ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --host test1 | jq '.' { "make": "MX80", "site_id": 1, "id": 107 } ''' from __future__ import print_function import sys import os import pkg_resources import argparse import json import yaml from textwrap import dedent from pynsot.client import get_api_client from pynsot.app import HttpServerError from click.exceptions import UsageError def warning(*objs): print("WARNING: ", *objs, file=sys.stderr) class NSoTInventory(object): '''NSoT Client object for gather inventory''' def __init__(self): self.config = dict() config_env = os.environ.get('NSOT_INVENTORY_CONFIG') if config_env: try: config_file = os.path.abspath(config_env) except IOError: # If file non-existent, use default config self._config_default() except Exception as e: sys.exit('%s\n' % e) with open(config_file) as f: try: self.config.update(yaml.safe_load(f)) except TypeError: # If empty file, use default config warning('Empty config file') self._config_default() except Exception as e: sys.exit('%s\n' % e) else: # Use defaults if env var missing self._config_default() self.groups = self.config.keys() self.client = get_api_client() self._meta = {'hostvars': dict()} def _config_default(self): default_yaml = ''' --- routers: query: deviceType=ROUTER switches: query: deviceType=SWITCH firewalls: query: deviceType=FIREWALL ''' self.config = yaml.safe_load(dedent(default_yaml)) def do_list(self): '''Direct callback for when ``--list`` is provided Relies on the configuration generated from init to run _inventory_group() ''' inventory = dict() for group, contents in self.config.iteritems(): group_response = self._inventory_group(group, contents) inventory.update(group_response) inventory.update({'_meta': self._meta}) return json.dumps(inventory) def do_host(self, host): return json.dumps(self._hostvars(host)) def _hostvars(self, host): '''Return dictionary of all device attributes Depending on number of devices in NSoT, could be rather slow since this has to request every device resource to filter through ''' device = [i for i in self.client.devices.get()['data']['devices'] if host in i['hostname']][0] attributes = device['attributes'] attributes.update({'site_id': device['site_id'], 'id': device['id']}) return attributes def _inventory_group(self, group, contents): '''Takes a group and returns inventory for it as dict :param group: Group name :type group: str :param contents: The contents of the group's YAML config :type contents: dict contents param should look like:: { 'query': 'xx', 'vars': 'a': 'b' } Will return something like:: { group: { hosts: [], vars: {}, } ''' query = contents.get('query') hostvars = contents.get('vars', dict()) site = contents.get('site', dict()) obj = 
{group: dict()} obj[group]['hosts'] = [] obj[group]['vars'] = hostvars try: assert isinstance(query, basestring) except: sys.exit('ERR: Group queries must be a single string\n' ' Group: %s\n' ' Query: %s\n' % (group, query) ) try: if site: site = self.client.sites(site) devices = site.devices.query.get(query=query) else: devices = self.client.devices.query.get(query=query) except HttpServerError as e: if '500' in str(e.response): _site = 'Correct site id?' _attr = 'Queried attributes actually exist?' questions = _site + '\n' + _attr sys.exit('ERR: 500 from server.\n%s' % questions) else: raise except UsageError: sys.exit('ERR: Could not connect to server. Running?') # Would do a list comprehension here, but would like to save code/time # and also acquire attributes in this step for host in devices['data']['devices']: # Iterate through each device that matches query, assign hostname # to the group's hosts array and then use this single iteration as # a chance to update self._meta which will be used in the final # return hostname = host['hostname'] obj[group]['hosts'].append(hostname) attributes = host['attributes'] attributes.update({'site_id': host['site_id'], 'id': host['id']}) self._meta['hostvars'].update({hostname: attributes}) return obj def parse_args(): desc = __doc__.splitlines()[4] # Just to avoid being redundant # Establish parser with options and error out if no action provided parser = argparse.ArgumentParser( description=desc, conflict_handler='resolve', ) # Arguments # # Currently accepting (--list | -l) and (--host | -h) # These must not be allowed together parser.add_argument( '--list', '-l', help='Print JSON object containing hosts to STDOUT', action='store_true', dest='list_', # Avoiding syntax highlighting for list ) parser.add_argument( '--host', '-h', help='Print JSON object containing hostvars for ', action='store', ) args = parser.parse_args() if not args.list_ and not args.host: # Require at least one option parser.exit(status=1, message='No action requested') if args.list_ and args.host: # Do not allow multiple options parser.exit(status=1, message='Too many actions requested') return args def main(): '''Set up argument handling and callback routing''' args = parse_args() client = NSoTInventory() # Callback condition if args.list_: print(client.do_list()) elif args.host: print(client.do_host(args.host)) if __name__ == '__main__': main() ansible-2.1.1.0/contrib/inventory/nsot.yaml0000664000175400017540000000065312746444466022015 0ustar jenkinsjenkins00000000000000--- juniper_routers: query: 'deviceType=ROUTER manufacturer=JUNIPER' vars: group: juniper_routers netconf: true os: junos cisco_asa: query: 'manufacturer=CISCO deviceType=FIREWALL' vars: group: cisco_asa routed_vpn: false stateful: true old_cisco_asa: query: 'manufacturer=CISCO deviceType=FIREWALL -softwareVersion=8.3+' vars: old_nat: true not_f10: query: '-manufacturer=FORCE10' ansible-2.1.1.0/contrib/inventory/openshift.py0000775000175400017540000000633312746444466022523 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # (c) 2013, Michael Scherer # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . DOCUMENTATION = ''' --- inventory: openshift short_description: Openshift gears external inventory script description: - Generates inventory of Openshift gears using the REST interface - this permit to reuse playbook to setup an Openshift gear version_added: None author: Michael Scherer ''' try: import json except ImportError: import simplejson as json import os import os.path import sys import ConfigParser import StringIO from ansible.module_utils.urls import open_url configparser = None def get_from_rhc_config(variable): global configparser CONF_FILE = os.path.expanduser('~/.openshift/express.conf') if os.path.exists(CONF_FILE): if not configparser: ini_str = '[root]\n' + open(CONF_FILE, 'r').read() configparser = ConfigParser.SafeConfigParser() configparser.readfp(StringIO.StringIO(ini_str)) try: return configparser.get('root', variable) except ConfigParser.NoOptionError: return None def get_config(env_var, config_var): result = os.getenv(env_var) if not result: result = get_from_rhc_config(config_var) if not result: print("failed=True msg='missing %s'" % env_var) sys.exit(1) return result def get_json_from_api(url, username, password): headers = {'Accept': 'application/json; version=1.5'} response = open_url(url, headers=headers, url_username=username, url_password=password) return json.loads(response.read())['data'] username = get_config('ANSIBLE_OPENSHIFT_USERNAME', 'default_rhlogin') password = get_config('ANSIBLE_OPENSHIFT_PASSWORD', 'password') broker_url = 'https://%s/broker/rest/' % get_config('ANSIBLE_OPENSHIFT_BROKER', 'libra_server') response = get_json_from_api(broker_url + '/domains', username, password) response = get_json_from_api("%s/domains/%s/applications" % (broker_url, response[0]['id']), username, password) result = {} for app in response: # ssh://520311404832ce3e570000ff@blog-johndoe.example.org (user, host) = app['ssh_url'][6:].split('@') app_name = host.split('-')[0] result[app_name] = {} result[app_name]['hosts'] = [] result[app_name]['hosts'].append(host) result[app_name]['vars'] = {} result[app_name]['vars']['ansible_ssh_user'] = user if len(sys.argv) == 2 and sys.argv[1] == '--list': print(json.dumps(result)) elif len(sys.argv) == 3 and sys.argv[1] == '--host': print(json.dumps({})) else: print("Need an argument, either --list or --host ") ansible-2.1.1.0/contrib/inventory/openstack.py0000775000175400017540000002017712746444466022515 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2012, Marco Vito Moscaritolo # Copyright (c) 2013, Jesse Keating # Copyright (c) 2015, Hewlett-Packard Development Company, L.P. # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. 
If not, see . # The OpenStack Inventory module uses os-client-config for configuration. # https://github.com/stackforge/os-client-config # This means it will either: # - Respect normal OS_* environment variables like other OpenStack tools # - Read values from a clouds.yaml file. # If you want to configure via clouds.yaml, you can put the file in: # - Current directory # - ~/.config/openstack/clouds.yaml # - /etc/openstack/clouds.yaml # - /etc/ansible/openstack.yml # The clouds.yaml file can contain entries for multiple clouds and multiple # regions of those clouds. If it does, this inventory module will connect to # all of them and present them as one contiguous inventory. # # See the adjacent openstack.yml file for an example config file # There are two ansible inventory specific options that can be set in # the inventory section. # expand_hostvars controls whether or not the inventory will make extra API # calls to fill out additional information about each server # use_hostnames changes the behavior from registering every host with its UUID # and making a group of its hostname to only doing this if the # hostname in question has more than one server import argparse import collections import os import sys import time try: import json except: import simplejson as json import os_client_config import shade import shade.inventory CONFIG_FILES = ['/etc/ansible/openstack.yaml', '/etc/ansible/openstack.yml'] def get_groups_from_server(server_vars, namegroup=True): groups = [] region = server_vars['region'] cloud = server_vars['cloud'] metadata = server_vars.get('metadata', {}) # Create a group for the cloud groups.append(cloud) # Create a group on region groups.append(region) # And one by cloud_region groups.append("%s_%s" % (cloud, region)) # Check if group metadata key in servers' metadata if 'group' in metadata: groups.append(metadata['group']) for extra_group in metadata.get('groups', '').split(','): if extra_group: groups.append(extra_group) groups.append('instance-%s' % server_vars['id']) if namegroup: groups.append(server_vars['name']) for key in ('flavor', 'image'): if 'name' in server_vars[key]: groups.append('%s-%s' % (key, server_vars[key]['name'])) for key, value in iter(metadata.items()): groups.append('meta-%s_%s' % (key, value)) az = server_vars.get('az', None) if az: # Make groups for az, region_az and cloud_region_az groups.append(az) groups.append('%s_%s' % (region, az)) groups.append('%s_%s_%s' % (cloud, region, az)) return groups def get_host_groups(inventory, refresh=False): (cache_file, cache_expiration_time) = get_cache_settings() if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh): groups = to_json(get_host_groups_from_cloud(inventory)) open(cache_file, 'w').write(groups) else: groups = open(cache_file, 'r').read() return groups def append_hostvars(hostvars, groups, key, server, namegroup=False): hostvars[key] = dict( ansible_ssh_host=server['interface_ip'], openstack=server) for group in get_groups_from_server(server, namegroup=namegroup): groups[group].append(key) def get_host_groups_from_cloud(inventory): groups = collections.defaultdict(list) firstpass = collections.defaultdict(list) hostvars = {} list_args = {} if hasattr(inventory, 'extra_config'): use_hostnames = inventory.extra_config['use_hostnames'] list_args['expand'] = inventory.extra_config['expand_hostvars'] else: use_hostnames = False for server in inventory.list_hosts(**list_args): if 'interface_ip' not in server: continue firstpass[server['name']].append(server) for name, servers in 
firstpass.items(): if len(servers) == 1 and use_hostnames: append_hostvars(hostvars, groups, name, servers[0]) else: server_ids = set() # Trap for duplicate results for server in servers: server_ids.add(server['id']) if len(server_ids) == 1 and use_hostnames: append_hostvars(hostvars, groups, name, servers[0]) else: for server in servers: append_hostvars( hostvars, groups, server['id'], server, namegroup=True) groups['_meta'] = {'hostvars': hostvars} return groups def is_cache_stale(cache_file, cache_expiration_time, refresh=False): ''' Determines if cache file has expired, or if it is still valid ''' if refresh: return True if os.path.isfile(cache_file) and os.path.getsize(cache_file) > 0: mod_time = os.path.getmtime(cache_file) current_time = time.time() if (mod_time + cache_expiration_time) > current_time: return False return True def get_cache_settings(): config = os_client_config.config.OpenStackConfig( config_files=os_client_config.config.CONFIG_FILES + CONFIG_FILES) # For inventory-wide caching cache_expiration_time = config.get_cache_expiration_time() cache_path = config.get_cache_path() if not os.path.exists(cache_path): os.makedirs(cache_path) cache_file = os.path.join(cache_path, 'ansible-inventory.cache') return (cache_file, cache_expiration_time) def to_json(in_dict): return json.dumps(in_dict, sort_keys=True, indent=2) def parse_args(): parser = argparse.ArgumentParser(description='OpenStack Inventory Module') parser.add_argument('--private', action='store_true', help='Use private address for ansible host') parser.add_argument('--refresh', action='store_true', help='Refresh cached information') parser.add_argument('--debug', action='store_true', default=False, help='Enable debug output') group = parser.add_mutually_exclusive_group(required=True) group.add_argument('--list', action='store_true', help='List active servers') group.add_argument('--host', help='List details about the specific host') return parser.parse_args() def main(): args = parse_args() try: config_files = os_client_config.config.CONFIG_FILES + CONFIG_FILES shade.simple_logging(debug=args.debug) inventory_args = dict( refresh=args.refresh, config_files=config_files, private=args.private, ) if hasattr(shade.inventory.OpenStackInventory, 'extra_config'): inventory_args.update(dict( config_key='ansible', config_defaults={ 'use_hostnames': False, 'expand_hostvars': True, } )) inventory = shade.inventory.OpenStackInventory(**inventory_args) if args.list: output = get_host_groups(inventory, refresh=args.refresh) elif args.host: output = to_json(inventory.get_host(args.host)) print(output) except shade.OpenStackCloudException as e: sys.stderr.write('%s\n' % e.message) sys.exit(1) sys.exit(0) if __name__ == '__main__': main() ansible-2.1.1.0/contrib/inventory/openstack.yml0000664000175400017540000000134412746444466022656 0ustar jenkinsjenkins00000000000000clouds: mordred: cloud: hp auth: username: mordred@example.com password: my-wonderful-password project_name: mordred-tenant region_name: region-b.geo-1 monty: cloud: hp auth: username: monty.taylor@example.com password: another-wonderful-password project_name: monty.taylor@example.com-default-tenant region_name: region-b.geo-1 rax: cloud: rackspace auth: username: example password: spectacular-password project_id: 2352426 region_name: DFW,ORD,IAD devstack: auth: auth_url: http://127.0.0.1:35357/v2.0/ username: stack password: stack project_name: stack ansible: use_hostnames: True expand_hostvars: False 
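# The `ansible:` section above sets the two inventory-specific options read
# by openstack.py; with a shade release new enough to expose `extra_config`,
# the script's built-in defaults are the opposite values
# (use_hostnames: False, expand_hostvars: True).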
ansible-2.1.1.0/contrib/inventory/openvz.py0000775000175400017540000000520412746444466022041 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # openvz.py # # Copyright 2014 jordonr # # This file is part of Ansible. # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # # Inspired by libvirt_lxc.py inventory script # https://github.com/ansible/ansible/blob/e5ef0eca03cbb6c8950c06dc50d0ca22aa8902f4/plugins/inventory/libvirt_lxc.py # # Groups are determined by the description field of openvz guests # multiple groups can be separated by commas: webserver,dbserver from subprocess import Popen,PIPE import sys import json #List openvz hosts vzhosts = ['vzhost1','vzhost2','vzhost3'] #Add openvz hosts to the inventory and Add "_meta" trick inventory = {'vzhosts': {'hosts': vzhosts}, '_meta': {'hostvars': {}}} #default group, when description not defined default_group = ['vzguest'] def get_guests(): #Loop through vzhosts for h in vzhosts: #SSH to vzhost and get the list of guests in json pipe = Popen(['ssh', h,'vzlist','-j'], stdout=PIPE, universal_newlines=True) #Load Json info of guests json_data = json.loads(pipe.stdout.read()) #loop through guests for j in json_data: #Add information to host vars inventory['_meta']['hostvars'][j['hostname']] = {'ctid': j['ctid'], 'veid': j['veid'], 'vpsid': j['vpsid'], 'private_path': j['private'], 'root_path': j['root'], 'ip': j['ip']} #determine group from guest description if j['description'] is not None: groups = j['description'].split(",") else: groups = default_group #add guest to inventory for g in groups: if g not in inventory: inventory[g] = {'hosts': []} inventory[g]['hosts'].append(j['hostname']) return inventory if len(sys.argv) == 2 and sys.argv[1] == '--list': inv_json = get_guests() print(json.dumps(inv_json, sort_keys=True)) elif len(sys.argv) == 3 and sys.argv[1] == '--host': print(json.dumps({})) else: print("Need an argument, either --list or --host ") ansible-2.1.1.0/contrib/inventory/ovirt.ini0000664000175400017540000000246212746444466022012 0ustar jenkinsjenkins00000000000000# Copyright 2013 Google Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Author: Josha Inglis based on the gce.ini by Eric Johnson [ovirt] # ovirt Service Account configuration information can be stored in the # libcloud 'secrets.py' file. 
Ideally, the 'secrets.py' file will already # exist in your PYTHONPATH and be picked up automatically with an import # statement in the inventory script. However, you can specify an absolute # path to the secrets.py file with 'libcloud_secrets' parameter. ovirt_api_secrets = # If you are not going to use a 'secrets.py' file, you can set the necessary # authorization parameters here. ovirt_url = ovirt_username = ovirt_password = ansible-2.1.1.0/contrib/inventory/ovirt.py0000775000175400017540000002300112746444466021656 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2015 IIX Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . """ ovirt external inventory script ================================= Generates inventory that Ansible can understand by making API requests to oVirt via the ovirt-engine-sdk-python library. When run against a specific host, this script returns the following variables based on the data obtained from the ovirt_sdk Node object: - ovirt_uuid - ovirt_id - ovirt_image - ovirt_machine_type - ovirt_ips - ovirt_name - ovirt_description - ovirt_status - ovirt_zone - ovirt_tags - ovirt_stats When run in --list mode, instances are grouped by the following categories: - zone: zone group name. - instance tags: An entry is created for each tag. For example, if you have two instances with a common tag called 'foo', they will both be grouped together under the 'tag_foo' name. - network name: the name of the network is appended to 'network_' (e.g. the 'default' network will result in a group named 'network_default') - running status: group name prefixed with 'status_' (e.g. status_up, status_down,..) 
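For example, a running VM in cluster 'alpha' carrying the tag 'web' and a NIC
on the 'ovirtmgmt' network would appear in the groups 'alpha', 'tag_web',
'network_ovirtmgmt' and 'status_up' (all names here are illustrative).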
Examples: Execute uname on all instances in the us-central1-a zone $ ansible -i ovirt.py us-central1-a -m shell -a "/bin/uname -a" Use the ovirt inventory script to print out instance specific information $ contrib/inventory/ovirt.py --host my_instance Author: Josha Inglis based on the gce.py by Eric Johnson Version: 0.0.1 """ USER_AGENT_PRODUCT = "Ansible-ovirt_inventory_plugin" USER_AGENT_VERSION = "v1" import sys import os import argparse import ConfigParser from collections import defaultdict try: import json except ImportError: # noinspection PyUnresolvedReferences,PyPackageRequirements import simplejson as json try: # noinspection PyUnresolvedReferences from ovirtsdk.api import API # noinspection PyUnresolvedReferences from ovirtsdk.xml import params except ImportError: print("ovirt inventory script requires ovirt-engine-sdk-python") sys.exit(1) class OVirtInventory(object): def __init__(self): # Read settings and parse CLI arguments self.args = self.parse_cli_args() self.driver = self.get_ovirt_driver() # Just display data for specific host if self.args.host: print(self.json_format_dict( self.node_to_dict(self.get_instance(self.args.host)), pretty=self.args.pretty )) sys.exit(0) # Otherwise, assume user wants all instances grouped print( self.json_format_dict( data=self.group_instances(), pretty=self.args.pretty ) ) sys.exit(0) @staticmethod def get_ovirt_driver(): """ Determine the ovirt authorization settings and return a ovirt_sdk driver. :rtype : ovirtsdk.api.API """ kwargs = {} ovirt_ini_default_path = os.path.join( os.path.dirname(os.path.realpath(__file__)), "ovirt.ini") ovirt_ini_path = os.environ.get('OVIRT_INI_PATH', ovirt_ini_default_path) # Create a ConfigParser. # This provides empty defaults to each key, so that environment # variable configuration (as opposed to INI configuration) is able # to work. config = ConfigParser.SafeConfigParser(defaults={ 'ovirt_url': '', 'ovirt_username': '', 'ovirt_password': '', 'ovirt_api_secrets': '', }) if 'ovirt' not in config.sections(): config.add_section('ovirt') config.read(ovirt_ini_path) # Attempt to get ovirt params from a configuration file, if one # exists. secrets_path = config.get('ovirt', 'ovirt_api_secrets') secrets_found = False try: # noinspection PyUnresolvedReferences,PyPackageRequirements import secrets kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {}) secrets_found = True except ImportError: pass if not secrets_found and secrets_path: if not secrets_path.endswith('secrets.py'): err = "Must specify ovirt_sdk secrets file as /absolute/path/to/secrets.py" print(err) sys.exit(1) sys.path.append(os.path.dirname(secrets_path)) try: # noinspection PyUnresolvedReferences,PyPackageRequirements import secrets kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {}) except ImportError: pass if not secrets_found: kwargs = { 'url': config.get('ovirt', 'ovirt_url'), 'username': config.get('ovirt', 'ovirt_username'), 'password': config.get('ovirt', 'ovirt_password'), } # If the appropriate environment variables are set, they override # other configuration; process those into our args and kwargs. kwargs['url'] = os.environ.get('OVIRT_URL', kwargs['url']) kwargs['username'] = next(val for val in [os.environ.get('OVIRT_EMAIL'), os.environ.get('OVIRT_USERNAME'), kwargs['username']] if val is not None) kwargs['password'] = next(val for val in [os.environ.get('OVIRT_PASS'), os.environ.get('OVIRT_PASSWORD'), kwargs['password']] if val is not None) # Retrieve and return the ovirt driver. 
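        # By this point the OVIRT_* environment variables have overridden
        # anything read from secrets.py or ovirt.ini, so kwargs holds the
        # effective url/username/password. Note that certificate
        # verification is disabled (insecure=True) below.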
return API(insecure=True, **kwargs) @staticmethod def parse_cli_args(): """ Command line argument processing :rtype : argparse.Namespace """ parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on ovirt') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all information about an instance') parser.add_argument('--pretty', action='store_true', default=False, help='Pretty format (default: False)') return parser.parse_args() def node_to_dict(self, inst): """ :type inst: params.VM """ if inst is None: return {} inst.get_custom_properties() ips = [ip.get_address() for ip in inst.get_guest_info().get_ips().get_ip()] \ if inst.get_guest_info() is not None else [] stats = {} for stat in inst.get_statistics().list(): stats[stat.get_name()] = stat.get_values().get_value()[0].get_datum() return { 'ovirt_uuid': inst.get_id(), 'ovirt_id': inst.get_id(), 'ovirt_image': inst.get_os().get_type(), 'ovirt_machine_type': inst.get_instance_type(), 'ovirt_ips': ips, 'ovirt_name': inst.get_name(), 'ovirt_description': inst.get_description(), 'ovirt_status': inst.get_status().get_state(), 'ovirt_zone': inst.get_cluster().get_id(), 'ovirt_tags': self.get_tags(inst), 'ovirt_stats': stats, # Hosts don't have a public name, so we add an IP 'ansible_ssh_host': ips[0] if len(ips) > 0 else None } @staticmethod def get_tags(inst): """ :type inst: params.VM """ return [x.get_name() for x in inst.get_tags().list()] # noinspection PyBroadException,PyUnusedLocal def get_instance(self, instance_name): """Gets details about a specific instance """ try: return self.driver.vms.get(name=instance_name) except Exception as e: return None def group_instances(self): """Group all instances""" groups = defaultdict(list) meta = {"hostvars": {}} for node in self.driver.vms.list(): assert isinstance(node, params.VM) name = node.get_name() meta["hostvars"][name] = self.node_to_dict(node) zone = node.get_cluster().get_name() groups[zone].append(name) tags = self.get_tags(node) for t in tags: tag = 'tag_%s' % t groups[tag].append(name) nets = [x.get_name() for x in node.get_nics().list()] for net in nets: net = 'network_%s' % net groups[net].append(name) status = node.get_status().get_state() stat = 'status_%s' % status.lower() if stat in groups: groups[stat].append(name) else: groups[stat] = [name] groups["_meta"] = meta return groups @staticmethod def json_format_dict(data, pretty=False): """ Converts a dict to a JSON object and dumps it as a formatted string """ if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) # Run the script OVirtInventory() ansible-2.1.1.0/contrib/inventory/proxmox.py0000775000175400017540000001712412746444466022240 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (C) 2014 Mathieu GAUTHIER-LAFAYE # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
# Updated 2016 by Matt Harris # # Added support for Proxmox VE 4.x # Added support for using the Notes field of a VM to define groups and variables: # A well-formatted JSON object in the Notes field will be added to the _meta # section for that VM. In addition, the "groups" key of this JSON object may be # used to specify group membership: # # { "groups": ["utility", "databases"], "a": false, "b": true } import urllib try: import json except ImportError: import simplejson as json import os import sys from optparse import OptionParser from six import iteritems from ansible.module_utils.urls import open_url class ProxmoxNodeList(list): def get_names(self): return [node['node'] for node in self] class ProxmoxVM(dict): def get_variables(self): variables = {} for key, value in iteritems(self): variables['proxmox_' + key] = value return variables class ProxmoxVMList(list): def __init__(self, data=[]): for item in data: self.append(ProxmoxVM(item)) def get_names(self): return [vm['name'] for vm in self if vm['template'] != 1] def get_by_name(self, name): results = [vm for vm in self if vm['name'] == name] return results[0] if len(results) > 0 else None def get_variables(self): variables = {} for vm in self: variables[vm['name']] = vm.get_variables() return variables class ProxmoxPoolList(list): def get_names(self): return [pool['poolid'] for pool in self] class ProxmoxPool(dict): def get_members_name(self): return [member['name'] for member in self['members'] if member['template'] != 1] class ProxmoxAPI(object): def __init__(self, options): self.options = options self.credentials = None if not options.url: raise Exception('Missing mandatory parameter --url (or PROXMOX_URL).') elif not options.username: raise Exception('Missing mandatory parameter --username (or PROXMOX_USERNAME).') elif not options.password: raise Exception('Missing mandatory parameter --password (or PROXMOX_PASSWORD).') def auth(self): request_path = '{}api2/json/access/ticket'.format(self.options.url) request_params = urllib.urlencode({ 'username': self.options.username, 'password': self.options.password, }) data = json.load(open_url(request_path, data=request_params)) self.credentials = { 'ticket': data['data']['ticket'], 'CSRFPreventionToken': data['data']['CSRFPreventionToken'], } def get(self, url, data=None): request_path = '{}{}'.format(self.options.url, url) headers = {'Cookie': 'PVEAuthCookie={}'.format(self.credentials['ticket'])} request = open_url(request_path, data=data, headers=headers) response = json.load(request) return response['data'] def nodes(self): return ProxmoxNodeList(self.get('api2/json/nodes')) def vms_by_type(self, node, type): return ProxmoxVMList(self.get('api2/json/nodes/{}/{}'.format(node, type))) def vm_description_by_type(self, node, vm, type): return self.get('api2/json/nodes/{}/{}/{}/config'.format(node, type, vm)) def node_qemu(self, node): return self.vms_by_type(node, 'qemu') def node_qemu_description(self, node, vm): return self.vm_description_by_type(node, vm, 'qemu') def node_lxc(self, node): return self.vms_by_type(node, 'lxc') def node_lxc_description(self, node, vm): return self.vm_description_by_type(node, vm, 'lxc') def pools(self): return ProxmoxPoolList(self.get('api2/json/pools')) def pool(self, poolid): return ProxmoxPool(self.get('api2/json/pools/{}'.format(poolid))) def main_list(options): results = { 'all': { 'hosts': [], }, '_meta': { 'hostvars': {}, } } proxmox_api = ProxmoxAPI(options) proxmox_api.auth() for node in proxmox_api.nodes().get_names(): qemu_list = 
proxmox_api.node_qemu(node) results['all']['hosts'] += qemu_list.get_names() results['_meta']['hostvars'].update(qemu_list.get_variables()) lxc_list = proxmox_api.node_lxc(node) results['all']['hosts'] += lxc_list.get_names() results['_meta']['hostvars'].update(lxc_list.get_variables()) for vm in results['_meta']['hostvars']: vmid = results['_meta']['hostvars'][vm]['proxmox_vmid'] try: type = results['_meta']['hostvars'][vm]['proxmox_type'] except KeyError: type = 'qemu' try: description = proxmox_api.vm_description_by_type(node, vmid, type)['description'] except KeyError: description = None try: metadata = json.loads(description) except TypeError: metadata = {} except ValueError: metadata = { 'notes': description } if 'groups' in metadata: # print metadata for group in metadata['groups']: if group not in results: results[group] = { 'hosts': [] } results[group]['hosts'] += [vm] results['_meta']['hostvars'][vm].update(metadata) # pools for pool in proxmox_api.pools().get_names(): results[pool] = { 'hosts': proxmox_api.pool(pool).get_members_name(), } return results def main_host(options): proxmox_api = ProxmoxAPI(options) proxmox_api.auth() for node in proxmox_api.nodes().get_names(): qemu_list = proxmox_api.node_qemu(node) qemu = qemu_list.get_by_name(options.host) if qemu: return qemu.get_variables() return {} def main(): parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME') parser.add_option('--list', action="store_true", default=False, dest="list") parser.add_option('--host', dest="host") parser.add_option('--url', default=os.environ.get('PROXMOX_URL'), dest='url') parser.add_option('--username', default=os.environ.get('PROXMOX_USERNAME'), dest='username') parser.add_option('--password', default=os.environ.get('PROXMOX_PASSWORD'), dest='password') parser.add_option('--pretty', action="store_true", default=False, dest='pretty') (options, args) = parser.parse_args() if options.list: data = main_list(options) elif options.host: data = main_host(options) else: parser.print_help() sys.exit(1) indent = None if options.pretty: indent = 2 print(json.dumps(data, indent=indent)) if __name__ == '__main__': main() ansible-2.1.1.0/contrib/inventory/rackhd.py0000775000175400017540000000450112746444466021753 0ustar jenkinsjenkins00000000000000#!/usr/bin/python import json import requests import os import argparse import types RACKHD_URL = 'http://localhost:8080' class RackhdInventory(object): def __init__(self, nodeids): self._inventory = {} for nodeid in nodeids: self._load_inventory_data(nodeid) inventory = {} for nodeid,info in self._inventory.iteritems(): inventory[nodeid]= (self._format_output(nodeid, info)) print(json.dumps(inventory)) def _load_inventory_data(self, nodeid): info = {} info['ohai'] = RACKHD_URL + '/api/common/nodes/{0}/catalogs/ohai'.format(nodeid ) info['lookup'] = RACKHD_URL + '/api/common/lookups/?q={0}'.format(nodeid) results = {} for key,url in info.iteritems(): r = requests.get( url, verify=False) results[key] = r.text self._inventory[nodeid] = results def _format_output(self, nodeid, info): try: node_info = json.loads(info['lookup']) ipaddress = '' if len(node_info) > 0: ipaddress = node_info[0]['ipAddress'] output = { 'hosts':[ipaddress],'vars':{}} for key,result in info.iteritems(): output['vars'][key] = json.loads(result) output['vars']['ansible_ssh_user'] = 'monorail' except KeyError: pass return output def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('--host') parser.add_argument('--list', action='store_true') return 
parser.parse_args() try: #check if rackhd url(ie:10.1.1.45:8080) is specified in the environment RACKHD_URL = 'http://' + str(os.environ['RACKHD_URL']) except: #use default values pass # Use the nodeid specified in the environment to limit the data returned # or return data for all available nodes nodeids = [] if (parse_args().host): try: nodeids += parse_args().host.split(',') RackhdInventory(nodeids) except: pass if (parse_args().list): try: url = RACKHD_URL + '/api/common/nodes' r = requests.get( url, verify=False) data = json.loads(r.text) for entry in data: if entry['type'] == 'compute': nodeids.append(entry['id']) RackhdInventory(nodeids) except: pass ansible-2.1.1.0/contrib/inventory/rax.ini0000664000175400017540000000435512746444466021444 0ustar jenkinsjenkins00000000000000# Ansible Rackspace external inventory script settings # [rax] # Environment Variable: RAX_CREDS_FILE # # An optional configuration that points to a pyrax-compatible credentials # file. # # If not supplied, rax.py will look for a credentials file # at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK, # and therefore requires a file formatted per the SDK's specifications. # # https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md # creds_file = ~/.rackspace_cloud_credentials # Environment Variable: RAX_REGION # # An optional environment variable to narrow inventory search # scope. If used, needs a value like ORD, DFW, SYD (a Rackspace # datacenter) and optionally accepts a comma-separated list. # regions = IAD,ORD,DFW # Environment Variable: RAX_ENV # # A configuration that will use an environment as configured in # ~/.pyrax.cfg, see # https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md # env = prod # Environment Variable: RAX_META_PREFIX # Default: meta # # A configuration that changes the prefix used for meta key/value groups. # For compatibility with ec2.py set to "tag" # meta_prefix = meta # Environment Variable: RAX_ACCESS_NETWORK # Default: public # # A configuration that will tell the inventory script to use a specific # server network to determine the ansible_ssh_host value. If no address # is found, ansible_ssh_host will not be set. Accepts a comma-separated # list of network names, the first found wins. # access_network = public # Environment Variable: RAX_ACCESS_IP_VERSION # Default: 4 # # A configuration related to "access_network" that will attempt to # determine the ansible_ssh_host value for either IPv4 or IPv6. If no # address is found, ansible_ssh_host will not be set. # Acceptable values are: 4 or 6. Values other than 4 or 6 # will be ignored, and 4 will be used. Accepts a comma separated list, # the first found wins. # access_ip_version = 4 # Environment Variable: RAX_CACHE_MAX_AGE # Default: 600 # # A configuration the changes the behavior or the inventory cache. # Inventory listing performed before this value will be returned from # the cache instead of making a full request for all inventory. Setting # this value to 0 will force a full request. # cache_max_age = 600ansible-2.1.1.0/contrib/inventory/rax.py0000775000175400017540000003721212746444466021316 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # (c) 2013, Jesse Keating , # Matt Martz # # This file is part of Ansible. # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . """ Rackspace Cloud Inventory Authors: Jesse Keating , Matt Martz Description: Generates inventory that Ansible can understand by making API request to Rackspace Public Cloud API When run against a specific host, this script returns variables similar to: rax_os-ext-sts_task_state rax_addresses rax_links rax_image rax_os-ext-sts_vm_state rax_flavor rax_id rax_rax-bandwidth_bandwidth rax_user_id rax_os-dcf_diskconfig rax_accessipv4 rax_accessipv6 rax_progress rax_os-ext-sts_power_state rax_metadata rax_status rax_updated rax_hostid rax_name rax_created rax_tenant_id rax_loaded Configuration: rax.py can be configured using a rax.ini file or via environment variables. The rax.ini file should live in the same directory along side this script. The section header for configuration values related to this inventory plugin is [rax] [rax] creds_file = ~/.rackspace_cloud_credentials regions = IAD,ORD,DFW env = prod meta_prefix = meta access_network = public access_ip_version = 4 Each of these configurations also has a corresponding environment variable. An environment variable will override a configuration file value. creds_file: Environment Variable: RAX_CREDS_FILE An optional configuration that points to a pyrax-compatible credentials file. If not supplied, rax.py will look for a credentials file at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK, and therefore requires a file formatted per the SDK's specifications. https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md regions: Environment Variable: RAX_REGION An optional environment variable to narrow inventory search scope. If used, needs a value like ORD, DFW, SYD (a Rackspace datacenter) and optionally accepts a comma-separated list. environment: Environment Variable: RAX_ENV A configuration that will use an environment as configured in ~/.pyrax.cfg, see https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md meta_prefix: Environment Variable: RAX_META_PREFIX Default: meta A configuration that changes the prefix used for meta key/value groups. For compatibility with ec2.py set to "tag" access_network: Environment Variable: RAX_ACCESS_NETWORK Default: public A configuration that will tell the inventory script to use a specific server network to determine the ansible_ssh_host value. If no address is found, ansible_ssh_host will not be set. Accepts a comma-separated list of network names, the first found wins. access_ip_version: Environment Variable: RAX_ACCESS_IP_VERSION Default: 4 A configuration related to "access_network" that will attempt to determine the ansible_ssh_host value for either IPv4 or IPv6. If no address is found, ansible_ssh_host will not be set. Acceptable values are: 4 or 6. Values other than 4 or 6 will be ignored, and 4 will be used. Accepts a comma-separated list, the first found wins. 
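Precedence illustration (an editorial sketch, not additional
configuration): given a rax.ini containing

    [rax]
    regions = IAD

and the environment variable RAX_REGION=ORD set, only ORD is queried,
because an environment variable always overrides the corresponding
configuration file value.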
Examples: List server instances $ RAX_CREDS_FILE=~/.raxpub rax.py --list List servers in ORD datacenter only $ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD rax.py --list List servers in ORD and DFW datacenters $ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD,DFW rax.py --list Get server details for server named "server.example.com" $ RAX_CREDS_FILE=~/.raxpub rax.py --host server.example.com Use the instance private IP to connect (instead of public IP) $ RAX_CREDS_FILE=~/.raxpub RAX_ACCESS_NETWORK=private rax.py --list """ import os import re import sys import argparse import warnings import collections import ConfigParser from six import iteritems from ansible.constants import get_config, mk_boolean try: import json except ImportError: import simplejson as json try: import pyrax from pyrax.utils import slugify except ImportError: print('pyrax is required for this module') sys.exit(1) from time import time NON_CALLABLES = (basestring, bool, dict, int, list, type(None)) def load_config_file(): p = ConfigParser.ConfigParser() config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'rax.ini') try: p.read(config_file) except ConfigParser.Error: return None else: return p p = load_config_file() def rax_slugify(value): return 'rax_%s' % (re.sub('[^\w-]', '_', value).lower().lstrip('_')) def to_dict(obj): instance = {} for key in dir(obj): value = getattr(obj, key) if isinstance(value, NON_CALLABLES) and not key.startswith('_'): key = rax_slugify(key) instance[key] = value return instance def host(regions, hostname): hostvars = {} for region in regions: # Connect to the region cs = pyrax.connect_to_cloudservers(region=region) for server in cs.servers.list(): if server.name == hostname: for key, value in to_dict(server).items(): hostvars[key] = value # And finally, add an IP address hostvars['ansible_ssh_host'] = server.accessIPv4 print(json.dumps(hostvars, sort_keys=True, indent=4)) def _list_into_cache(regions): groups = collections.defaultdict(list) hostvars = collections.defaultdict(dict) images = {} cbs_attachments = collections.defaultdict(dict) prefix = get_config(p, 'rax', 'meta_prefix', 'RAX_META_PREFIX', 'meta') networks = get_config(p, 'rax', 'access_network', 'RAX_ACCESS_NETWORK', 'public', islist=True) try: ip_versions = map(int, get_config(p, 'rax', 'access_ip_version', 'RAX_ACCESS_IP_VERSION', 4, islist=True)) except: ip_versions = [4] else: ip_versions = [v for v in ip_versions if v in [4, 6]] if not ip_versions: ip_versions = [4] # Go through all the regions looking for servers for region in regions: # Connect to the region cs = pyrax.connect_to_cloudservers(region=region) if cs is None: warnings.warn( 'Connecting to Rackspace region "%s" has caused Pyrax to ' 'return None. Is this a valid region?' 
% region, RuntimeWarning) continue for server in cs.servers.list(): # Create a group on region groups[region].append(server.name) # Check if group metadata key in servers' metadata group = server.metadata.get('group') if group: groups[group].append(server.name) for extra_group in server.metadata.get('groups', '').split(','): if extra_group: groups[extra_group].append(server.name) # Add host metadata for key, value in to_dict(server).items(): hostvars[server.name][key] = value hostvars[server.name]['rax_region'] = region for key, value in iteritems(server.metadata): groups['%s_%s_%s' % (prefix, key, value)].append(server.name) groups['instance-%s' % server.id].append(server.name) groups['flavor-%s' % server.flavor['id']].append(server.name) # Handle boot from volume if not server.image: if not cbs_attachments[region]: cbs = pyrax.connect_to_cloud_blockstorage(region) for vol in cbs.list(): if mk_boolean(vol.bootable): for attachment in vol.attachments: metadata = vol.volume_image_metadata server_id = attachment['server_id'] cbs_attachments[region][server_id] = { 'id': metadata['image_id'], 'name': slugify(metadata['image_name']) } image = cbs_attachments[region].get(server.id) if image: server.image = {'id': image['id']} hostvars[server.name]['rax_image'] = server.image hostvars[server.name]['rax_boot_source'] = 'volume' images[image['id']] = image['name'] else: hostvars[server.name]['rax_boot_source'] = 'local' try: imagegroup = 'image-%s' % images[server.image['id']] groups[imagegroup].append(server.name) groups['image-%s' % server.image['id']].append(server.name) except KeyError: try: image = cs.images.get(server.image['id']) except cs.exceptions.NotFound: groups['image-%s' % server.image['id']].append(server.name) else: images[image.id] = image.human_id groups['image-%s' % image.human_id].append(server.name) groups['image-%s' % server.image['id']].append(server.name) # And finally, add an IP address ansible_ssh_host = None # use accessIPv[46] instead of looping address for 'public' for network_name in networks: if ansible_ssh_host: break if network_name == 'public': for version_name in ip_versions: if ansible_ssh_host: break if version_name == 6 and server.accessIPv6: ansible_ssh_host = server.accessIPv6 elif server.accessIPv4: ansible_ssh_host = server.accessIPv4 if not ansible_ssh_host: addresses = server.addresses.get(network_name, []) for address in addresses: for version_name in ip_versions: if ansible_ssh_host: break if address.get('version') == version_name: ansible_ssh_host = address.get('addr') break if ansible_ssh_host: hostvars[server.name]['ansible_ssh_host'] = ansible_ssh_host if hostvars: groups['_meta'] = {'hostvars': hostvars} with open(get_cache_file_path(regions), 'w') as cache_file: json.dump(groups, cache_file) def get_cache_file_path(regions): regions_str = '.'.join([reg.strip().lower() for reg in regions]) ansible_tmp_path = os.path.join(os.path.expanduser("~"), '.ansible', 'tmp') if not os.path.exists(ansible_tmp_path): os.makedirs(ansible_tmp_path) return os.path.join(ansible_tmp_path, 'ansible-rax-%s-%s.cache' % ( pyrax.identity.username, regions_str)) def _list(regions, refresh_cache=True): cache_max_age = int(get_config(p, 'rax', 'cache_max_age', 'RAX_CACHE_MAX_AGE', 600)) if (not os.path.exists(get_cache_file_path(regions)) or refresh_cache or (time() - os.stat(get_cache_file_path(regions))[-1]) > cache_max_age): # Cache file doesn't exist or older than 10m or refresh cache requested _list_into_cache(regions) with open(get_cache_file_path(regions), 'r') as 
cache_file:
        groups = json.load(cache_file)

    print(json.dumps(groups, sort_keys=True, indent=4))


def parse_args():
    parser = argparse.ArgumentParser(description='Ansible Rackspace Cloud '
                                                 'inventory module')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--list', action='store_true',
                       help='List active servers')
    group.add_argument('--host', help='List details about the specific host')
    parser.add_argument('--refresh-cache', action='store_true', default=False,
                        help=('Force refresh of cache, making API requests to '
                              'RackSpace (default: False - use cache files)'))
    return parser.parse_args()


def setup():
    default_creds_file = os.path.expanduser('~/.rackspace_cloud_credentials')

    env = get_config(p, 'rax', 'environment', 'RAX_ENV', None)
    if env:
        pyrax.set_environment(env)

    keyring_username = pyrax.get_setting('keyring_username')

    # Attempt to grab credentials from environment first
    creds_file = get_config(p, 'rax', 'creds_file',
                            'RAX_CREDS_FILE', None)
    if creds_file is not None:
        creds_file = os.path.expanduser(creds_file)
    else:
        # But if that fails, use the default location of
        # ~/.rackspace_cloud_credentials
        if os.path.isfile(default_creds_file):
            creds_file = default_creds_file
        elif not keyring_username:
            sys.stderr.write('No value in environment variable %s and/or no '
                             'credentials file at %s\n'
                             % ('RAX_CREDS_FILE', default_creds_file))
            sys.exit(1)

    identity_type = pyrax.get_setting('identity_type')
    pyrax.set_setting('identity_type', identity_type or 'rackspace')

    region = pyrax.get_setting('region')

    try:
        if keyring_username:
            pyrax.keyring_auth(keyring_username, region=region)
        else:
            pyrax.set_credential_file(creds_file, region=region)
    except Exception as e:
        sys.stderr.write("%s: %s\n" % (e, e.message))
        sys.exit(1)

    regions = []
    if region:
        regions.append(region)
    else:
        region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all',
                                 islist=True)
        for region in region_list:
            region = region.strip().upper()
            if region == 'ALL':
                regions = pyrax.regions
                break
            elif region not in pyrax.regions:
                sys.stderr.write('Unsupported region %s\n' % region)
                sys.exit(1)
            elif region not in regions:
                regions.append(region)

    return regions


def main():
    args = parse_args()
    regions = setup()
    if args.list:
        _list(regions, refresh_cache=args.refresh_cache)
    elif args.host:
        host(regions, args.host)
    sys.exit(0)


if __name__ == '__main__':
    main()
ansible-2.1.1.0/contrib/inventory/rudder.ini0000664000175400017540000000200712746444466022127 0ustar jenkinsjenkins00000000000000# Rudder external inventory script settings
#

[rudder]

# Your Rudder server API URL, typically:
# https://rudder.local/rudder/api
uri = https://rudder.local/rudder/api

# By default, Rudder uses a self-signed certificate. Set this to True
# to disable certificate validation.
disable_ssl_certificate_validation = True

# Your Rudder API token, created in the Web interface.
token = aaabbbccc

# Rudder API version to use; use "latest" for the latest available
# version.
version = latest

# Property to use as group name in the output.
# Can generally be "id" or "displayName".
group_name = displayName

# Fail if there are two groups with the same name or two hosts with the
# same hostname in the output.
fail_if_name_collision = True

# We cache the results of the Rudder API in a local file
cache_path = /tmp/ansible-rudder.cache

# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
# Set to 0 to disable cache.
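#
# For example, to keep results for ten minutes instead (an illustrative
# value, not a recommendation):
# cache_max_age = 600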
cache_max_age = 500
ansible-2.1.1.0/contrib/inventory/rudder.py0000775000175400017540000002466212746444466022006 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python

# Copyright (c) 2015, Normation SAS
#
# Inspired by the EC2 inventory plugin:
# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

######################################################################

'''
Rudder external inventory script
=================================

Generates inventory that Ansible can understand by making API requests to
a Rudder server. This script is compatible with Rudder 2.10 or later.

The output JSON includes all your Rudder groups, containing the hostnames
of their nodes. Groups and nodes have a variable called rudder_group_id
and rudder_node_id respectively, which is the Rudder internal id of the
item, allowing you to identify them uniquely.

Host variables also include your node properties, which are key => value
properties set by the API and specific to each node.

This script assumes there is a rudder.ini file alongside it. To specify a
different path to rudder.ini, define the RUDDER_INI_PATH environment
variable:

    export RUDDER_INI_PATH=/path/to/my_rudder.ini

You have to configure your Rudder server information, either in rudder.ini
or by overriding it with environment variables:

    export RUDDER_API_VERSION='latest'
    export RUDDER_API_TOKEN='my_token'
    export RUDDER_API_URI='https://rudder.local/rudder/api'
'''

import sys
import os
import re
import argparse
import six
import httplib2 as http

from time import time
from six.moves import configparser

try:
    from urlparse import urlparse
except ImportError:
    from urllib.parse import urlparse

try:
    import json
except ImportError:
    import simplejson as json


class RudderInventory(object):
    def __init__(self):
        ''' Main execution path '''

        # Empty inventory by default
        self.inventory = {}

        # Read settings and parse CLI arguments
        self.read_settings()
        self.parse_cli_args()

        # Create connection
        self.conn = http.Http(disable_ssl_certificate_validation=self.disable_ssl_validation)

        # Cache
        if self.args.refresh_cache:
            self.update_cache()
        elif not self.is_cache_valid():
            self.update_cache()
        else:
            self.load_cache()

        data_to_print = {}

        if self.args.host:
            data_to_print = self.get_host_info(self.args.host)
        elif self.args.list:
            data_to_print = self.get_list_info()

        print(self.json_format_dict(data_to_print, True))

    def read_settings(self):
        ''' Reads the settings from the rudder.ini file '''

        if six.PY2:
            config = configparser.SafeConfigParser()
        else:
            config = configparser.ConfigParser()

        rudder_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'rudder.ini')
        rudder_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('RUDDER_INI_PATH', rudder_default_ini_path)))
        config.read(rudder_ini_path)

        self.token = os.environ.get('RUDDER_API_TOKEN', config.get('rudder', 'token'))
        self.version = os.environ.get('RUDDER_API_VERSION',
config.get('rudder', 'version')) self.uri = os.environ.get('RUDDER_API_URI', config.get('rudder', 'uri')) self.disable_ssl_validation = config.getboolean('rudder', 'disable_ssl_certificate_validation') self.group_name = config.get('rudder', 'group_name') self.fail_if_name_collision = config.getboolean('rudder', 'fail_if_name_collision') self.cache_path = config.get('rudder', 'cache_path') self.cache_max_age = config.getint('rudder', 'cache_max_age') def parse_cli_args(self): ''' Command line argument processing ''' parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Rudder inventory') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests to Rudder (default: False - use cache files)') self.args = parser.parse_args() def is_cache_valid(self): ''' Determines if the cache files have expired, or if it is still valid ''' if os.path.isfile(self.cache_path): mod_time = os.path.getmtime(self.cache_path) current_time = time() if (mod_time + self.cache_max_age) > current_time: return True return False def load_cache(self): ''' Reads the cache from the cache file sets self.cache ''' cache = open(self.cache_path, 'r') json_cache = cache.read() try: self.inventory = json.loads(json_cache) except ValueError as e: self.fail_with_error('Could not parse JSON response from local cache', 'parsing local cache') def write_cache(self): ''' Writes data in JSON format to a file ''' json_data = self.json_format_dict(self.inventory, True) cache = open(self.cache_path, 'w') cache.write(json_data) cache.close() def get_nodes(self): ''' Gets the nodes list from Rudder ''' path = '/nodes?select=nodeAndPolicyServer' result = self.api_call(path) nodes = {} for node in result['data']['nodes']: nodes[node['id']] = {} nodes[node['id']]['hostname'] = node['hostname'] if 'properties' in node: nodes[node['id']]['properties'] = node['properties'] else: nodes[node['id']]['properties'] = [] return nodes def get_groups(self): ''' Gets the groups list from Rudder ''' path = '/groups' result = self.api_call(path) groups = {} for group in result['data']['groups']: groups[group['id']] = {'hosts': group['nodeIds'], 'name': self.to_safe(group[self.group_name])} return groups def update_cache(self): ''' Fetches the inventory information from Rudder and creates the inventory ''' nodes = self.get_nodes() groups = self.get_groups() inventory = {} for group in groups: # Check for name collision if self.fail_if_name_collision: if groups[group]['name'] in inventory: self.fail_with_error('Name collision on groups: "%s" appears twice' % groups[group]['name'], 'creating groups') # Add group to inventory inventory[groups[group]['name']] = {} inventory[groups[group]['name']]['hosts'] = [] inventory[groups[group]['name']]['vars'] = {} inventory[groups[group]['name']]['vars']['rudder_group_id'] = group for node in groups[group]['hosts']: # Add node to group inventory[groups[group]['name']]['hosts'].append(nodes[node]['hostname']) properties = {} for node in nodes: # Check for name collision if self.fail_if_name_collision: if nodes[node]['hostname'] in properties: self.fail_with_error('Name collision on hosts: "%s" appears twice' % nodes[node]['hostname'], 'creating hosts') # Add node properties to inventory 
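                # The resulting hostvars entry maps each hostname to its
                # Rudder node id plus one key per node property; the shape
                # below is shown for illustration only:
                #   {"node1.example.com": {"rudder_node_id": "...",
                #                          "some_property": "some_value"}}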
properties[nodes[node]['hostname']] = {} properties[nodes[node]['hostname']]['rudder_node_id'] = node for node_property in nodes[node]['properties']: properties[nodes[node]['hostname']][self.to_safe(node_property['name'])] = node_property['value'] inventory['_meta'] = {} inventory['_meta']['hostvars'] = properties self.inventory = inventory if self.cache_max_age > 0: self.write_cache() def get_list_info(self): ''' Gets inventory information from local cache ''' return self.inventory def get_host_info(self, hostname): ''' Gets information about a specific host from local cache ''' if hostname in self.inventory['_meta']['hostvars']: return self.inventory['_meta']['hostvars'][hostname] else: return {} def api_call(self, path): ''' Performs an API request ''' headers = { 'X-API-Token': self.token, 'X-API-Version': self.version, 'Content-Type': 'application/json;charset=utf-8' } target = urlparse(self.uri + path) method = 'GET' body = '' try: response, content = self.conn.request(target.geturl(), method, body, headers) except: self.fail_with_error('Error connecting to Rudder server') try: data = json.loads(content) except ValueError as e: self.fail_with_error('Could not parse JSON response from Rudder API', 'reading API response') return data def fail_with_error(self, err_msg, err_operation=None): ''' Logs an error to std err for ansible-playbook to consume and exit ''' if err_operation: err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format( err_msg=err_msg, err_operation=err_operation) sys.stderr.write(err_msg) sys.exit(1) def json_format_dict(self, data, pretty=False): ''' Converts a dict to a JSON object and dumps it as a formatted string ''' if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) def to_safe(self, word): ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible variable names ''' return re.sub('[^A-Za-z0-9\_]', '_', word) # Run the script RudderInventory() ansible-2.1.1.0/contrib/inventory/serf.py0000775000175400017540000000573012746444466021463 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # (c) 2015, Marc Abramowitz # # This file is part of Ansible. # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Dynamic inventory script which lets you use nodes discovered by Serf # (https://serfdom.io/). 
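#
# Example invocations (shown for illustration; the RPC address below is
# an assumed local agent):
#
#   SERF_RPC_ADDR=127.0.0.1:7373 ./serf.py --list
#   SERF_RPC_ADDR=127.0.0.1:7373 ./serf.py --host mynode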
# # Requires the `serfclient` Python module from # https://pypi.python.org/pypi/serfclient # # Environment variables # --------------------- # - `SERF_RPC_ADDR` # - `SERF_RPC_AUTH` # # These variables are described at https://www.serfdom.io/docs/commands/members.html#_rpc_addr import argparse import collections import os import sys # https://pypi.python.org/pypi/serfclient from serfclient import SerfClient, EnvironmentConfig try: import json except ImportError: import simplejson as json _key = 'serf' def _serf_client(): env = EnvironmentConfig() return SerfClient(host=env.host, port=env.port, rpc_auth=env.auth_key) def get_serf_members_data(): return _serf_client().members().body['Members'] def get_nodes(data): return [node['Name'] for node in data] def get_groups(data): groups = collections.defaultdict(list) for node in data: for key, value in node['Tags'].items(): groups[value].append(node['Name']) return groups def get_meta(data): meta = {'hostvars': {}} for node in data: meta['hostvars'][node['Name']] = node['Tags'] return meta def print_list(): data = get_serf_members_data() nodes = get_nodes(data) groups = get_groups(data) meta = get_meta(data) inventory_data = {_key: nodes, '_meta': meta} inventory_data.update(groups) print(json.dumps(inventory_data)) def print_host(host): data = get_serf_members_data() meta = get_meta(data) print(json.dumps(meta['hostvars'][host])) def get_args(args_list): parser = argparse.ArgumentParser( description='ansible inventory script reading from serf cluster') mutex_group = parser.add_mutually_exclusive_group(required=True) help_list = 'list all hosts from serf cluster' mutex_group.add_argument('--list', action='store_true', help=help_list) help_host = 'display variables for a host' mutex_group.add_argument('--host', help=help_host) return parser.parse_args(args_list) def main(args_list): args = get_args(args_list) if args.list: print_list() if args.host: print_host(args.host) if __name__ == '__main__': main(sys.argv[1:]) ansible-2.1.1.0/contrib/inventory/softlayer.py0000775000175400017540000001373412746444466022537 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python """ SoftLayer external inventory script. The SoftLayer Python API client is required. Use `pip install softlayer` to install it. You have a few different options for configuring your username and api_key. You can pass environment variables (SL_USERNAME and SL_API_KEY). You can also write INI file to ~/.softlayer or /etc/softlayer.conf. For more information see the SL API at: - https://softlayer-python.readthedocs.org/en/latest/config_file.html The SoftLayer Python client has a built in command for saving this configuration file via the command `sl config setup`. """ # Copyright (C) 2014 AJ Bourg # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # # I found the structure of the ec2.py script very helpful as an example # as I put this together. Thanks to whoever wrote that script! 
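# Example usage with the environment variables documented above
# (illustrative credentials, not real ones):
#
#   export SL_USERNAME=exampleuser
#   export SL_API_KEY=0123456789abcdef
#   ./softlayer.py --list
#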
# import SoftLayer import re import argparse try: import json except: import simplejson as json class SoftLayerInventory(object): def _empty_inventory(self): return {"_meta" : {"hostvars" : {}}} def __init__(self): '''Main path''' self.inventory = self._empty_inventory() self.parse_options() if self.args.list: self.get_all_servers() print(self.json_format_dict(self.inventory, True)) elif self.args.host: self.get_virtual_servers() print(self.json_format_dict(self.inventory["_meta"]["hostvars"][self.args.host], True)) def to_safe(self, word): '''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups''' return re.sub("[^A-Za-z0-9\-\.]", "_", word) def push(self, my_dict, key, element): '''Push an element onto an array that may not have been defined in the dict''' if key in my_dict: my_dict[key].append(element); else: my_dict[key] = [element] def parse_options(self): '''Parse all the arguments from the CLI''' parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on SoftLayer') parser.add_argument('--list', action='store_true', default=False, help='List instances (default: False)') parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') self.args = parser.parse_args() def json_format_dict(self, data, pretty=False): '''Converts a dict to a JSON object and dumps it as a formatted string''' if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) def process_instance(self, instance, instance_type="virtual"): '''Populate the inventory dictionary with any instance information''' # only want active instances if 'status' in instance and instance['status']['name'] != 'Active': return # and powered on instances if 'powerState' in instance and instance['powerState']['name'] != 'Running': return # 5 is active for hardware... 
see https://forums.softlayer.com/forum/softlayer-developer-network/general-discussion/2955-hardwarestatusid if 'hardwareStatusId' in instance and instance['hardwareStatusId'] != 5: return # if there's no IP address, we can't reach it if 'primaryIpAddress' not in instance: return dest = instance['primaryIpAddress'] self.inventory["_meta"]["hostvars"][dest] = instance # Inventory: group by memory if 'maxMemory' in instance: self.push(self.inventory, self.to_safe('memory_' + str(instance['maxMemory'])), dest) elif 'memoryCapacity' in instance: self.push(self.inventory, self.to_safe('memory_' + str(instance['memoryCapacity'])), dest) # Inventory: group by cpu count if 'maxCpu' in instance: self.push(self.inventory, self.to_safe('cpu_' + str(instance['maxCpu'])), dest) elif 'processorPhysicalCoreAmount' in instance: self.push(self.inventory, self.to_safe('cpu_' + str(instance['processorPhysicalCoreAmount'])), dest) # Inventory: group by datacenter self.push(self.inventory, self.to_safe('datacenter_' + instance['datacenter']['name']), dest) # Inventory: group by hostname self.push(self.inventory, self.to_safe(instance['hostname']), dest) # Inventory: group by FQDN self.push(self.inventory, self.to_safe(instance['fullyQualifiedDomainName']), dest) # Inventory: group by domain self.push(self.inventory, self.to_safe(instance['domain']), dest) # Inventory: group by type (hardware/virtual) self.push(self.inventory, instance_type, dest) def get_virtual_servers(self): '''Get all the CCI instances''' vs = SoftLayer.VSManager(self.client) instances = vs.list_instances() for instance in instances: self.process_instance(instance) def get_physical_servers(self): '''Get all the hardware instances''' hw = SoftLayer.HardwareManager(self.client) instances = hw.list_hardware() for instance in instances: self.process_instance(instance, 'hardware') def get_all_servers(self): self.client = SoftLayer.Client() self.get_virtual_servers() self.get_physical_servers() SoftLayerInventory() ansible-2.1.1.0/contrib/inventory/spacewalk.ini0000664000175400017540000000114512746444466022616 0ustar jenkinsjenkins00000000000000# Put this ini-file in the same directory as spacewalk.py # Command line options have precedence over options defined in here. [spacewalk] # To limit the script on one organization in spacewalk, uncomment org_number # and fill in the organization ID: # org_number=2 # To prefix the group names with the organization ID set prefix_org_name=true. # This is convenient when org_number is not set and you have the same group names # in multiple organizations within spacewalk # The prefix is "org_number-" prefix_org_name=false # Default cache_age for files created with spacewalk-report is 300sec. cache_age=300 ansible-2.1.1.0/contrib/inventory/spacewalk.py0000775000175400017540000002073412746444466022477 0ustar jenkinsjenkins00000000000000#!/bin/env python """ Spacewalk external inventory script ================================= Ansible has a feature where instead of reading from /etc/ansible/hosts as a text file, it can query external programs to obtain the list of hosts, groups the hosts are in, and even variables to assign to each host. To use this, copy this file over /etc/ansible/hosts and chmod +x the file. This, more or less, allows you to keep one central database containing info about all of your managed instances. This script is dependent upon the spacealk-reports package being installed on the same machine. 
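Under the hood the script shells out to that tool; an equivalent manual
invocation (shown for illustration only) would be:

    $ /usr/bin/spacewalk-report inventory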
It is basically a CSV-to-JSON converter from the output of "spacewalk-report system-groups-systems|inventory". Tested with Ansible 1.9.2 and spacewalk 2.3 """ # # Author:: Jon Miller # Copyright:: Copyright (c) 2013, Jon Miller # # Extended for support of multiple organizations and # adding the "_meta" dictionary to --list output by # Bernhard Lichtinger 2015 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or (at # your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # from __future__ import print_function import sys import os import time from optparse import OptionParser import subprocess import ConfigParser from six import iteritems try: import json except: import simplejson as json base_dir = os.path.dirname(os.path.realpath(__file__)) SW_REPORT = '/usr/bin/spacewalk-report' CACHE_DIR = os.path.join(base_dir, ".spacewalk_reports") CACHE_AGE = 300 # 5min INI_FILE = os.path.join(base_dir, "spacewalk.ini") # Sanity check if not os.path.exists(SW_REPORT): print('Error: %s is required for operation.' % (SW_REPORT), file=sys.stderr) sys.exit(1) # Pre-startup work if not os.path.exists(CACHE_DIR): os.mkdir(CACHE_DIR) os.chmod(CACHE_DIR, 0o2775) # Helper functions #------------------------------ def spacewalk_report(name): """Yield a dictionary form of each CSV output produced by the specified spacewalk-report """ cache_filename = os.path.join(CACHE_DIR, name) if not os.path.exists(cache_filename) or \ (time.time() - os.stat(cache_filename).st_mtime) > CACHE_AGE: # Update the cache fh = open(cache_filename, 'w') p = subprocess.Popen([SW_REPORT, name], stdout=fh) p.wait() fh.close() lines = open(cache_filename, 'r').readlines() keys = lines[0].strip().split(',') # add 'spacewalk_' prefix to the keys keys = [ 'spacewalk_' + key for key in keys ] for line in lines[1:]: values = line.strip().split(',') if len(keys) == len(values): yield dict(zip(keys, values)) # Options #------------------------------ parser = OptionParser(usage="%prog [options] --list | --host ") parser.add_option('--list', default=False, dest="list", action="store_true", help="Produce a JSON consumable grouping of servers for Ansible") parser.add_option('--host', default=None, dest="host", help="Generate additional host specific details for given host for Ansible") parser.add_option('-H', '--human', dest="human", default=False, action="store_true", help="Produce a friendlier version of either server list or host detail") parser.add_option('-o', '--org', default=None, dest="org_number", help="Limit to spacewalk organization number") parser.add_option('-p', default=False, dest="prefix_org_name", action="store_true", help="Prefix the group name with the organization number") (options, args) = parser.parse_args() # read spacewalk.ini if present #------------------------------ if os.path.exists(INI_FILE): config = ConfigParser.SafeConfigParser() config.read(INI_FILE) if config.has_option('spacewalk' , 'cache_age'): CACHE_AGE = config.get('spacewalk' , 'cache_age') if not options.org_number and config.has_option('spacewalk' , 'org_number'): 
options.org_number = config.get('spacewalk' , 'org_number') if not options.prefix_org_name and config.has_option('spacewalk' , 'prefix_org_name'): options.prefix_org_name = config.getboolean('spacewalk' , 'prefix_org_name') # Generate dictionary for mapping group_id to org_id #------------------------------ org_groups = {} try: for group in spacewalk_report('system-groups'): org_groups[group['spacewalk_group_id']] = group['spacewalk_org_id'] except (OSError) as e: print('Problem executing the command "%s system-groups": %s' % (SW_REPORT, str(e)), file=sys.stderr) sys.exit(2) # List out the known server from Spacewalk #------------------------------ if options.list: # to build the "_meta"-Group with hostvars first create dictionary for later use host_vars = {} try: for item in spacewalk_report('inventory'): host_vars[ item['spacewalk_profile_name'] ] = dict( ( key, ( value.split(';') if ';' in value else value) ) for key, value in item.items() ) except (OSError) as e: print('Problem executing the command "%s inventory": %s' % (SW_REPORT, str(e)), file=sys.stderr) sys.exit(2) groups = {} meta = { "hostvars" : {} } try: for system in spacewalk_report('system-groups-systems'): # first get org_id of system org_id = org_groups[ system['spacewalk_group_id'] ] # shall we add the org_id as prefix to the group name: if options.prefix_org_name: prefix = org_id + "-" group_name = prefix + system['spacewalk_group_name'] else: group_name = system['spacewalk_group_name'] # if we are limited to one organization: if options.org_number: if org_id == options.org_number: if group_name not in groups: groups[group_name] = set() groups[group_name].add(system['spacewalk_server_name']) if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta[ "hostvars" ]: meta[ "hostvars" ][ system['spacewalk_server_name'] ] = host_vars[ system['spacewalk_server_name'] ] # or we list all groups and systems: else: if group_name not in groups: groups[group_name] = set() groups[group_name].add(system['spacewalk_server_name']) if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta[ "hostvars" ]: meta[ "hostvars" ][ system['spacewalk_server_name'] ] = host_vars[ system['spacewalk_server_name'] ] except (OSError) as e: print('Problem executing the command "%s system-groups-systems": %s' % (SW_REPORT, str(e)), file=sys.stderr) sys.exit(2) if options.human: for group, systems in iteritems(groups): print('[%s]\n%s\n' % (group, '\n'.join(systems))) else: final = dict( [ (k, list(s)) for k, s in iteritems(groups) ] ) final["_meta"] = meta print(json.dumps( final )) #print(json.dumps(groups)) sys.exit(0) # Return a details information concerning the spacewalk server #------------------------------ elif options.host: host_details = {} try: for system in spacewalk_report('inventory'): if system['spacewalk_hostname'] == options.host: host_details = system break except (OSError) as e: print('Problem executing the command "%s inventory": %s' % (SW_REPORT, str(e)), file=sys.stderr) sys.exit(2) if options.human: print('Host: %s' % options.host) for k, v in iteritems(host_details): print(' %s: %s' % (k, '\n '.join(v.split(';')))) else: print( json.dumps( dict( ( key, ( value.split(';') if ';' in value else value) ) for key, value in host_details.items() ) ) ) sys.exit(0) else: parser.print_help() sys.exit(1) ansible-2.1.1.0/contrib/inventory/ssh_config.py0000775000175400017540000000761312746444466022650 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # (c) 2014, 
Tomas Karasek # # This file is part of Ansible. # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Dynamic inventory script which lets you use aliases from ~/.ssh/config. # # There were some issues with various Paramiko versions. I took a deeper look # and tested heavily. Now, ansible parses this alright with Paramiko versions # 1.7.2 to 1.15.2. # # It prints inventory based on parsed ~/.ssh/config. You can refer to hosts # with their alias, rather than with the IP or hostname. It takes advantage # of the ansible_ssh_{host,port,user,private_key_file}. # # If you have in your .ssh/config: # Host git # HostName git.domain.org # User tkarasek # IdentityFile /home/tomk/keys/thekey # # You can do # $ ansible git -m ping # # Example invocation: # ssh_config.py --list # ssh_config.py --host import argparse import os.path import sys import paramiko try: import json except ImportError: import simplejson as json SSH_CONF = '~/.ssh/config' _key = 'ssh_config' _ssh_to_ansible = [('user', 'ansible_ssh_user'), ('hostname', 'ansible_ssh_host'), ('identityfile', 'ansible_ssh_private_key_file'), ('port', 'ansible_ssh_port')] def get_config(): if not os.path.isfile(os.path.expanduser(SSH_CONF)): return {} with open(os.path.expanduser(SSH_CONF)) as f: cfg = paramiko.SSHConfig() cfg.parse(f) ret_dict = {} for d in cfg._config: if type(d['host']) is list: alias = d['host'][0] else: alias = d['host'] if ('?' in alias) or ('*' in alias): continue _copy = dict(d) del _copy['host'] if 'config' in _copy: ret_dict[alias] = _copy['config'] else: ret_dict[alias] = _copy return ret_dict def print_list(): cfg = get_config() meta = {'hostvars': {}} for alias, attributes in cfg.items(): tmp_dict = {} for ssh_opt, ans_opt in _ssh_to_ansible: if ssh_opt in attributes: # If the attribute is a list, just take the first element. # Private key is returned in a list for some reason. attr = attributes[ssh_opt] if type(attr) is list: attr = attr[0] tmp_dict[ans_opt] = attr if tmp_dict: meta['hostvars'][alias] = tmp_dict print(json.dumps({_key: list(set(meta['hostvars'].keys())), '_meta': meta})) def print_host(host): cfg = get_config() print(json.dumps(cfg[host])) def get_args(args_list): parser = argparse.ArgumentParser( description='ansible inventory script parsing .ssh/config') mutex_group = parser.add_mutually_exclusive_group(required=True) help_list = 'list all hosts from .ssh/config inventory' mutex_group.add_argument('--list', action='store_true', help=help_list) help_host = 'display variables for a host' mutex_group.add_argument('--host', help=help_host) return parser.parse_args(args_list) def main(args_list): args = get_args(args_list) if args.list: print_list() if args.host: print_host(args.host) if __name__ == '__main__': main(sys.argv[1:]) ansible-2.1.1.0/contrib/inventory/vagrant.py0000775000175400017540000000757012746444466022172 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python """ Vagrant external inventory script. 
Automatically finds the IP of the booted vagrant vm(s), and returns it under the host group 'vagrant' Example Vagrant configuration using this script: config.vm.provision :ansible do |ansible| ansible.playbook = "./provision/your_playbook.yml" ansible.inventory_file = "./provision/inventory/vagrant.py" ansible.verbose = true end """ # Copyright (C) 2013 Mark Mandel # 2015 Igor Khomyakov # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # # Thanks to the spacewalk.py inventory script for giving me the basic structure # of this. # import sys import os.path import subprocess import re from paramiko import SSHConfig from cStringIO import StringIO from optparse import OptionParser from collections import defaultdict try: import json except: import simplejson as json _group = 'vagrant' # a default group _ssh_to_ansible = [('user', 'ansible_ssh_user'), ('hostname', 'ansible_ssh_host'), ('identityfile', 'ansible_ssh_private_key_file'), ('port', 'ansible_ssh_port')] # Options # ------------------------------ parser = OptionParser(usage="%prog [options] --list | --host ") parser.add_option('--list', default=False, dest="list", action="store_true", help="Produce a JSON consumable grouping of Vagrant servers for Ansible") parser.add_option('--host', default=None, dest="host", help="Generate additional host specific details for given host for Ansible") (options, args) = parser.parse_args() # # helper functions # # get all the ssh configs for all boxes in an array of dictionaries. def get_ssh_config(): return {k: get_a_ssh_config(k) for k in list_running_boxes()} # list all the running boxes def list_running_boxes(): output = subprocess.check_output(["vagrant", "status"]).split('\n') boxes = [] for line in output: matcher = re.search("([^\s]+)[\s]+running \(.+", line) if matcher: boxes.append(matcher.group(1)) return boxes # get the ssh config for a single box def get_a_ssh_config(box_name): """Gives back a map of all the machine's ssh configurations""" output = subprocess.check_output(["vagrant", "ssh-config", box_name]) config = SSHConfig() config.parse(StringIO(output)) host_config = config.lookup(box_name) # man 5 ssh_config: # > It is possible to have multiple identity files ... # > all these identities will be tried in sequence. 
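    # Collapse the identityfile list to a single existing path:
    # ansible_ssh_private_key_file expects one file, while Vagrant may
    # report several candidates. (Note: if more than one exists on disk,
    # the last match wins, since the assignment runs on every iteration.)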
for id in host_config['identityfile']: if os.path.isfile(id): host_config['identityfile'] = id return {v: host_config[k] for k, v in _ssh_to_ansible} # List out servers that vagrant has running # ------------------------------ if options.list: ssh_config = get_ssh_config() meta = defaultdict(dict) for host in ssh_config: meta['hostvars'][host] = ssh_config[host] print(json.dumps({_group: list(ssh_config.keys()), '_meta': meta})) sys.exit(0) # Get out the host details # ------------------------------ elif options.host: print(json.dumps(get_a_ssh_config(options.host))) sys.exit(0) # Print out help # ------------------------------ else: parser.print_help() sys.exit(0) ansible-2.1.1.0/contrib/inventory/vbox.py0000775000175400017540000000631412746444466021501 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . import sys from subprocess import Popen,PIPE try: import json except ImportError: import simplejson as json class SetEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, set): return list(obj) return json.JSONEncoder.default(self, obj) VBOX="VBoxManage" def get_hosts(host=None): returned = {} try: if host: p = Popen([VBOX, 'showvminfo', host], stdout=PIPE) else: returned = { 'all': set(), '_metadata': {} } p = Popen([VBOX, 'list', '-l', 'vms'], stdout=PIPE) except: sys.exit(1) hostvars = {} prevkey = pref_k = '' for line in p.stdout.readlines(): try: k,v = line.split(':',1) except: continue if k == '': continue v = v.strip() if k.startswith('Name'): if v not in hostvars: curname = v hostvars[curname] = {} try: # try to get network info x = Popen([VBOX, 'guestproperty', 'get', curname,"/VirtualBox/GuestInfo/Net/0/V4/IP"],stdout=PIPE) ipinfo = x.stdout.read() if 'Value' in ipinfo: a,ip = ipinfo.split(':',1) hostvars[curname]['ansible_ssh_host'] = ip.strip() except: pass continue if not host: if k == 'Groups': for group in v.split('/'): if group: if group not in returned: returned[group] = set() returned[group].add(curname) returned['all'].add(curname) continue pref_k = 'vbox_' + k.strip().replace(' ','_') if k.startswith(' '): if prevkey not in hostvars[curname]: hostvars[curname][prevkey] = {} hostvars[curname][prevkey][pref_k]= v else: if v != '': hostvars[curname][pref_k] = v prevkey = pref_k if not host: returned['_metadata']['hostvars'] = hostvars else: returned = hostvars[host] return returned if __name__ == '__main__': inventory = {} hostname = None if len(sys.argv) > 1: if sys.argv[1] == "--host": hostname = sys.argv[2] if hostname: inventory = get_hosts(hostname) else: inventory = get_hosts() sys.stdout.write(json.dumps(inventory, indent=2, cls=SetEncoder)) ansible-2.1.1.0/contrib/inventory/vmware.ini0000664000175400017540000000276312746444466022154 0ustar jenkinsjenkins00000000000000# Ansible VMware external inventory script settings [defaults] # If true (the default), return only guest VMs. If false, also return host # systems in the results. 
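# For example, to also inventory the ESXi/vCenter host systems themselves
# (an illustrative override of the default):
# guests_only = False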
guests_only = True

# Specify an alternate group name for guest VMs. If not defined, defaults to
# the basename of the inventory script + "_vm", e.g. "vmware_vm".
#vm_group = vm_group_name

# Specify an alternate group name for host systems when guests_only=false.
# If not defined, defaults to the basename of the inventory script + "_hw",
# e.g. "vmware_hw".
#hw_group = hw_group_name

# Specify the number of seconds to use the inventory cache before it is
# considered stale. If not defined, defaults to 0 seconds.
#cache_max_age = 3600

# Specify the directory used for storing the inventory cache. If not defined,
# caching will be disabled.
#cache_dir = ~/.cache/ansible

# Specify a prefix filter. Any VMs with names beginning with this string will
# not be returned.
# prefix_filter = test_

[auth]

# Specify hostname or IP address of vCenter/ESXi server. A port may be
# included with the hostname, e.g.: vcenter.example.com:8443. This setting
# may also be defined via the VMWARE_HOST environment variable.
host = vcenter.example.com

# Specify a username to access the vCenter host. This setting may also be
# defined with the VMWARE_USER environment variable.
user = ihasaccess

# Specify a password to access the vCenter host. This setting may also be
# defined with the VMWARE_PASSWORD environment variable.
password = ssshverysecret
ansible-2.1.1.0/contrib/inventory/vmware.py0000775000175400017540000004112612746444466022024 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python
# -*- coding: utf-8 -*-

'''
VMware Inventory Script
=======================

Retrieve information about virtual machines from a vCenter server or
standalone ESX host. When `guests_only=false` (in the INI file), host
systems are also returned in addition to VMs.

This script will attempt to read configuration from an INI file with the same
base filename if present, or `vmware.ini` if not. It is possible to create
symlinks to the inventory script to support multiple configurations, e.g.:

* `vmware.py` (this script)
* `vmware.ini` (default configuration, will be read by `vmware.py`)
* `vmware_test.py` (symlink to `vmware.py`)
* `vmware_test.ini` (test configuration, will be read by `vmware_test.py`)
* `vmware_other.py` (symlink to `vmware.py`, will read `vmware.ini` since no
  `vmware_other.ini` exists)

The path to an INI file may also be specified via the `VMWARE_INI` environment
variable, in which case the filename matching rules above will not apply.

Host and authentication parameters may be specified via the `VMWARE_HOST`,
`VMWARE_USER` and `VMWARE_PASSWORD` environment variables; these options will
take precedence over options present in the INI file. An INI file is not
required if these options are specified using environment variables.
'''

from __future__ import print_function

import collections
import json
import logging
import optparse
import os
import sys
import time
import ConfigParser

from six import text_type

# Disable logging messages triggered by pSphere/suds.
try: from logging import NullHandler except ImportError: from logging import Handler class NullHandler(Handler): def emit(self, record): pass logging.getLogger('psphere').addHandler(NullHandler()) logging.getLogger('suds').addHandler(NullHandler()) from psphere.client import Client from psphere.errors import ObjectNotFoundError from psphere.managedobjects import HostSystem, VirtualMachine, ManagedObject, Network from suds.sudsobject import Object as SudsObject class VMwareInventory(object): def __init__(self, guests_only=None): self.config = ConfigParser.SafeConfigParser() if os.environ.get('VMWARE_INI', ''): config_files = [os.environ['VMWARE_INI']] else: config_files = [os.path.abspath(sys.argv[0]).rstrip('.py') + '.ini', 'vmware.ini'] for config_file in config_files: if os.path.exists(config_file): self.config.read(config_file) break # Retrieve only guest VMs, or include host systems? if guests_only is not None: self.guests_only = guests_only elif self.config.has_option('defaults', 'guests_only'): self.guests_only = self.config.getboolean('defaults', 'guests_only') else: self.guests_only = True # Read authentication information from VMware environment variables # (if set), otherwise from INI file. auth_host = os.environ.get('VMWARE_HOST') if not auth_host and self.config.has_option('auth', 'host'): auth_host = self.config.get('auth', 'host') auth_user = os.environ.get('VMWARE_USER') if not auth_user and self.config.has_option('auth', 'user'): auth_user = self.config.get('auth', 'user') auth_password = os.environ.get('VMWARE_PASSWORD') if not auth_password and self.config.has_option('auth', 'password'): auth_password = self.config.get('auth', 'password') # Create the VMware client connection. self.client = Client(auth_host, auth_user, auth_password) def _put_cache(self, name, value): ''' Saves the value to cache with the name given. ''' if self.config.has_option('defaults', 'cache_dir'): cache_dir = os.path.expanduser(self.config.get('defaults', 'cache_dir')) if not os.path.exists(cache_dir): os.makedirs(cache_dir) cache_file = os.path.join(cache_dir, name) with open(cache_file, 'w') as cache: json.dump(value, cache) def _get_cache(self, name, default=None): ''' Retrieves the value from cache for the given name. ''' if self.config.has_option('defaults', 'cache_dir'): cache_dir = self.config.get('defaults', 'cache_dir') cache_file = os.path.join(cache_dir, name) if os.path.exists(cache_file): if self.config.has_option('defaults', 'cache_max_age'): cache_max_age = self.config.getint('defaults', 'cache_max_age') else: cache_max_age = 0 cache_stat = os.stat(cache_file) if (cache_stat.st_mtime + cache_max_age) >= time.time(): with open(cache_file) as cache: return json.load(cache) return default def _flatten_dict(self, d, parent_key='', sep='_'): ''' Flatten nested dicts by combining keys with a separator. Lists with only string items are included as is; any other lists are discarded. ''' items = [] for k, v in d.items(): if k.startswith('_'): continue new_key = parent_key + sep + k if parent_key else k if isinstance(v, collections.MutableMapping): items.extend(self._flatten_dict(v, new_key, sep).items()) elif isinstance(v, (list, tuple)): if all([isinstance(x, basestring) for x in v]): items.append((new_key, v)) else: items.append((new_key, v)) return dict(items) def _get_obj_info(self, obj, depth=99, seen=None): ''' Recursively build a data structure for the given pSphere object (depth only applies to ManagedObject instances). 
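
        An illustrative (not verbatim) result for a ManagedObject that
        exposes only a name and an overallStatus attribute would be:

            {'name': 'esx01.example.com', 'overallStatus': 'green'}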
''' seen = seen or set() if isinstance(obj, ManagedObject): try: obj_unicode = text_type(getattr(obj, 'name')) except AttributeError: obj_unicode = () if obj in seen: return obj_unicode seen.add(obj) if depth <= 0: return obj_unicode d = {} for attr in dir(obj): if attr.startswith('_'): continue try: val = getattr(obj, attr) obj_info = self._get_obj_info(val, depth - 1, seen) if obj_info != (): d[attr] = obj_info except Exception as e: pass return d elif isinstance(obj, SudsObject): d = {} for key, val in iter(obj): obj_info = self._get_obj_info(val, depth, seen) if obj_info != (): d[key] = obj_info return d elif isinstance(obj, (list, tuple)): l = [] for val in iter(obj): obj_info = self._get_obj_info(val, depth, seen) if obj_info != (): l.append(obj_info) return l elif isinstance(obj, (type(None), bool, int, long, float, basestring)): return obj else: return () def _get_host_info(self, host, prefix='vmware'): ''' Return a flattened dict with info about the given host system. ''' host_info = { 'name': host.name, } for attr in ('datastore', 'network', 'vm'): try: value = getattr(host, attr) host_info['%ss' % attr] = self._get_obj_info(value, depth=0) except AttributeError: host_info['%ss' % attr] = [] for k, v in self._get_obj_info(host.summary, depth=0).items(): if isinstance(v, collections.MutableMapping): for k2, v2 in v.items(): host_info[k2] = v2 elif k != 'host': host_info[k] = v try: host_info['ipAddress'] = host.config.network.vnic[0].spec.ip.ipAddress except Exception as e: print(e, file=sys.stderr) host_info = self._flatten_dict(host_info, prefix) if ('%s_ipAddress' % prefix) in host_info: host_info['ansible_ssh_host'] = host_info['%s_ipAddress' % prefix] return host_info def _get_vm_info(self, vm, prefix='vmware'): ''' Return a flattened dict with info about the given virtual machine. ''' vm_info = { 'name': vm.name, } for attr in ('datastore', 'network'): try: value = getattr(vm, attr) vm_info['%ss' % attr] = self._get_obj_info(value, depth=0) except AttributeError: vm_info['%ss' % attr] = [] try: vm_info['resourcePool'] = self._get_obj_info(vm.resourcePool, depth=0) except AttributeError: vm_info['resourcePool'] = '' try: vm_info['guestState'] = vm.guest.guestState except AttributeError: vm_info['guestState'] = '' for k, v in self._get_obj_info(vm.summary, depth=0).items(): if isinstance(v, collections.MutableMapping): for k2, v2 in v.items(): if k2 == 'host': k2 = 'hostSystem' vm_info[k2] = v2 elif k != 'vm': vm_info[k] = v vm_info = self._flatten_dict(vm_info, prefix) if ('%s_ipAddress' % prefix) in vm_info: vm_info['ansible_ssh_host'] = vm_info['%s_ipAddress' % prefix] return vm_info def _add_host(self, inv, parent_group, host_name): ''' Add the host to the parent group in the given inventory. ''' p_group = inv.setdefault(parent_group, []) if isinstance(p_group, dict): group_hosts = p_group.setdefault('hosts', []) else: group_hosts = p_group if host_name not in group_hosts: group_hosts.append(host_name) def _add_child(self, inv, parent_group, child_group): ''' Add a child group to a parent group in the given inventory. ''' if parent_group != 'all': p_group = inv.setdefault(parent_group, {}) if not isinstance(p_group, dict): inv[parent_group] = {'hosts': p_group} p_group = inv[parent_group] group_children = p_group.setdefault('children', []) if child_group not in group_children: group_children.append(child_group) inv.setdefault(child_group, []) def get_inventory(self, meta_hostvars=True): ''' Reads the inventory from cache or VMware API via pSphere. 
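        The returned dict maps group names to host lists (or to dicts with
        'hosts' and 'children' keys) and, when meta_hostvars is true, carries a
        top-level '_meta' entry with per-host variables; this is the JSON
        layout Ansible expects from an external inventory script called with
        --list.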
''' # Use different cache names for guests only vs. all hosts. if self.guests_only: cache_name = '__inventory_guests__' else: cache_name = '__inventory_all__' inv = self._get_cache(cache_name, None) if inv is not None: return inv inv = {'all': {'hosts': []}} if meta_hostvars: inv['_meta'] = {'hostvars': {}} default_group = os.path.basename(sys.argv[0]).rstrip('.py') if not self.guests_only: if self.config.has_option('defaults', 'hw_group'): hw_group = self.config.get('defaults', 'hw_group') else: hw_group = default_group + '_hw' if self.config.has_option('defaults', 'vm_group'): vm_group = self.config.get('defaults', 'vm_group') else: vm_group = default_group + '_vm' if self.config.has_option('defaults', 'prefix_filter'): prefix_filter = self.config.get('defaults', 'prefix_filter') else: prefix_filter = None # Loop through physical hosts: for host in HostSystem.all(self.client): if not self.guests_only: self._add_host(inv, 'all', host.name) self._add_host(inv, hw_group, host.name) host_info = self._get_host_info(host) if meta_hostvars: inv['_meta']['hostvars'][host.name] = host_info self._put_cache(host.name, host_info) # Loop through all VMs on physical host. for vm in host.vm: if prefix_filter: if vm.name.startswith( prefix_filter ): continue self._add_host(inv, 'all', vm.name) self._add_host(inv, vm_group, vm.name) vm_info = self._get_vm_info(vm) if meta_hostvars: inv['_meta']['hostvars'][vm.name] = vm_info self._put_cache(vm.name, vm_info) # Group by resource pool. vm_resourcePool = vm_info.get('vmware_resourcePool', None) if vm_resourcePool: self._add_child(inv, vm_group, 'resource_pools') self._add_child(inv, 'resource_pools', vm_resourcePool) self._add_host(inv, vm_resourcePool, vm.name) # Group by datastore. for vm_datastore in vm_info.get('vmware_datastores', []): self._add_child(inv, vm_group, 'datastores') self._add_child(inv, 'datastores', vm_datastore) self._add_host(inv, vm_datastore, vm.name) # Group by network. for vm_network in vm_info.get('vmware_networks', []): self._add_child(inv, vm_group, 'networks') self._add_child(inv, 'networks', vm_network) self._add_host(inv, vm_network, vm.name) # Group by guest OS. vm_guestId = vm_info.get('vmware_guestId', None) if vm_guestId: self._add_child(inv, vm_group, 'guests') self._add_child(inv, 'guests', vm_guestId) self._add_host(inv, vm_guestId, vm.name) # Group all VM templates. vm_template = vm_info.get('vmware_template', False) if vm_template: self._add_child(inv, vm_group, 'templates') self._add_host(inv, 'templates', vm.name) self._put_cache(cache_name, inv) return inv def get_host(self, hostname): ''' Read info about a specific host or VM from cache or VMware API. ''' inv = self._get_cache(hostname, None) if inv is not None: return inv if not self.guests_only: try: host = HostSystem.get(self.client, name=hostname) inv = self._get_host_info(host) except ObjectNotFoundError: pass if inv is None: try: vm = VirtualMachine.get(self.client, name=hostname) inv = self._get_vm_info(vm) except ObjectNotFoundError: pass if inv is not None: self._put_cache(hostname, inv) return inv or {} def main(): parser = optparse.OptionParser() parser.add_option('--list', action='store_true', dest='list', default=False, help='Output inventory groups and hosts') parser.add_option('--host', dest='host', default=None, metavar='HOST', help='Output variables only for the given hostname') # Additional options for use when running the script standalone, but never # used by Ansible. 
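    # (Ansible itself never passes these; per the external inventory protocol
    # it only ever invokes the script with --list or with --host <hostname>.)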
parser.add_option('--pretty', action='store_true', dest='pretty', default=False, help='Output nicely-formatted JSON') parser.add_option('--include-host-systems', action='store_true', dest='include_host_systems', default=False, help='Include host systems in addition to VMs') parser.add_option('--no-meta-hostvars', action='store_false', dest='meta_hostvars', default=True, help='Exclude [\'_meta\'][\'hostvars\'] with --list') options, args = parser.parse_args() if options.include_host_systems: vmware_inventory = VMwareInventory(guests_only=False) else: vmware_inventory = VMwareInventory() if options.host is not None: inventory = vmware_inventory.get_host(options.host) else: inventory = vmware_inventory.get_inventory(options.meta_hostvars) json_kwargs = {} if options.pretty: json_kwargs.update({'indent': 4, 'sort_keys': True}) json.dump(inventory, sys.stdout, **json_kwargs) if __name__ == '__main__': main() ansible-2.1.1.0/contrib/inventory/windows_azure.ini0000664000175400017540000000154112746444466023544 0ustar jenkinsjenkins00000000000000# Ansible Windows Azure external inventory script settings # [azure] # The module needs your Windows Azure subscription ID and Management certificate path. # These may also be specified on the command line via --subscription-id and --cert-path # or via the environment variables AZURE_SUBSCRIPTION_ID and AZURE_CERT_PATH # #subscription_id = aaaaaaaa-1234-1234-1234-aaaaaaaaaaaa #cert_path = /path/to/cert.pem # API calls to Windows Azure may be slow. For this reason, we cache the results # of an API call. Set this to the path you want cache files to be written to. # Two files will be written to this directory: # - ansible-azure.cache # - ansible-azure.index # cache_path = /tmp # The number of seconds a cache file is considered valid. After this many # seconds, a new API call will be made, and the cache file will be updated. # cache_max_age = 300 ansible-2.1.1.0/contrib/inventory/windows_azure.py0000775000175400017540000002555512746444466023433 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python ''' Windows Azure external inventory script ======================================= Generates inventory that Ansible can understand by making API request to Windows Azure using the azure python library. NOTE: This script assumes Ansible is being executed where azure is already installed. pip install azure Adapted from the ansible Linode plugin by Dan Slimmon. ''' # (c) 2013, John Whitbeck # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
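# Illustrative output shape only (the host and group names here are invented):
# with a single cloud service "web" containing one VM, running this script
# with --list prints JSON along the lines of:
#
#   {
#     "azure": ["web-vm-1"],
#     "web": ["web-vm-1"],
#     "_meta": {"hostvars": {"web-vm-1": {"ansible_ssh_host": "...",
#                                         "ansible_ssh_port": 22, ...}}}
#   }
#
# The exact keys and groups are assembled in add_instance() below.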
###################################################################### # Standard imports import re import sys import argparse import os from urlparse import urlparse from time import time try: import json except ImportError: import simplejson as json try: from azure.servicemanagement import ServiceManagementService except ImportError as e: sys.exit("ImportError: {0}".format(str(e))) # Imports for ansible import ConfigParser class AzureInventory(object): def __init__(self): """Main execution path.""" # Inventory grouped by display group self.inventory = {} # Index of deployment name -> host self.index = {} self.host_metadata = {} # Cache setting defaults. # These can be overridden in settings (see `read_settings`). cache_dir = os.path.expanduser('~') self.cache_path_cache = os.path.join(cache_dir, '.ansible-azure.cache') self.cache_path_index = os.path.join(cache_dir, '.ansible-azure.index') self.cache_max_age = 0 # Read settings and parse CLI arguments self.read_settings() self.read_environment() self.parse_cli_args() # Initialize Azure ServiceManagementService self.sms = ServiceManagementService(self.subscription_id, self.cert_path) # Cache if self.args.refresh_cache: self.do_api_calls_update_cache() elif not self.is_cache_valid(): self.do_api_calls_update_cache() if self.args.list_images: data_to_print = self.json_format_dict(self.get_images(), True) elif self.args.list or self.args.host: # Display list of nodes for inventory if len(self.inventory) == 0: data = json.loads(self.get_inventory_from_cache()) else: data = self.inventory if self.args.host: data_to_print = self.get_host(self.args.host) else: # Add the `['_meta']['hostvars']` information. hostvars = {} if len(data) > 0: for host in set([h for hosts in data.values() for h in hosts if h]): hostvars[host] = self.get_host(host, jsonify=False) data['_meta'] = {'hostvars': hostvars} # JSONify the data. data_to_print = self.json_format_dict(data, pretty=True) print(data_to_print) def get_host(self, hostname, jsonify=True): """Return information about the given hostname, based on what the Windows Azure API provides. 
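        The metadata is filled in by add_instance() and includes
        ansible_ssh_host, ansible_ssh_port, instance_status and private_id.
        Returns a JSON string unless jsonify=False is passed.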
""" if hostname not in self.host_metadata: return "No host found: %s" % json.dumps(self.host_metadata) if jsonify: return json.dumps(self.host_metadata[hostname]) return self.host_metadata[hostname] def get_images(self): images = [] for image in self.sms.list_os_images(): if str(image.label).lower().find(self.args.list_images.lower()) >= 0: images.append(vars(image)) return json.loads(json.dumps(images, default=lambda o: o.__dict__)) def is_cache_valid(self): """Determines if the cache file has expired, or if it is still valid.""" if os.path.isfile(self.cache_path_cache): mod_time = os.path.getmtime(self.cache_path_cache) current_time = time() if (mod_time + self.cache_max_age) > current_time: if os.path.isfile(self.cache_path_index): return True return False def read_settings(self): """Reads the settings from the .ini file.""" config = ConfigParser.SafeConfigParser() config.read(os.path.dirname(os.path.realpath(__file__)) + '/windows_azure.ini') # Credentials related if config.has_option('azure', 'subscription_id'): self.subscription_id = config.get('azure', 'subscription_id') if config.has_option('azure', 'cert_path'): self.cert_path = config.get('azure', 'cert_path') # Cache related if config.has_option('azure', 'cache_path'): cache_path = os.path.expandvars(os.path.expanduser(config.get('azure', 'cache_path'))) self.cache_path_cache = os.path.join(cache_path, 'ansible-azure.cache') self.cache_path_index = os.path.join(cache_path, 'ansible-azure.index') if config.has_option('azure', 'cache_max_age'): self.cache_max_age = config.getint('azure', 'cache_max_age') def read_environment(self): ''' Reads the settings from environment variables ''' # Credentials if os.getenv("AZURE_SUBSCRIPTION_ID"): self.subscription_id = os.getenv("AZURE_SUBSCRIPTION_ID") if os.getenv("AZURE_CERT_PATH"): self.cert_path = os.getenv("AZURE_CERT_PATH") def parse_cli_args(self): """Command line argument processing""" parser = argparse.ArgumentParser( description='Produce an Ansible Inventory file based on Azure', ) parser.add_argument('--list', action='store_true', default=True, help='List nodes (default: True)') parser.add_argument('--list-images', action='store', help='Get all available images.') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of thecache by making API requests to Azure ' '(default: False - use cache files)', ) parser.add_argument('--host', action='store', help='Get all information about an instance.') self.args = parser.parse_args() def do_api_calls_update_cache(self): """Do API calls, and save data in cache files.""" self.add_cloud_services() self.write_to_cache(self.inventory, self.cache_path_cache) self.write_to_cache(self.index, self.cache_path_index) def add_cloud_services(self): """Makes an Azure API call to get the list of cloud services.""" try: for cloud_service in self.sms.list_hosted_services(): self.add_deployments(cloud_service) except Exception as e: sys.exit("Error: Failed to access cloud services - {0}".format(str(e))) def add_deployments(self, cloud_service): """Makes an Azure API call to get the list of virtual machines associated with a cloud service. 
""" try: for deployment in self.sms.get_hosted_service_properties(cloud_service.service_name,embed_detail=True).deployments.deployments: self.add_deployment(cloud_service, deployment) except Exception as e: sys.exit("Error: Failed to access deployments - {0}".format(str(e))) def add_deployment(self, cloud_service, deployment): """Adds a deployment to the inventory and index""" for role in deployment.role_instance_list.role_instances: try: # Default port 22 unless port found with name 'SSH' port = '22' for ie in role.instance_endpoints.instance_endpoints: if ie.name == 'SSH': port = ie.public_port break except AttributeError as e: pass finally: self.add_instance(role.instance_name, deployment, port, cloud_service, role.instance_status) def add_instance(self, hostname, deployment, ssh_port, cloud_service, status): """Adds an instance to the inventory and index""" dest = urlparse(deployment.url).hostname # Add to index self.index[hostname] = deployment.name self.host_metadata[hostname] = dict(ansible_ssh_host=dest, ansible_ssh_port=int(ssh_port), instance_status=status, private_id=deployment.private_id) # List of all azure deployments self.push(self.inventory, "azure", hostname) # Inventory: Group by service name self.push(self.inventory, self.to_safe(cloud_service.service_name), hostname) if int(ssh_port) == 22: self.push(self.inventory, "Cloud_services", hostname) # Inventory: Group by region self.push(self.inventory, self.to_safe(cloud_service.hosted_service_properties.location), hostname) def push(self, my_dict, key, element): """Pushed an element onto an array that may not have been defined in the dict.""" if key in my_dict: my_dict[key].append(element); else: my_dict[key] = [element] def get_inventory_from_cache(self): """Reads the inventory from the cache file and returns it as a JSON object.""" cache = open(self.cache_path_cache, 'r') json_inventory = cache.read() return json_inventory def load_index_from_cache(self): """Reads the index from the cache file and sets self.index.""" cache = open(self.cache_path_index, 'r') json_index = cache.read() self.index = json.loads(json_index) def write_to_cache(self, data, filename): """Writes data in JSON format to a file.""" json_data = self.json_format_dict(data, True) cache = open(filename, 'w') cache.write(json_data) cache.close() def to_safe(self, word): """Escapes any characters that would be invalid in an ansible group name.""" return re.sub("[^A-Za-z0-9\-]", "_", word) def json_format_dict(self, data, pretty=False): """Converts a dict to a JSON object and dumps it as a formatted string.""" if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) AzureInventory() ansible-2.1.1.0/contrib/inventory/zabbix.ini0000664000175400017540000000025112746444466022120 0ustar jenkinsjenkins00000000000000# Ansible Zabbix external inventory script settings # [zabbix] # Server location server = http://zabbix.example.com/zabbix # Login username = admin password = zabbix ansible-2.1.1.0/contrib/inventory/zabbix.py0000775000175400017540000001011012746444466021767 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # (c) 2013, Greg Buehler # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . ###################################################################### """ Zabbix Server external inventory script. ======================================== Returns hosts and hostgroups from Zabbix Server. Configuration is read from `zabbix.ini`. Tested with Zabbix Server 2.0.6. """ from __future__ import print_function import os, sys import argparse import ConfigParser try: from zabbix_api import ZabbixAPI except: print("Error: Zabbix API library must be installed: pip install zabbix-api.", file=sys.stderr) sys.exit(1) try: import json except: import simplejson as json class ZabbixInventory(object): def read_settings(self): config = ConfigParser.SafeConfigParser() conf_path = './zabbix.ini' if not os.path.exists(conf_path): conf_path = os.path.dirname(os.path.realpath(__file__)) + '/zabbix.ini' if os.path.exists(conf_path): config.read(conf_path) # server if config.has_option('zabbix', 'server'): self.zabbix_server = config.get('zabbix', 'server') # login if config.has_option('zabbix', 'username'): self.zabbix_username = config.get('zabbix', 'username') if config.has_option('zabbix', 'password'): self.zabbix_password = config.get('zabbix', 'password') def read_cli(self): parser = argparse.ArgumentParser() parser.add_argument('--host') parser.add_argument('--list', action='store_true') self.options = parser.parse_args() def hoststub(self): return { 'hosts': [] } def get_host(self, api, name): data = {} return data def get_list(self, api): hostsData = api.host.get({'output': 'extend', 'selectGroups': 'extend'}) data = {} data[self.defaultgroup] = self.hoststub() for host in hostsData: hostname = host['name'] data[self.defaultgroup]['hosts'].append(hostname) for group in host['groups']: groupname = group['name'] if not groupname in data: data[groupname] = self.hoststub() data[groupname]['hosts'].append(hostname) return data def __init__(self): self.defaultgroup = 'group_all' self.zabbix_server = None self.zabbix_username = None self.zabbix_password = None self.read_settings() self.read_cli() if self.zabbix_server and self.zabbix_username: try: api = ZabbixAPI(server=self.zabbix_server) api.login(user=self.zabbix_username, password=self.zabbix_password) except BaseException as e: print("Error: Could not login to Zabbix server. Check your zabbix.ini.", file=sys.stderr) sys.exit(1) if self.options.host: data = self.get_host(api, self.options.host) print(json.dumps(data, indent=2)) elif self.options.list: data = self.get_list(api) print(json.dumps(data, indent=2)) else: print("usage: --list ..OR.. --host ", file=sys.stderr) sys.exit(1) else: print("Error: Configuration of server and credentials are required. See zabbix.ini.", file=sys.stderr) sys.exit(1) ZabbixInventory() ansible-2.1.1.0/contrib/inventory/zone.py0000775000175400017540000000267212746444466021501 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # (c) 2015, Dagobert Michelsen # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from subprocess import Popen, PIPE
import sys
import json

result = {}
result['all'] = {}

pipe = Popen(['zoneadm', 'list', '-ip'], stdout=PIPE, universal_newlines=True)

result['all']['hosts'] = []
for l in pipe.stdout.readlines():
    # Example line: 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared
    s = l.split(':')
    if s[1] != 'global':
        result['all']['hosts'].append(s[1])

result['all']['vars'] = {}
result['all']['vars']['ansible_connection'] = 'zone'

if len(sys.argv) == 2 and sys.argv[1] == '--list':
    print(json.dumps(result))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
    print(json.dumps({'ansible_connection': 'zone'}))
else:
    print("Need an argument, either --list or --host <host>")
ansible-2.1.1.0/contrib/README.md0000664000175400017540000000122012746444466017357 0ustar jenkinsjenkins00000000000000inventory
=========

Inventory scripts allow you to store your hosts, groups, and variables in any
way you like. Examples include discovering inventory from EC2 or pulling it
from Cobbler. These could also be used to interface with LDAP or a database.

chmod +x an inventory plugin and either name it /etc/ansible/hosts or use
ansible with -i to designate the path to the script. You might also need to
copy a configuration file with the same name and/or set environment
variables; the scripts or configuration files have more details.

contributions welcome
=====================

Send in pull requests to add plugins of your own. The sky is the limit!
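
example usage
=============

A quick smoke test with the Solaris zone script above (paths are relative to
a source checkout; adjust as needed):

    chmod +x contrib/inventory/zone.py
    ./contrib/inventory/zone.py --list
    ansible -i contrib/inventory/zone.py all -m ping

Every script here implements the same contract: --list prints the full
inventory as JSON (for zone.py, {"all": {"hosts": [...], "vars": {...}}}),
and --host <hostname> prints the variables for a single host.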
ansible-2.1.1.0/docs/0000775000175400017540000000000012746444530015365 5ustar jenkinsjenkins00000000000000ansible-2.1.1.0/docs/man/0000775000175400017540000000000012746444530016140 5ustar jenkinsjenkins00000000000000ansible-2.1.1.0/docs/man/man1/0000775000175400017540000000000012746444530016774 5ustar jenkinsjenkins00000000000000ansible-2.1.1.0/docs/man/man1/ansible-doc.10000664000175400017540000000617012746444524021245 0ustar jenkinsjenkins00000000000000'\" t .\" Title: ansible-doc .\" Author: :doctype:manpage .\" Generator: DocBook XSL Stylesheets v1.78.1 .\" Date: 07/28/2016 .\" Manual: System administration commands .\" Source: Ansible 2.1.1.0 .\" Language: English .\" .TH "ANSIBLE\-DOC" "1" "07/28/2016" "Ansible 2\&.1\&.1\&.0" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .\" http://bugs.debian.org/507673 .\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" ----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- .\" disable hyphenation .nh .\" disable justification (adjust text to left margin only) .ad l .\" ----------------------------------------------------------------- .\" * MAIN CONTENT STARTS HERE * .\" ----------------------------------------------------------------- .SH "NAME" ansible-doc \- show documentation on Ansible modules .SH "SYNOPSIS" .sp ansible\-doc [\-M module_path] [\-l] [\-s] [module\&...] .SH "DESCRIPTION" .sp \fBansible\-doc\fR displays information on modules installed in Ansible libraries\&. It displays a terse listing of modules and their short descriptions, provides a printout of their DOCUMENTATION strings, and it can create a short "snippet" which can be pasted into a playbook\&. .SH "OPTIONS" .PP \fB\-M\fR \fIDIRECTORY\fR, \fB\-\-module\-path=\fR\fIDIRECTORY\fR .RS 4 the \fIDIRECTORY\fR search path to load modules from\&. The default is \fI/usr/share/ansible\fR\&. This can also be set with the ANSIBLE_LIBRARY environment variable\&. .RE .PP \fB\-s\fR, \fB\-\-snippet=\fR .RS 4 Produce a snippet which can be copied into a playbook for modification, like a kind of task template\&. .RE .PP \fB\-l\fR, \fB\-\-list=\fR .RS 4 Produce a terse listing of modules and a short description of each\&. .RE .SH "ENVIRONMENT" .sp ANSIBLE_LIBRARY \(em Override the default ansible module library path .SH "FILES" .sp /usr/share/ansible/ \(em Default module library .sp /etc/ansible/ansible\&.cfg \(em Config file, used if present .sp ~/\&.ansible\&.cfg \(em User config file, overrides the default config if present .SH "AUTHOR" .sp ansible\-doc was originally written by Jan\-Piet Mens\&. See the AUTHORS file for a complete list of contributors\&. .SH "COPYRIGHT" .sp Copyright \(co 2012, Jan\-Piet Mens .sp Ansible is released under the terms of the GPLv3 License\&. .SH "SEE ALSO" .sp \fBansible\-playbook\fR(1), \fBansible\fR(1), \fBansible\-pull\fR(1), \fBansible\-vault\fR(1), \fBansible\-galaxy\fR(1) .sp Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. 
IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible .SH "AUTHOR" .PP \fB:doctype:manpage\fR .RS 4 Author. .RE ansible-2.1.1.0/docs/man/man1/ansible-doc.1.asciidoc.in0000664000175400017540000000351512746444466023434 0ustar jenkinsjenkins00000000000000ansible-doc(1) ============== :doctype:manpage :man source: Ansible :man version: %VERSION% :man manual: System administration commands NAME ---- ansible-doc - show documentation on Ansible modules SYNOPSIS -------- ansible-doc [-M module_path] [-l] [-s] [module...] DESCRIPTION ----------- *ansible-doc* displays information on modules installed in Ansible libraries. It displays a terse listing of modules and their short descriptions, provides a printout of their DOCUMENTATION strings, and it can create a short "snippet" which can be pasted into a playbook. OPTIONS ------- *-M* 'DIRECTORY', *--module-path=*'DIRECTORY':: the 'DIRECTORY' search path to load modules from. The default is '/usr/share/ansible'. This can also be set with the ANSIBLE_LIBRARY environment variable. *-s*, *--snippet=*:: Produce a snippet which can be copied into a playbook for modification, like a kind of task template. *-l*, *--list=*:: Produce a terse listing of modules and a short description of each. ENVIRONMENT ----------- ANSIBLE_LIBRARY -- Override the default ansible module library path FILES ----- /usr/share/ansible/ -- Default module library /etc/ansible/ansible.cfg -- Config file, used if present ~/.ansible.cfg -- User config file, overrides the default config if present AUTHOR ------ ansible-doc was originally written by Jan-Piet Mens. See the AUTHORS file for a complete list of contributors. COPYRIGHT --------- Copyright © 2012, Jan-Piet Mens Ansible is released under the terms of the GPLv3 License. SEE ALSO -------- *ansible-playbook*(1), *ansible*(1), *ansible-pull*(1), *ansible-vault*(1), *ansible-galaxy*(1) Extensive documentation is available in the documentation site: . 
IRC and mailing list info can be found in file CONTRIBUTING.md, available in: ansible-2.1.1.0/docs/man/man1/ansible-galaxy.10000664000175400017540000002556712746444525022001 0ustar jenkinsjenkins00000000000000'\" t .\" Title: ansible-galaxy .\" Author: [see the "AUTHOR" section] .\" Generator: DocBook XSL Stylesheets v1.78.1 .\" Date: 07/28/2016 .\" Manual: System administration commands .\" Source: Ansible 2.1.1.0 .\" Language: English .\" .TH "ANSIBLE\-GALAXY" "1" "07/28/2016" "Ansible 2\&.1\&.1\&.0" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .\" http://bugs.debian.org/507673 .\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" ----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- .\" disable hyphenation .nh .\" disable justification (adjust text to left margin only) .ad l .\" ----------------------------------------------------------------- .\" * MAIN CONTENT STARTS HERE * .\" ----------------------------------------------------------------- .SH "NAME" ansible-galaxy \- manage roles using galaxy\&.ansible\&.com .SH "SYNOPSIS" .sp ansible\-galaxy [delete|import|info|init|install|list|login|remove|search|setup] [\-\-help] [options] \&... .SH "DESCRIPTION" .sp \fBAnsible Galaxy\fR is a shared repository for Ansible roles\&. The ansible\-galaxy command can be used to manage these roles, or for creating a skeleton framework for roles you\(cqd like to upload to Galaxy\&. .SH "COMMON OPTIONS" .PP \fB\-h\fR, \fB\-\-help\fR .RS 4 Show a help message related to the given sub\-command\&. .RE .SH "INSTALL" .sp The \fBinstall\fR sub\-command is used to install roles\&. .SS "USAGE" .sp $ ansible\-galaxy install [options] [\-r FILE | role_name(s)[,version] | tar_file(s)] .sp Roles can be installed in several different ways: .sp .RS 4 .ie n \{\ \h'-04'\(bu\h'+03'\c .\} .el \{\ .sp -1 .IP \(bu 2.3 .\} A username\&.rolename[,version] \- this will install a single role\&. The Galaxy API will be contacted to provide the information about the role, and the corresponding \&.tar\&.gz will be downloaded from \fBgithub\&.com\fR\&. If the version is omitted, the most recent version available will be installed\&. .RE .sp .RS 4 .ie n \{\ \h'-04'\(bu\h'+03'\c .\} .el \{\ .sp -1 .IP \(bu 2.3 .\} A file name, using \fB\-r\fR \- this will install multiple roles listed one per line\&. The format of each line is the same as above: username\&.rolename[,version] .RE .sp .RS 4 .ie n \{\ \h'-04'\(bu\h'+03'\c .\} .el \{\ .sp -1 .IP \(bu 2.3 .\} A \&.tar\&.gz of a valid role you\(cqve downloaded directly from \fBgithub\&.com\fR\&. This is mainly useful when the system running Ansible does not have access to the Galaxy API, for instance when behind a firewall or proxy\&. .RE .SS "OPTIONS" .PP \fB\-f\fR, \fB\-\-force\fR .RS 4 Force overwriting an existing role\&. .RE .PP \fB\-i\fR, \fB\-\-ignore\-errors\fR .RS 4 Ignore errors and continue with the next specified role\&. .RE .PP \fB\-n\fR, \fB\-\-no\-deps\fR .RS 4 Don\(cqt download roles listed as dependencies\&. 
.RE .PP \fB\-p\fR \fIROLES_PATH\fR, \fB\-\-roles\-path=\fR\fIROLES_PATH\fR .RS 4 The path to the directory containing your roles\&. The default is the \fBroles_path\fR configured in your \fBansible\&.cfg\fR file (/etc/ansible/roles if not configured) .RE .PP \fB\-r\fR \fIROLE_FILE\fR, \fB\-\-role\-file=\fR\fIROLE_FILE\fR .RS 4 A file containing a list of roles to be imported, as specified above\&. This option cannot be used if a rolename or \&.tar\&.gz have been specified\&. .RE .SH "REMOVE" .sp The \fBremove\fR sub\-command is used to remove one or more roles\&. .SS "USAGE" .sp $ ansible\-galaxy remove role1 role2 \&... .SS "OPTIONS" .PP \fB\-p\fR \fIROLES_PATH\fR, \fB\-\-roles\-path=\fR\fIROLES_PATH\fR .RS 4 The path to the directory containing your roles\&. The default is the \fBroles_path\fR configured in your \fBansible\&.cfg\fR file (/etc/ansible/roles if not configured) .RE .SH "INIT" .sp The \fBinit\fR command is used to create an empty role suitable for uploading to https://galaxy\&.ansible\&.com (or for roles in general)\&. .SS "USAGE" .sp $ ansible\-galaxy init [options] role_name .SS "OPTIONS" .PP \fB\-f\fR, \fB\-\-force\fR .RS 4 Force overwriting an existing role\&. .RE .PP \fB\-p\fR \fIINIT_PATH\fR, \fB\-\-init\-path=\fR\fIINIT_PATH\fR .RS 4 The path in which the skeleton role will be created\&.The default is the current working directory\&. .RE .PP \fB\-\-offline\fR .RS 4 Don\(cqt query the galaxy API when creating roles .RE .SH "LIST" .sp The \fBlist\fR sub\-command is used to show what roles are currently instaled\&. You can specify a role name, and if installed only that role will be shown\&. .SS "USAGE" .sp $ ansible\-galaxy list [role_name] .SS "OPTIONS" .PP \fB\-p\fR \fIROLES_PATH\fR, \fB\-\-roles\-path=\fR\fIROLES_PATH\fR .RS 4 The path to the directory containing your roles\&. The default is the \fBroles_path\fR configured in your \fBansible\&.cfg\fR file (/etc/ansible/roles if not configured) .RE .SH "SEARCH" .sp The \fBsearch\fR sub\-command returns a filtered list of roles found on the remote server\&. .SS "USAGE" .sp $ ansible\-galaxy search [options] [searchterm1 searchterm2] .SS "OPTIONS" .PP \fB\-\-galaxy\-tags\fR .RS 4 Provide a comma separated list of Galaxy Tags on which to filter\&. .RE .PP \fB\-\-platforms\fR .RS 4 Provide a comma separated list of Platforms on which to filter\&. .RE .PP \fB\-\-author\fR .RS 4 Specify the username of a Galaxy contributor on which to filter\&. .RE .PP \fB\-c\fR, \fB\-\-ignore\-certs\fR .RS 4 Ignore TLS certificate errors\&. .RE .PP \fB\-s\fR, \fB\-\-server\fR .RS 4 Override the default server https://galaxy\&.ansible\&.com\&. .RE .SH "INFO" .sp The \fBinfo\fR sub\-command shows detailed information for a specific role\&. Details returned about the role included information from the local copy as well as information from galaxy\&.ansible\&.com\&. .SS "USAGE" .sp $ ansible\-galaxy info [options] role_name[, version] .SS "OPTIONS" .PP \fB\-p\fR \fIROLES_PATH\fR, \fB\-\-roles\-path=\fR\fIROLES_PATH\fR .RS 4 The path to the directory containing your roles\&. The default is the \fBroles_path\fR configured in your \fBansible\&.cfg\fR file (/etc/ansible/roles if not configured) .RE .PP \fB\-c\fR, \fB\-\-ignore\-certs\fR .RS 4 Ignore TLS certificate errors\&. .RE .PP \fB\-s\fR, \fB\-\-server\fR .RS 4 Override the default server https://galaxy\&.ansible\&.com\&. .RE .SH "LOGIN" .sp The \fBlogin\fR sub\-command is used to authenticate with galaxy\&.ansible\&.com\&. 
Authentication is required to use the import, delete and setup commands\&. It will authenticate the user, retrieve a token from Galaxy, and store it in the user\(cqs home directory\&. .SS "USAGE" .sp $ ansible\-galaxy login [options] .sp The \fBlogin\fR sub\-command prompts for a \fBGitHub\fR username and password\&. It does NOT send your password to Galaxy\&. It actually authenticates with GitHub and creates a personal access token\&. It then sends the personal access token to Galaxy, which in turn verifies that you are you and returns a Galaxy access token\&. After authentication completes the \fBGitHub\fR personal access token is destroyed\&. .sp If you do not wish to use your GitHub password, or if you have two\-factor authentication enabled with GitHub, use the \fB\-\-github\-token\fR option to pass a personal access token that you create\&. Log into GitHub, go to Settings and click on Personal Access Token to create a token\&. .SS "OPTIONS" .PP \fB\-c\fR, \fB\-\-ignore\-certs\fR .RS 4 Ignore TLS certificate errors\&. .RE .PP \fB\-s\fR, \fB\-\-server\fR .RS 4 Override the default server https://galaxy\&.ansible\&.com\&. .RE .PP \fB\-\-github\-token\fR .RS 4 Authenticate using a \fBGitHub\fR personal access token rather than a password\&. .RE .SH "IMPORT" .sp Import a role from \fBGitHub\fR to galaxy\&.ansible\&.com\&. Requires the user first authenticate with galaxy\&.ansible\&.com using the \fBlogin\fR subcommand\&. .SS "USAGE" .sp $ ansible\-galaxy import [options] github_user github_repo .SS "OPTIONS" .PP \fB\-c\fR, \fB\-\-ignore\-certs\fR .RS 4 Ignore TLS certificate errors\&. .RE .PP \fB\-s\fR, \fB\-\-server\fR .RS 4 Override the default server https://galaxy\&.ansible\&.com\&. .RE .PP \fB\-\-branch\fR .RS 4 Provide a specific branch to import\&. When a branch is not specified the branch found in meta/main\&.yml is used\&. If no branch is specified in meta/main\&.yml, the repo\(cqs default branch (usually master) is used\&. .RE .SH "DELETE" .sp The \fBdelete\fR sub\-command will delete a role from galaxy\&.ansible\&.com\&. Requires the user first authenticate with galaxy\&.ansible\&.com using the \fBlogin\fR subcommand\&. .SS "USAGE" .sp $ ansible\-galaxy delete [options] github_user github_repo .SS "OPTIONS" .PP \fB\-c\fR, \fB\-\-ignore\-certs\fR .RS 4 Ignore TLS certificate errors\&. .RE .PP \fB\-s\fR, \fB\-\-server\fR .RS 4 Override the default server https://galaxy\&.ansible\&.com\&. .RE .SH "SETUP" .sp The \fBsetup\fR sub\-command creates an integration point for \fBTravis CI\fR, enabling galaxy\&.ansible\&.com to receive notifications from \fBTravis\fR on build completion\&. Requires the user first authenticate with galaxy\&.ansible\&.com using the \fBlogin\fR subcommand\&. .SS "USAGE" .sp $ ansible\-galaxy setup [options] source github_user github_repo secret .sp .RS 4 .ie n \{\ \h'-04'\(bu\h'+03'\c .\} .el \{\ .sp -1 .IP \(bu 2.3 .\} Use \fBtravis\fR as the source value\&. In the future additional source values may be added\&. .RE .sp .RS 4 .ie n \{\ \h'-04'\(bu\h'+03'\c .\} .el \{\ .sp -1 .IP \(bu 2.3 .\} Provide your \fBTravis\fR user token as the secret\&. The token is not stored by galaxy\&.ansible\&.com\&. A hash is created using github_user, github_repo and your token\&. The hash value is what actually gets stored\&. .RE .SS "OPTIONS" .PP \fB\-c\fR, \fB\-\-ignore\-certs\fR .RS 4 Ignore TLS certificate errors\&. .RE .PP \fB\-s\fR, \fB\-\-server\fR .RS 4 Override the default server https://galaxy\&.ansible\&.com\&. 
.RE .PP \-\-list .RS 4 Show your configured integrations\&. Provids the ID of each integration which can be used with the remove option\&. .RE .PP \-\-remove .RS 4 Remove a specific integration\&. Provide the ID of the integration to be removed\&. .RE .SH "AUTHOR" .sp Ansible was originally written by Michael DeHaan\&. See the AUTHORS file for a complete list of contributors\&. .SH "COPYRIGHT" .sp Copyright \(co 2014, Michael DeHaan .sp Ansible is released under the terms of the GPLv3 License\&. .SH "SEE ALSO" .sp \fBansible\fR(1), \fBansible\-pull\fR(1), \fBansible\-doc\fR(1), \fBansible\-playbook\fR(1), \fBansible\-vault\fR(1) .sp Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible ansible-2.1.1.0/docs/man/man1/ansible-galaxy.1.asciidoc.in0000664000175400017540000002050412746444466024151 0ustar jenkinsjenkins00000000000000ansible-galaxy(1) =================== :doctype: manpage :man source: Ansible :man version: %VERSION% :man manual: System administration commands NAME ---- ansible-galaxy - manage roles using galaxy.ansible.com SYNOPSIS -------- ansible-galaxy [delete|import|info|init|install|list|login|remove|search|setup] [--help] [options] ... DESCRIPTION ----------- *Ansible Galaxy* is a shared repository for Ansible roles. The ansible-galaxy command can be used to manage these roles, or for creating a skeleton framework for roles you'd like to upload to Galaxy. COMMON OPTIONS -------------- *-h*, *--help*:: Show a help message related to the given sub-command. INSTALL ------- The *install* sub-command is used to install roles. USAGE ~~~~~ $ ansible-galaxy install [options] [-r FILE | role_name(s)[,version] | tar_file(s)] Roles can be installed in several different ways: * A username.rolename[,version] - this will install a single role. The Galaxy API will be contacted to provide the information about the role, and the corresponding .tar.gz will be downloaded from *github.com*. If the version is omitted, the most recent version available will be installed. * A file name, using *-r* - this will install multiple roles listed one per line. The format of each line is the same as above: username.rolename[,version] * A .tar.gz of a valid role you've downloaded directly from *github.com*. This is mainly useful when the system running Ansible does not have access to the Galaxy API, for instance when behind a firewall or proxy. OPTIONS ~~~~~~~ *-f*, *--force*:: Force overwriting an existing role. *-i*, *--ignore-errors*:: Ignore errors and continue with the next specified role. *-n*, *--no-deps*:: Don't download roles listed as dependencies. *-p* 'ROLES_PATH', *--roles-path=*'ROLES_PATH':: The path to the directory containing your roles. The default is the *roles_path* configured in your *ansible.cfg* file (/etc/ansible/roles if not configured) *-r* 'ROLE_FILE', *--role-file=*'ROLE_FILE':: A file containing a list of roles to be imported, as specified above. This option cannot be used if a rolename or .tar.gz have been specified. REMOVE ------ The *remove* sub-command is used to remove one or more roles. USAGE ~~~~~ $ ansible-galaxy remove role1 role2 ... OPTIONS ~~~~~~~ *-p* 'ROLES_PATH', *--roles-path=*'ROLES_PATH':: The path to the directory containing your roles. 
The default is the *roles_path* configured in your *ansible.cfg*
file (/etc/ansible/roles if not configured)

INIT
----

The *init* command is used to create an empty role suitable for uploading
to https://galaxy.ansible.com (or for roles in general).

USAGE
~~~~~

$ ansible-galaxy init [options] role_name

OPTIONS
~~~~~~~

*-f*, *--force*::

Force overwriting an existing role.

*-p* 'INIT_PATH', *--init-path=*'INIT_PATH'::

The path in which the skeleton role will be created. The default is the
current working directory.

*--offline*::

Don't query the galaxy API when creating roles.

LIST
----

The *list* sub-command is used to show what roles are currently installed.
You can specify a role name, and if installed only that role will be shown.

USAGE
~~~~~

$ ansible-galaxy list [role_name]

OPTIONS
~~~~~~~

*-p* 'ROLES_PATH', *--roles-path=*'ROLES_PATH'::

The path to the directory containing your roles. The default is the
*roles_path* configured in your *ansible.cfg* file (/etc/ansible/roles if
not configured)

SEARCH
------

The *search* sub-command returns a filtered list of roles found on the
remote server.

USAGE
~~~~~

$ ansible-galaxy search [options] [searchterm1 searchterm2]

OPTIONS
~~~~~~~

*--galaxy-tags*::

Provide a comma-separated list of Galaxy Tags on which to filter.

*--platforms*::

Provide a comma-separated list of Platforms on which to filter.

*--author*::

Specify the username of a Galaxy contributor on which to filter.

*-c*, *--ignore-certs*::

Ignore TLS certificate errors.

*-s*, *--server*::

Override the default server https://galaxy.ansible.com.

INFO
----

The *info* sub-command shows detailed information for a specific role.
Details returned about the role include information from the local copy as
well as information from galaxy.ansible.com.

USAGE
~~~~~

$ ansible-galaxy info [options] role_name[, version]

OPTIONS
~~~~~~~

*-p* 'ROLES_PATH', *--roles-path=*'ROLES_PATH'::

The path to the directory containing your roles. The default is the
*roles_path* configured in your *ansible.cfg* file (/etc/ansible/roles if
not configured)

*-c*, *--ignore-certs*::

Ignore TLS certificate errors.

*-s*, *--server*::

Override the default server https://galaxy.ansible.com.

LOGIN
-----

The *login* sub-command is used to authenticate with galaxy.ansible.com.
Authentication is required to use the import, delete and setup commands.
It will authenticate the user, retrieve a token from Galaxy, and store it
in the user's home directory.

USAGE
~~~~~

$ ansible-galaxy login [options]

The *login* sub-command prompts for a *GitHub* username and password. It
does NOT send your password to Galaxy. It actually authenticates with
GitHub and creates a personal access token. It then sends the personal
access token to Galaxy, which in turn verifies your identity and returns a
Galaxy access token. After authentication completes the *GitHub* personal
access token is destroyed.

If you do not wish to use your GitHub password, or if you have two-factor
authentication enabled with GitHub, use the *--github-token* option to pass
a personal access token that you create. Log into GitHub, go to Settings
and click on Personal Access Token to create a token.

OPTIONS
~~~~~~~

*-c*, *--ignore-certs*::

Ignore TLS certificate errors.

*-s*, *--server*::

Override the default server https://galaxy.ansible.com.

*--github-token*::

Authenticate using a *GitHub* personal access token rather than a password.

IMPORT
------

Import a role from *GitHub* to galaxy.ansible.com.
Requires the user first authenticate with galaxy.ansible.com using the *login* subcommand. USAGE ~~~~~ $ ansible-galaxy import [options] github_user github_repo OPTIONS ~~~~~~~ *-c*, *--ignore-certs*:: Ignore TLS certificate errors. *-s*, *--server*:: Override the default server https://galaxy.ansible.com. *--branch*:: Provide a specific branch to import. When a branch is not specified the branch found in meta/main.yml is used. If no branch is specified in meta/main.yml, the repo's default branch (usually master) is used. DELETE ------ The *delete* sub-command will delete a role from galaxy.ansible.com. Requires the user first authenticate with galaxy.ansible.com using the *login* subcommand. USAGE ~~~~~ $ ansible-galaxy delete [options] github_user github_repo OPTIONS ~~~~~~~ *-c*, *--ignore-certs*:: Ignore TLS certificate errors. *-s*, *--server*:: Override the default server https://galaxy.ansible.com. SETUP ----- The *setup* sub-command creates an integration point for *Travis CI*, enabling galaxy.ansible.com to receive notifications from *Travis* on build completion. Requires the user first authenticate with galaxy.ansible.com using the *login* subcommand. USAGE ~~~~~ $ ansible-galaxy setup [options] source github_user github_repo secret * Use *travis* as the source value. In the future additional source values may be added. * Provide your *Travis* user token as the secret. The token is not stored by galaxy.ansible.com. A hash is created using github_user, github_repo and your token. The hash value is what actually gets stored. OPTIONS ~~~~~~~ *-c*, *--ignore-certs*:: Ignore TLS certificate errors. *-s*, *--server*:: Override the default server https://galaxy.ansible.com. --list:: Show your configured integrations. Provids the ID of each integration which can be used with the remove option. --remove:: Remove a specific integration. Provide the ID of the integration to be removed. AUTHOR ------ Ansible was originally written by Michael DeHaan. See the AUTHORS file for a complete list of contributors. COPYRIGHT --------- Copyright © 2014, Michael DeHaan Ansible is released under the terms of the GPLv3 License. SEE ALSO -------- *ansible*(1), *ansible-pull*(1), *ansible-doc*(1), *ansible-playbook*(1), *ansible-vault*(1) Extensive documentation is available in the documentation site: . 
IRC and mailing list info can be found in file CONTRIBUTING.md, available in: ansible-2.1.1.0/docs/man/man1/ansible-playbook.10000664000175400017540000002124312746444522022314 0ustar jenkinsjenkins00000000000000'\" t .\" Title: ansible-playbook .\" Author: :doctype:manpage .\" Generator: DocBook XSL Stylesheets v1.78.1 .\" Date: 07/28/2016 .\" Manual: System administration commands .\" Source: Ansible 2.1.1.0 .\" Language: English .\" .TH "ANSIBLE\-PLAYBOOK" "1" "07/28/2016" "Ansible 2\&.1\&.1\&.0" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .\" http://bugs.debian.org/507673 .\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" ----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- .\" disable hyphenation .nh .\" disable justification (adjust text to left margin only) .ad l .\" ----------------------------------------------------------------- .\" * MAIN CONTENT STARTS HERE * .\" ----------------------------------------------------------------- .SH "NAME" ansible-playbook \- run an ansible playbook .SH "SYNOPSIS" .sp ansible\-playbook \&... [options] .SH "DESCRIPTION" .sp \fBAnsible playbooks\fR are a configuration and multinode deployment system\&. Ansible\-playbook is the tool used to run them\&. See the project home page (link below) for more information\&. .SH "ARGUMENTS" .PP \fBfilename\&.yml\fR .RS 4 The names of one or more YAML format files to run as ansible playbooks\&. .RE .SH "OPTIONS" .PP \fB\-b\fR, \fB\-\-become\fR .RS 4 Use privilege escalation (specific one depends on become_method), this does not imply prompting for passwords\&. .RE .PP \fB\-K\fR, \fB\-\-ask\-become\-pass\fR .RS 4 Ask for privilege escalation password\&. .RE .PP \fB\-k\fR, \fB\-\-ask\-pass\fR .RS 4 Prompt for the connection password, if it is needed for the transport used\&. For example, using ssh and not having a key\-based authentication with ssh\-agent\&. .RE .PP \fB\-\-ask\-su\-pass\fR .RS 4 Prompt for su password, used with \-\-su (deprecated, use become)\&. .RE .PP \fB\-\-ask\-sudo\-pass\fR .RS 4 Prompt for the password to use with \-\-sudo, if any (deprecated, use become)\&. .RE .PP \fB\-\-ask\-vault\-pass\fR .RS 4 Prompt for vault password\&. .RE .PP \fB\-C\fR, \fB\-\-check\fR .RS 4 Do not make any changes on the remote system, but test resources to see what might have changed\&. Note this can not scan all possible resource types and is only a simulation\&. .RE .PP \fB\-c\fR \fICONNECTION\fR, \fB\-\-connection=\fR\fICONNECTION\fR .RS 4 Connection type to use\&. Most common options are \fIparamiko\fR (SSH), \fIssh\fR, \fIwinrm\fR and \fIlocal\fR\&. \fIlocal\fR is mostly useful for crontab or kickstarts\&. .RE .PP \fB\-D\fR, \fB\-\-diff\fR .RS 4 When changing any templated files, show the unified diffs of how they changed\&. When used with \-\-check, shows how the files would have changed if \-\-check were not used\&. .RE .PP \fB\-e\fR \fIEXTRA_VARS\fR, \fB\-\-extra\-vars=\fR\fIEXTRA_VARS\fR .RS 4 Extra variables to inject into a playbook, in key=value key=value format or as quoted YAML/JSON (hashes and arrays)\&. 
To load variables from a file, specify the file preceded by @ (e\&.g\&. @vars\&.yml)\&. .RE .PP \fB\-\-flush\-cache\fR .RS 4 Clear the fact cache\&. .RE .PP \fB\-\-force\-handlers\fR .RS 4 Run handlers even if a task fails\&. .RE .PP \fB\-f\fR \fINUM\fR, \fB\-\-forks=\fR\fINUM\fR .RS 4 Level of parallelism\&. \fINUM\fR is specified as an integer, the default is 5\&. .RE .PP \fB\-h\fR, \fB\-\-help\fR .RS 4 Show help page and exit .RE .PP \fB\-i\fR \fIPATH\fR, \fB\-\-inventory=\fR\fIPATH\fR .RS 4 The \fIPATH\fR to the inventory, which defaults to \fI/etc/ansible/hosts\fR\&. Alternatively, you can use a comma\-separated list of hosts or a single host with a trailing comma \fIhost,\fR\&. .RE .PP \fB\-l\fR \fISUBSET\fR, \fB\-\-limit=\fR\fISUBSET\fR .RS 4 Further limits the selected host/group patterns\&. You can prefix it with \fI~\fR to indicate that the pattern is a regex\&. .RE .PP \fB\-\-list\-hosts\fR .RS 4 Outputs a list of matching hosts; does not execute anything else\&. .RE .PP \fB\-\-list\-tags\fR .RS 4 List all available tags; does not execute anything else\&. .RE .PP \fB\-\-list\-tasks\fR .RS 4 List all tasks that would be executed; does not execute anything else\&. .RE .PP \fB\-M\fR \fIDIRECTORY\fR, \fB\-\-module\-path=\fR\fIDIRECTORY\fR .RS 4 The \fIDIRECTORY\fR search path to load modules from\&. The default is \fI/usr/share/ansible\fR\&. This can also be set with the ANSIBLE_LIBRARY environment variable\&. .RE .PP \fB\-\-private\-key=\fR\fIPRIVATE_KEY_FILE\fR .RS 4 Use this file to authenticate the connection .RE .PP \fB\-\-start\-at\-task=\fR\fISTART_AT\fR .RS 4 Start the playbook at the task matching this name\&. .RE .PP \fB\-\-step\fR .RS 4 One\-step\-at\-a\-time: confirm each task before running\&. .RE .PP \fB\-S\fR, \-\-su* .RS 4 Run operations with su (deprecated, use become) .RE .PP \fB\-R SU\-USER\fR, \fB\-\-su\-user=\fR\fISU_USER\fR .RS 4 run operations with su as this user (default=root) (deprecated, use become) .RE .PP \fB\-s\fR, \fB\-\-sudo\fR .RS 4 Run the command as the user given by \-u and sudo to root (deprecated, use become)\&. .RE .PP \fB\-\-ssh\-common\-args=\fR\fI\*(Aq\-o ProxyCommand="ssh \-W %h:%p \&..." \&...\fR\*(Aq .RS 4 Add the specified arguments to any sftp/scp/ssh command\-line\&. Useful to set a ProxyCommand to use a jump host, but any arguments that are accepted by all three programs may be specified\&. .RE .PP \fB\-\-sftp\-extra\-args=\fR\fI\*(Aq\-f \&...\fR\*(Aq .RS 4 Add the specified arguments to any sftp command\-line\&. .RE .PP \fB\-\-scp\-extra\-args=\fR\fI\*(Aq\-l \&...\fR\*(Aq .RS 4 Add the specified arguments to any scp command\-line\&. .RE .PP \fB\-\-ssh\-extra\-args=\fR\fI\*(Aq\-R \&...\fR\*(Aq .RS 4 Add the specified arguments to any ssh command\-line\&. .RE .PP \fB\-U\fR \fISUDO_USERNAME\fR, \fB\-\-sudo\-user=\fR\fISUDO_USERNAME\fR .RS 4 Sudo to \fISUDO_USERNAME\fR default is root\&. (deprecated, use become)\&. .RE .PP \fB\-\-skip\-tags=\fR\fISKIP_TAGS\fR .RS 4 Only run plays and tasks whose tags do not match these values\&. .RE .PP \fB\-\-syntax\-check\fR .RS 4 Look for syntax errors in the playbook, but don\(cqt run anything .RE .PP \fB\-t\fR, \fITAGS\fR, \fB\-\-tags=\fR\fITAGS\fR .RS 4 Only run plays and tasks tagged with these values\&. .RE .PP \fB\-T\fR \fISECONDS\fR, \fB\-\-timeout=\fR\fISECONDS\fR .RS 4 Connection timeout to use when trying to talk to hosts, in \fISECONDS\fR\&. 
.RE .PP \fB\-u\fR \fIUSERNAME\fR, \fB\-\-user=\fR\fIUSERNAME\fR .RS 4 Use this \fIUSERNAME\fR to login to the target host, instead of the current user\&. .RE .PP \fB\-\-vault\-password\-file=\fR\fIVAULT_PASSWORD_FILE\fR .RS 4 Vault password file\&. .RE .PP \fB\-v\fR, \fB\-\-verbose\fR .RS 4 Verbose mode, more output from successful actions will be shown\&. Give up to three times for more output\&. .RE .PP \fB\-\-version\fR .RS 4 Show program\(cqs version number and exit\&. .RE .SH "EXIT STATUS" .sp \fB0\fR \(em OK or no hosts matched .sp \fB1\fR \(em Error .sp \fB2\fR \(em One or more hosts failed .sp \fB3\fR \(em One or more hosts were unreachable .sp \fB4\fR \(em Parser error .sp \fB5\fR \(em Bad or incomplete options .sp \fB99\fR \(em User interrupted execution .sp \fB250\fR \(em Unexpected error .SH "ENVIRONMENT" .sp The following environment variables may be specified\&. .sp ANSIBLE_INVENTORY \(em Override the default ansible inventory file .sp ANSIBLE_LIBRARY \(em Override the default ansible module library path .sp ANSIBLE_CONFIG \(em Override the default ansible config file .sp Many more are available for most options in ansible\&.cfg .SH "FILES" .sp /etc/ansible/hosts \(em Default inventory file .sp /usr/share/ansible/ \(em Default module library .sp /etc/ansible/ansible\&.cfg \(em Config file, used if present .sp ~/\&.ansible\&.cfg \(em User config file, overrides the default config if present .SH "AUTHOR" .sp Ansible was originally written by Michael DeHaan\&. See the AUTHORS file for a complete list of contributors\&. .SH "COPYRIGHT" .sp Copyright \(co 2012, Michael DeHaan .sp Ansible is released under the terms of the GPLv3 License\&. .SH "SEE ALSO" .sp \fBansible\fR(1), \fBansible\-pull\fR(1), \fBansible\-doc\fR(1), \fBansible\-vault\fR(1), \fBansible\-galaxy\fR(1) .sp Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible .SH "AUTHOR" .PP \fB:doctype:manpage\fR .RS 4 Author. .RE ansible-2.1.1.0/docs/man/man1/ansible-playbook.1.asciidoc.in0000664000175400017540000001444212746444466024510 0ustar jenkinsjenkins00000000000000ansible-playbook(1) =================== :doctype:manpage :man source: Ansible :man version: %VERSION% :man manual: System administration commands NAME ---- ansible-playbook - run an ansible playbook SYNOPSIS -------- ansible-playbook ... [options] DESCRIPTION ----------- *Ansible playbooks* are a configuration and multinode deployment system. Ansible-playbook is the tool used to run them. See the project home page (link below) for more information. ARGUMENTS --------- *filename.yml*:: The names of one or more YAML format files to run as ansible playbooks. OPTIONS ------- *-b*, *--become*:: Use privilege escalation (specific one depends on become_method), this does not imply prompting for passwords. *-K*, *--ask-become-pass*:: Ask for privilege escalation password. *-k*, *--ask-pass*:: Prompt for the connection password, if it is needed for the transport used. For example, using ssh and not having a key-based authentication with ssh-agent. *--ask-su-pass*:: Prompt for su password, used with --su (deprecated, use become). *--ask-sudo-pass*:: Prompt for the password to use with --sudo, if any (deprecated, use become). *--ask-vault-pass*:: Prompt for vault password. *-C*, *--check*:: Do not make any changes on the remote system, but test resources to see what might have changed. 
Note this can not scan all possible resource types and is only a simulation. *-c* 'CONNECTION', *--connection=*'CONNECTION':: Connection type to use. Most common options are 'paramiko' (SSH), 'ssh', 'winrm' and 'local'. 'local' is mostly useful for crontab or kickstarts. *-D*, *--diff*:: When changing any templated files, show the unified diffs of how they changed. When used with --check, shows how the files would have changed if --check were not used. *-e* 'EXTRA_VARS', *--extra-vars=*'EXTRA_VARS':: Extra variables to inject into a playbook, in key=value key=value format or as quoted YAML/JSON (hashes and arrays). To load variables from a file, specify the file preceded by @ (e.g. @vars.yml). *--flush-cache*:: Clear the fact cache. *--force-handlers*:: Run handlers even if a task fails. *-f* 'NUM', *--forks=*'NUM':: Level of parallelism. 'NUM' is specified as an integer, the default is 5. *-h*, *--help*:: Show help page and exit. *-i* 'PATH', *--inventory=*'PATH':: The 'PATH' to the inventory, which defaults to '/etc/ansible/hosts'. Alternatively, you can use a comma-separated list of hosts or a single host with a trailing comma 'host,'. *-l* 'SUBSET', *--limit=*'SUBSET':: Further limits the selected host/group patterns. You can prefix it with '~' to indicate that the pattern is a regex. *--list-hosts*:: Outputs a list of matching hosts; does not execute anything else. *--list-tags*:: List all available tags; does not execute anything else. *--list-tasks*:: List all tasks that would be executed; does not execute anything else. *-M* 'DIRECTORY', *--module-path=*'DIRECTORY':: The 'DIRECTORY' search path to load modules from. The default is '/usr/share/ansible'. This can also be set with the ANSIBLE_LIBRARY environment variable. *--private-key=*'PRIVATE_KEY_FILE':: Use this file to authenticate the connection. *--start-at-task=*'START_AT':: Start the playbook at the task matching this name. *--step*:: One-step-at-a-time: confirm each task before running. *-S*, *--su*:: Run operations with su (deprecated, use become). *-R* 'SU_USER', *--su-user=*'SU_USER':: Run operations with su as this user (default=root) (deprecated, use become). *-s*, *--sudo*:: Run the command as the user given by -u and sudo to root (deprecated, use become). *--ssh-common-args=*''-o ProxyCommand="ssh -W %h:%p ..." ...'':: Add the specified arguments to any sftp/scp/ssh command-line. Useful to set a ProxyCommand to use a jump host, but any arguments that are accepted by all three programs may be specified. *--sftp-extra-args=*''-f ...'':: Add the specified arguments to any sftp command-line. *--scp-extra-args=*''-l ...'':: Add the specified arguments to any scp command-line. *--ssh-extra-args=*''-R ...'':: Add the specified arguments to any ssh command-line. *-U* 'SUDO_USERNAME', *--sudo-user=*'SUDO_USERNAME':: Sudo to 'SUDO_USERNAME', default is root (deprecated, use become). *--skip-tags=*'SKIP_TAGS':: Only run plays and tasks whose tags do not match these values. *--syntax-check*:: Look for syntax errors in the playbook, but don't run anything. *-t* 'TAGS', *--tags=*'TAGS':: Only run plays and tasks tagged with these values. *-T* 'SECONDS', *--timeout=*'SECONDS':: Connection timeout to use when trying to talk to hosts, in 'SECONDS'. *-u* 'USERNAME', *--user=*'USERNAME':: Use this 'USERNAME' to login to the target host, instead of the current user. *--vault-password-file=*'VAULT_PASSWORD_FILE':: Vault password file. *-v*, *--verbose*:: Verbose mode, more output from successful actions will be shown.
Give up to three times for more output. *--version*:: Show program's version number and exit. EXIT STATUS ----------- *0* -- OK or no hosts matched *1* -- Error *2* -- One or more hosts failed *3* -- One or more hosts were unreachable *4* -- Parser error *5* -- Bad or incomplete options *99* -- User interrupted execution *250* -- Unexpected error ENVIRONMENT ----------- The following environment variables may be specified. ANSIBLE_INVENTORY -- Override the default ansible inventory file ANSIBLE_LIBRARY -- Override the default ansible module library path ANSIBLE_CONFIG -- Override the default ansible config file Many more are available for most options in ansible.cfg FILES ----- /etc/ansible/hosts -- Default inventory file /usr/share/ansible/ -- Default module library /etc/ansible/ansible.cfg -- Config file, used if present ~/.ansible.cfg -- User config file, overrides the default config if present AUTHOR ------ Ansible was originally written by Michael DeHaan. See the AUTHORS file for a complete list of contributors. COPYRIGHT --------- Copyright © 2012, Michael DeHaan Ansible is released under the terms of the GPLv3 License. SEE ALSO -------- *ansible*(1), *ansible-pull*(1), *ansible-doc*(1), *ansible-vault*(1), *ansible-galaxy*(1) Extensive documentation is available in the documentation site: <http://docs.ansible.com>. IRC and mailing list info can be found in file CONTRIBUTING.md, available in: <https://github.com/ansible/ansible> ansible-2.1.1.0/docs/man/man1/ansible-pull.10000664000175400017540000002002312746444523021444 0ustar jenkinsjenkins00000000000000'\" t .\" Title: ansible .\" Author: :doctype:manpage .\" Generator: DocBook XSL Stylesheets v1.78.1 .\" Date: 07/28/2016 .\" Manual: System administration commands .\" Source: Ansible 2.1.1.0 .\" Language: English .\" .TH "ANSIBLE" "1" "07/28/2016" "Ansible 2\&.1\&.1\&.0" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .\" http://bugs.debian.org/507673 .\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" ----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- .\" disable hyphenation .nh .\" disable justification (adjust text to left margin only) .ad l .\" ----------------------------------------------------------------- .\" * MAIN CONTENT STARTS HERE * .\" ----------------------------------------------------------------- .SH "NAME" ansible-pull \- pull playbooks from VCS server and run them using this machine as the target\&. .SH "SYNOPSIS" .sp ansible\-pull \-U URL [options] [ <filename\&.yml> ] .SH "DESCRIPTION" .sp \fBAnsible\fR is an extra\-simple tool/framework/API for doing \*(Aqremote things\*(Aq\&. .sp Use ansible\-pull to set up a remote copy of ansible on each managed node, each set to run via cron and update playbook source via a source repository\&. This inverts the default \fBpush\fR architecture of ansible into a \fBpull\fR architecture, which has near\-limitless scaling potential\&. .sp The setup playbook can be tuned to change the cron frequency, logging locations, and parameters to ansible\-pull\&. .sp This is useful both for extreme scale\-out as well as periodic remediation\&.
Usage of the \fIfetch\fR module to retrieve logs from ansible\-pull runs would be an excellent way to gather and analyze remote logs from ansible\-pull\&. .SH "OPTIONAL ARGUMENT" .PP \fBfilename\&.yml\fR .RS 4 The name of one of the YAML format files to run as an ansible playbook\&. This can be a relative path within the checkout\&. If not provided, ansible\-pull will look for a playbook based on the host\(cqs fully\-qualified domain name, on the host hostname and finally a playbook named \fBlocal\&.yml\fR\&. .RE .SH "OPTIONS" .PP \fB\-\-accept\-host\-key\fR .RS 4 Adds the hostkey for the repo URL if not already added\&. .RE .PP \fB\-b\fR, \fB\-\-become\fR .RS 4 Use privilege escalation (specific one depends on become_method), this does not imply prompting for passwords\&. .RE .PP \fB\-K\fR, \fB\-\-ask\-become\-pass\fR .RS 4 Ask for privilege escalation password\&. .RE .PP \fB\-k\fR, \fB\-\-ask\-pass\fR .RS 4 Prompt for the connection password, if it is needed for the transport used\&. For example, using ssh and not having a key\-based authentication with ssh\-agent\&. .RE .PP \fB\-\-ask\-su\-pass\fR .RS 4 Prompt for su password, used with \-\-su (deprecated, use become)\&. .RE .PP \fB\-\-ask\-sudo\-pass\fR .RS 4 Prompt for the password to use with \-\-sudo, if any (deprecated, use become)\&. .RE .PP \fB\-\-ask\-vault\-pass\fR .RS 4 Prompt for vault password\&. .RE .PP \fB\-C\fR \fICHECKOUT\fR, \fB\-\-checkout=\fR\fICHECKOUT\fR .RS 4 Branch/Tag/Commit to checkout\&. If not provided, uses default behavior of module used to check out playbook repository\&. .RE .PP \fB\-d\fR \fIDEST\fR, \fB\-\-directory=\fR\fIDEST\fR .RS 4 Directory to checkout repository into\&. If not provided, a subdirectory of ~/\&.ansible/pull/ will be used\&. .RE .PP \fB\-e\fR \fIEXTRA_VARS\fR, \fB\-\-extra\-vars=\fR\fIEXTRA_VARS\fR .RS 4 Extra variables to inject into a playbook, in key=value key=value format or as quoted YAML/JSON (hashes and arrays)\&. To load variables from a file, specify the file preceded by @ (e\&.g\&. @vars\&.yml)\&. .RE .PP \fB\-f\fR, \fB\-\-force\fR .RS 4 Force running of playbook even if unable to update playbook repository\&. This can be useful, for example, to enforce run\-time state when a network connection may not always be up or possible\&. .RE .PP \fB\-\-full\fR .RS 4 Do a full clone of the repository\&. By default ansible\-pull will do a shallow clone based on the last revision\&. .RE .PP \fB\-h\fR, \fB\-\-help\fR .RS 4 Show the help message and exit\&. .RE .PP \fB\-i\fR \fIPATH\fR, \fB\-\-inventory=\fR\fIPATH\fR .RS 4 The \fIPATH\fR to the inventory, which defaults to \fI/etc/ansible/hosts\fR\&. Alternatively you can use a comma\-separated list of hosts or a single host with a trailing comma \fIhost,\fR\&. .RE .PP \fB\-\-private\-key=\fR\fIPRIVATE_KEY_FILE\fR .RS 4 Use this file to authenticate the connection\&. .RE .PP \fB\-m\fR \fINAME\fR, \fB\-\-module\-name=\fR\fINAME\fR .RS 4 Module used to checkout playbook repository\&. Defaults to git\&. .RE .PP \fB\-o\fR, \fB\-\-only\-if\-changed\fR .RS 4 Only run the playbook if the repository has been updated\&. .RE .PP \fB\-\-purge\fR .RS 4 Purge the checkout after the playbook is run\&. .RE .PP \fB\-s\fR \fISLEEP\fR, \fB\-\-sleep=\fR\fISLEEP\fR .RS 4 Sleep for random interval (between 0 and SLEEP number of seconds) before starting\&. This is a useful way to disperse git requests\&. .RE .PP \fB\-\-ssh\-common\-args=\fR\fI\*(Aq\-o ProxyCommand="ssh \-W %h:%p \&..." \&...\fR\*(Aq .RS 4 Add the specified arguments to any sftp/scp/ssh command\-line\&.
Useful to set a ProxyCommand to use a jump host, but any arguments that are accepted by all three programs may be specified\&. .RE .PP \fB\-\-sftp\-extra\-args=\fR\fI\*(Aq\-f \&...\fR\*(Aq .RS 4 Add the specified arguments to any sftp command\-line\&. .RE .PP \fB\-\-scp\-extra\-args=\fR\fI\*(Aq\-l \&...\fR\*(Aq .RS 4 Add the specified arguments to any scp command\-line\&. .RE .PP \fB\-\-ssh\-extra\-args=\fR\fI\*(Aq\-R \&...\fR\*(Aq .RS 4 Add the specified arguments to any ssh command\-line\&. .RE .PP \fB\-t\fR \fITAGS\fR, \fB\-\-tags=\fR\fITAGS\fR .RS 4 Only run plays and tasks tagged with these values\&. .RE .PP \fB\-U\fR \fIURL\fR, \fB\-\-url=\fR\fIURL\fR .RS 4 URL of the playbook repository to checkout\&. .RE .PP \fB\-\-vault\-password\-file=\fR\fIVAULT_PASSWORD_FILE\fR .RS 4 Vault password file\&. .RE .PP \fB\-v\fR, \fB\-\-verbose\fR .RS 4 Pass \-vvv to ansible\-playbook\&. .RE .SH "INVENTORY" .sp Ansible stores the hosts it can potentially operate on in an inventory\&. This can be an ini\-like file, a script, directory or a list\&. The ini syntax is one host per line\&. Group headers are allowed and are included on their own line, enclosed in square brackets that start the line\&. .sp Ranges of hosts are also supported\&. For more information and additional options, see the documentation on http://docs\&.ansible\&.com/\&. .SH "ENVIRONMENT" .sp The following environment variables may be specified\&. .sp ANSIBLE_INVENTORY \(em Override the default ansible inventory file .sp ANSIBLE_LIBRARY \(em Override the default ansible module library path .sp ANSIBLE_CONFIG \(em Override the default ansible config file .sp Many more are available for most options in ansible\&.cfg .SH "FILES" .sp /etc/ansible/hosts \(em Default inventory file .sp /usr/share/ansible/ \(em Default module library .sp /etc/ansible/ansible\&.cfg \(em Config file, used if present .sp ~/\&.ansible\&.cfg \(em User config file, overrides the default config if present .SH "AUTHOR" .sp Ansible was originally written by Michael DeHaan\&. See the AUTHORS file for a complete list of contributors\&. .SH "COPYRIGHT" .sp Copyright \(co 2012, Michael DeHaan Ansible is released under the terms of the GPLv3 License\&. .SH "SEE ALSO" .sp \fBansible\fR(1), \fBansible\-playbook\fR(1), \fBansible\-doc\fR(1), \fBansible\-vault\fR(1), \fBansible\-galaxy\fR(1) .sp Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible .SH "AUTHOR" .PP \fB:doctype:manpage\fR .RS 4 Author. .RE ansible-2.1.1.0/docs/man/man1/ansible-pull.1.asciidoc.in0000664000175400017540000001374212746444466023642 0ustar jenkinsjenkins00000000000000ansible-pull(1) =============== :doctype:manpage :man source: Ansible :man version: %VERSION% :man manual: System administration commands NAME ---- ansible-pull - pull playbooks from VCS server and run them using this machine as the target. SYNOPSIS -------- ansible-pull -U URL [options] [ <filename.yml> ] DESCRIPTION ----------- *Ansible* is an extra-simple tool/framework/API for doing \'remote things'. Use ansible-pull to set up a remote copy of ansible on each managed node, each set to run via cron and update playbook source via a source repository. This inverts the default *push* architecture of ansible into a *pull* architecture, which has near-limitless scaling potential. The setup playbook can be tuned to change the cron frequency, logging locations, and parameters to ansible-pull.
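As a concrete illustration (the repository URL here is an assumption made up for the example, not a project default), each managed node could periodically run *$ ansible-pull -U https://git.example.com/ansible-config.git -o* from cron, updating its checkout and applying *local.yml* only when the repository has changed (*-o* is *--only-if-changed*, described below).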
This is useful both for extreme scale-out as well as periodic remediation. Usage of the 'fetch' module to retrieve logs from ansible-pull runs would be an excellent way to gather and analyze remote logs from ansible-pull. OPTIONAL ARGUMENT ----------------- *filename.yml*:: The name of one of the YAML format files to run as an ansible playbook. This can be a relative path within the checkout. If not provided, ansible-pull will look for a playbook based on the host's fully-qualified domain name, on the host hostname and finally a playbook named *local.yml*. OPTIONS ------- *--accept-host-key*:: Adds the hostkey for the repo URL if not already added. *-b*, *--become*:: Use privilege escalation (specific one depends on become_method), this does not imply prompting for passwords. *-K*, *--ask-become-pass*:: Ask for privilege escalation password. *-k*, *--ask-pass*:: Prompt for the connection password, if it is needed for the transport used. For example, using ssh and not having a key-based authentication with ssh-agent. *--ask-su-pass*:: Prompt for su password, used with --su (deprecated, use become). *--ask-sudo-pass*:: Prompt for the password to use with --sudo, if any (deprecated, use become). *--ask-vault-pass*:: Prompt for vault password. *-C* 'CHECKOUT', *--checkout=*'CHECKOUT':: Branch/Tag/Commit to checkout. If not provided, uses default behavior of module used to check out playbook repository. *-d* 'DEST', *--directory=*'DEST':: Directory to checkout repository into. If not provided, a subdirectory of ~/.ansible/pull/ will be used. *-e* 'EXTRA_VARS', *--extra-vars=*'EXTRA_VARS':: Extra variables to inject into a playbook, in key=value key=value format or as quoted YAML/JSON (hashes and arrays). To load variables from a file, specify the file preceded by @ (e.g. @vars.yml). *-f*, *--force*:: Force running of playbook even if unable to update playbook repository. This can be useful, for example, to enforce run-time state when a network connection may not always be up or possible. *--full*:: Do a full clone of the repository. By default ansible-pull will do a shallow clone based on the last revision. *-h*, *--help*:: Show the help message and exit. *-i* 'PATH', *--inventory=*'PATH':: The 'PATH' to the inventory, which defaults to '/etc/ansible/hosts'. Alternatively you can use a comma-separated list of hosts or a single host with a trailing comma 'host,'. *--private-key=*'PRIVATE_KEY_FILE':: Use this file to authenticate the connection. *-m* 'NAME', *--module-name=*'NAME':: Module used to checkout playbook repository. Defaults to git. *-o*, *--only-if-changed*:: Only run the playbook if the repository has been updated. *--purge*:: Purge the checkout after the playbook is run. *-s* 'SLEEP', *--sleep=*'SLEEP':: Sleep for random interval (between 0 and SLEEP number of seconds) before starting. This is a useful way to disperse git requests. *--ssh-common-args=*''-o ProxyCommand="ssh -W %h:%p ..." ...'':: Add the specified arguments to any sftp/scp/ssh command-line. Useful to set a ProxyCommand to use a jump host, but any arguments that are accepted by all three programs may be specified. *--sftp-extra-args=*''-f ...'':: Add the specified arguments to any sftp command-line. *--scp-extra-args=*''-l ...'':: Add the specified arguments to any scp command-line. *--ssh-extra-args=*''-R ...'':: Add the specified arguments to any ssh command-line. *-t* 'TAGS', *--tags=*'TAGS':: Only run plays and tasks tagged with these values. *-U* 'URL', *--url=*'URL':: URL of the playbook repository to checkout.
*--vault-password-file=*'VAULT_PASSWORD_FILE':: Vault password file. *-v*, *--verbose*:: Pass -vvv to ansible-playbook. INVENTORY --------- Ansible stores the hosts it can potentially operate on in an inventory. This can be an ini-like file, a script, directory or a list. The ini syntax is one host per line. Group headers are allowed and are included on their own line, enclosed in square brackets that start the line. Ranges of hosts are also supported. For more information and additional options, see the documentation on http://docs.ansible.com/. ENVIRONMENT ----------- The following environment variables may be specified. ANSIBLE_INVENTORY -- Override the default ansible inventory file ANSIBLE_LIBRARY -- Override the default ansible module library path ANSIBLE_CONFIG -- Override the default ansible config file Many more are available for most options in ansible.cfg FILES ----- /etc/ansible/hosts -- Default inventory file /usr/share/ansible/ -- Default module library /etc/ansible/ansible.cfg -- Config file, used if present ~/.ansible.cfg -- User config file, overrides the default config if present AUTHOR ------ Ansible was originally written by Michael DeHaan. See the AUTHORS file for a complete list of contributors. COPYRIGHT --------- Copyright © 2012, Michael DeHaan Ansible is released under the terms of the GPLv3 License. SEE ALSO -------- *ansible*(1), *ansible-playbook*(1), *ansible-doc*(1), *ansible-vault*(1), *ansible-galaxy*(1) Extensive documentation is available in the documentation site: <http://docs.ansible.com>. IRC and mailing list info can be found in file CONTRIBUTING.md, available in: <https://github.com/ansible/ansible> ansible-2.1.1.0/docs/man/man1/ansible-vault.10000664000175400017540000001452012746444526021633 0ustar jenkinsjenkins00000000000000'\" t .\" Title: ansible-vault .\" Author: [see the "AUTHOR" section] .\" Generator: DocBook XSL Stylesheets v1.78.1 .\" Date: 07/28/2016 .\" Manual: System administration commands .\" Source: Ansible 2.1.1.0 .\" Language: English .\" .TH "ANSIBLE\-VAULT" "1" "07/28/2016" "Ansible 2\&.1\&.1\&.0" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .\" http://bugs.debian.org/507673 .\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" ----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- .\" disable hyphenation .nh .\" disable justification (adjust text to left margin only) .ad l .\" ----------------------------------------------------------------- .\" * MAIN CONTENT STARTS HERE * .\" ----------------------------------------------------------------- .SH "NAME" ansible-vault \- manage encrypted ansible vars files (YAML)\&. .SH "SYNOPSIS" .sp ansible\-vault [create|decrypt|edit|encrypt|rekey] [\-\-help] [options] file_name .SH "DESCRIPTION" .sp \fBansible\-vault\fR can encrypt any structured data file used by Ansible\&. This can include \fBgroup_vars/\fR or \fBhost_vars/\fR inventory variables, variables loaded by \fBinclude_vars\fR or \fBvars_files\fR, or variable files passed on the ansible\-playbook command line with \fB\-e @file\&.yml\fR or \fB\-e @file\&.json\fR\&. Role variables and defaults are also included!
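.sp For example (a sketch only; the playbook and vars file names here are illustrative assumptions), a vault\-encrypted vars file is passed to ansible\-playbook exactly like a plain one: .sp \fB$ ansible\-playbook site\&.yml \-\-ask\-vault\-pass \-e @secrets\&.yml\fR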
.sp Because Ansible tasks, handlers, and so on are also data, these can also be encrypted with vault\&. If you\(cqd like to not betray what variables you are even using, you can go so far as to keep an individual task file entirely encrypted\&. .sp The password used with vault currently must be the same for all files you wish to use together at the same time\&. .SH "COMMON OPTIONS" .sp The following options are available to all sub\-commands: .PP \fB\-\-vault\-password\-file=\fR\fIFILE\fR .RS 4 A file containing the vault password to be used during the encryption/decryption steps\&. Be sure to keep this file secured if it is used\&. If the file is executable, it will be run and its standard output will be used as the password\&. .RE .PP \fB\-\-new\-vault\-password\-file=\fR\fIFILE\fR .RS 4 A file containing the new vault password to be used when rekeying a file\&. Be sure to keep this file secured if it is used\&. If the file is executable, it will be run and its standard output will be used as the password\&. .RE .PP \fB\-h\fR, \fB\-\-help\fR .RS 4 Show a help message related to the given sub\-command\&. .RE .sp If \fI\-\-vault\-password\-file\fR is not supplied, ansible\-vault will automatically prompt for passwords as required\&. .SH "CREATE" .sp \fB$ ansible\-vault create [options] FILE\fR .sp The \fBcreate\fR sub\-command is used to initialize a new encrypted file\&. .sp After providing a password, the tool will launch whatever editor you have defined with $EDITOR, and defaults to vi\&. Once you are done with the editor session, the file will be saved as encrypted data\&. .sp The default cipher is AES (which is shared\-secret based)\&. .SH "EDIT" .sp \fB$ ansible\-vault edit [options] FILE\fR .sp The \fBedit\fR sub\-command is used to modify a file which was previously encrypted using ansible\-vault\&. .sp This command will decrypt the file to a temporary file and allow you to edit the file, saving it back when done and removing the temporary file\&. .SH "REKEY" .sp \fB$ ansible\-vault rekey [options] FILE_1 [FILE_2, \&..., FILE_N]\fR .sp The \fBrekey\fR command is used to change the password on vault\-encrypted files\&. This command can update multiple files at once\&. .SH "ENCRYPT" .sp \fB$ ansible\-vault encrypt [options] FILE_1 [FILE_2, \&..., FILE_N]\fR .sp The \fBencrypt\fR sub\-command is used to encrypt pre\-existing data files\&. As with the \fBrekey\fR command, you can specify multiple files in one command\&. .sp The \fBencrypt\fR command accepts an \fB\-\-output FILENAME\fR option to determine where encrypted output is stored\&. With this option, input is read from the (at most one) filename given on the command line; if no input file is given, input is read from stdin\&. Either the input or the output file may be given as \fI\-\fR for stdin and stdout respectively\&. If neither input nor output file is given, the command acts as a filter, reading plaintext from stdin and writing it to stdout\&. .sp Thus any of the following invocations can be used: .sp \fB$ ansible\-vault encrypt\fR .sp \fB$ ansible\-vault encrypt \-\-output OUTFILE\fR .sp \fB$ ansible\-vault encrypt INFILE \-\-output OUTFILE\fR .sp \fB$ echo secret|ansible\-vault encrypt \-\-output OUTFILE\fR .sp Reading from stdin and writing only encrypted output is a good way to prevent sensitive data from ever hitting disk (either interactively or from a script)\&.
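.sp As a further illustration (the shell variable and output file names here are assumptions for the example), a secret held in a shell variable can be encrypted without its plaintext ever being written to disk: .sp \fB$ printf %s "$DB_PASSWORD" | ansible\-vault encrypt \-\-output db_vault\&.yml\fR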
.SH "DECRYPT" .sp \fB$ ansible\-vault decrypt [options] FILE_1 [FILE_2, \&..., FILE_N]\fR .sp The \fBdecrypt\fR sub\-command is used to remove all encryption from data files\&. The files will be stored as plain\-text YAML once again, so be sure that you do not run this command on data files with active passwords or other sensitive data\&. In most cases, users will want to use the \fBedit\fR sub\-command to modify the files securely\&. .sp As with \fBencrypt\fR, the \fBdecrypt\fR subcommand also accepts the \fB\-\-output FILENAME\fR option to specify where plaintext output is stored, and stdin/stdout is handled as described above\&. .SH "AUTHOR" .sp Ansible was originally written by Michael DeHaan\&. See the AUTHORS file for a complete list of contributors\&. .SH "COPYRIGHT" .sp Copyright \(co 2014, Michael DeHaan .sp Ansible is released under the terms of the GPLv3 License\&. .SH "SEE ALSO" .sp \fBansible\fR(1), \fBansible\-pull\fR(1), \fBansible\-doc\fR(1), \fBansible\-playbook\fR(1), \fBansible\-galaxy\fR(1) .sp Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible ansible-2.1.1.0/docs/man/man1/ansible-vault.1.asciidoc.in0000664000175400017540000001155212746444466024022 0ustar jenkinsjenkins00000000000000ansible-vault(1) ================ :doctype: manpage :man source: Ansible :man version: %VERSION% :man manual: System administration commands NAME ---- ansible-vault - manage encrypted ansible vars files (YAML). SYNOPSIS -------- ansible-vault [create|decrypt|edit|encrypt|rekey] [--help] [options] file_name DESCRIPTION ----------- *ansible-vault* can encrypt any structured data file used by Ansible. This can include *group_vars/* or *host_vars/* inventory variables, variables loaded by *include_vars* or *vars_files*, or variable files passed on the ansible-playbook command line with *-e @file.yml* or *-e @file.json*. Role variables and defaults are also included! Because Ansible tasks, handlers, and so on are also data, these can also be encrypted with vault. If you’d like to not betray what variables you are even using, you can go as far to keep an individual task file entirely encrypted. The password used with vault currently must be the same for all files you wish to use together at the same time. COMMON OPTIONS -------------- The following options are available to all sub-commands: *--vault-password-file=*'FILE':: A file containing the vault password to be used during the encryption/decryption steps. Be sure to keep this file secured if it is used. If the file is executable, it will be run and its standard output will be used as the password. *--new-vault-password-file=*'FILE':: A file containing the new vault password to be used when rekeying a file. Be sure to keep this file secured if it is used. If the file is executable, it will be run and its standard output will be used as the password. *-h*, *--help*:: Show a help message related to the given sub-command. If '--valut-password-file' is not supplied ansib-vault will automatically prompt for passwords as required. CREATE ------ *$ ansible-vault create [options] FILE* The *create* sub-command is used to initialize a new encrypted file. After providing a password, the tool will launch whatever editor you have defined with $EDITOR, and defaults to vi. Once you are done with the editor session, the file will be saved as encrypted data. 
The default cipher is AES (which is shared-secret based). EDIT ---- *$ ansible-vault edit [options] FILE* The *edit* sub-command is used to modify a file which was previously encrypted using ansible-vault. This command will decrypt the file to a temporary file and allow you to edit the file, saving it back when done and removing the temporary file. REKEY ----- *$ ansible-vault rekey [options] FILE_1 [FILE_2, ..., FILE_N]* The *rekey* command is used to change the password on vault-encrypted files. This command can update multiple files at once. ENCRYPT ------- *$ ansible-vault encrypt [options] FILE_1 [FILE_2, ..., FILE_N]* The *encrypt* sub-command is used to encrypt pre-existing data files. As with the *rekey* command, you can specify multiple files in one command. The *encrypt* command accepts an *--output FILENAME* option to determine where encrypted output is stored. With this option, input is read from the (at most one) filename given on the command line; if no input file is given, input is read from stdin. Either the input or the output file may be given as '-' for stdin and stdout respectively. If neither input nor output file is given, the command acts as a filter, reading plaintext from stdin and writing it to stdout. Thus any of the following invocations can be used: *$ ansible-vault encrypt* *$ ansible-vault encrypt --output OUTFILE* *$ ansible-vault encrypt INFILE --output OUTFILE* *$ echo secret|ansible-vault encrypt --output OUTFILE* Reading from stdin and writing only encrypted output is a good way to prevent sensitive data from ever hitting disk (either interactively or from a script). DECRYPT ------- *$ ansible-vault decrypt [options] FILE_1 [FILE_2, ..., FILE_N]* The *decrypt* sub-command is used to remove all encryption from data files. The files will be stored as plain-text YAML once again, so be sure that you do not run this command on data files with active passwords or other sensitive data. In most cases, users will want to use the *edit* sub-command to modify the files securely. As with *encrypt*, the *decrypt* subcommand also accepts the *--output FILENAME* option to specify where plaintext output is stored, and stdin/stdout is handled as described above. AUTHOR ------ Ansible was originally written by Michael DeHaan. See the AUTHORS file for a complete list of contributors. COPYRIGHT --------- Copyright © 2014, Michael DeHaan Ansible is released under the terms of the GPLv3 License. SEE ALSO -------- *ansible*(1), *ansible-pull*(1), *ansible-doc*(1), *ansible-playbook*(1), *ansible-galaxy*(1) Extensive documentation is available in the documentation site: <http://docs.ansible.com>.
IRC and mailing list info can be found in file CONTRIBUTING.md, available in: <https://github.com/ansible/ansible> ansible-2.1.1.0/docs/man/man1/ansible.10000664000175400017540000002204412746444521020475 0ustar jenkinsjenkins00000000000000'\" t .\" Title: ansible .\" Author: :doctype:manpage .\" Generator: DocBook XSL Stylesheets v1.78.1 .\" Date: 07/28/2016 .\" Manual: System administration commands .\" Source: Ansible 2.1.1.0 .\" Language: English .\" .TH "ANSIBLE" "1" "07/28/2016" "Ansible 2\&.1\&.1\&.0" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .\" http://bugs.debian.org/507673 .\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" ----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- .\" disable hyphenation .nh .\" disable justification (adjust text to left margin only) .ad l .\" ----------------------------------------------------------------- .\" * MAIN CONTENT STARTS HERE * .\" ----------------------------------------------------------------- .SH "NAME" ansible \- run a task on a target host(s) .SH "SYNOPSIS" .sp ansible <host\-pattern> [\-m module_name] [\-a args] [options] .SH "DESCRIPTION" .sp \fBAnsible\fR is an extra\-simple tool/framework/API for doing \*(Aqremote things\*(Aq\&. This is the adhoc command that allows for a \*(Aqsingle task playbook\*(Aq run\&. .SH "ARGUMENTS" .PP \fBhost\-pattern\fR .RS 4 A name of a group in the inventory, a shell\-like glob selecting hosts in inventory or any combination of the two separated by commas\&. .RE .SH "OPTIONS" .PP \fB\-a\fR \*(Aq\fIARGUMENTS\fR\*(Aq, \fB\-\-args=\fR\*(Aq\fIARGUMENTS\fR\*(Aq .RS 4 The \fIARGUMENTS\fR to pass to the module\&. .RE .PP \fB\-b\fR, \fB\-\-become\fR .RS 4 Use privilege escalation (specific one depends on become_method), this does not imply prompting for passwords\&. .RE .PP \fB\-K\fR, \fB\-\-ask\-become\-pass\fR .RS 4 Ask for privilege escalation password\&. .RE .PP \fB\-k\fR, \fB\-\-ask\-pass\fR .RS 4 Prompt for the connection password, if it is needed for the transport used\&. For example, using ssh and not having a key\-based authentication with ssh\-agent\&. .RE .PP \fB\-\-ask\-su\-pass\fR .RS 4 Prompt for su password, used with \-\-su (deprecated, use become)\&. .RE .PP \fB\-\-ask\-sudo\-pass\fR .RS 4 Prompt for the password to use with \-\-sudo, if any (deprecated, use become)\&. .RE .PP \fB\-\-ask\-vault\-pass\fR .RS 4 Prompt for vault password\&. .RE .PP \fB\-B\fR \fINUM\fR, \fB\-\-background=\fR\fINUM\fR .RS 4 Run commands in the background, killing the task after \fINUM\fR seconds\&. .RE .PP \fB\-\-become\-method=\fR\fIBECOME_METHOD\fR .RS 4 Privilege escalation method to use (default=sudo), valid choices: [ sudo | su | pbrun | pfexec | runas | doas | dzdo ] .RE .PP \fB\-\-become\-user=\fR\fIBECOME_USER\fR .RS 4 Run operations as this user (default=root)\&. .RE .PP \fB\-C\fR, \fB\-\-check\fR .RS 4 Do not make any changes on the remote system, but test resources to see what might have changed\&. Note this can not scan all possible resource types and is only a simulation\&. .RE .PP \fB\-c\fR \fICONNECTION\fR, \fB\-\-connection=\fR\fICONNECTION\fR .RS 4 Connection type to use\&.
Most common options are \fIparamiko\fR (SSH), \fIssh\fR, \fIwinrm\fR and \fIlocal\fR\&. \fIlocal\fR is mostly useful for crontab or kickstarts\&. .RE .PP \fB\-e\fR \fIEXTRA_VARS\fR, \fB\-\-extra\-vars=\fR\fIEXTRA_VARS\fR .RS 4 Extra variables to inject into a playbook, in key=value key=value format or as quoted YAML/JSON (hashes and arrays)\&. To load variables from a file, specify the file preceded by @ (e\&.g\&. @vars\&.yml)\&. .RE .PP \fB\-f\fR \fINUM\fR, \fB\-\-forks=\fR\fINUM\fR .RS 4 Level of parallelism\&. \fINUM\fR is specified as an integer, the default is 5\&. .RE .PP \fB\-h\fR, \fB\-\-help\fR .RS 4 Show help message and exit\&. .RE .PP \fB\-i\fR \fIPATH\fR, \fB\-\-inventory=\fR\fIPATH\fR .RS 4 The \fIPATH\fR to the inventory, which defaults to \fI/etc/ansible/hosts\fR\&. Alternatively you can use a comma\-separated list of hosts or a single host with a trailing comma \fIhost,\fR\&. .RE .PP \fB\-l\fR \fISUBSET\fR, \fB\-\-limit=\fR\fISUBSET\fR .RS 4 Further limits the selected host/group patterns\&. You can prefix it with \fI~\fR to indicate that the pattern is a regex\&. .RE .PP \fB\-\-list\-hosts\fR .RS 4 Outputs a list of matching hosts; does not execute anything else\&. .RE .PP \fB\-m\fR \fINAME\fR, \fB\-\-module\-name=\fR\fINAME\fR .RS 4 Execute the module called \fINAME\fR\&. .RE .PP \fB\-M\fR \fIDIRECTORY\fR, \fB\-\-module\-path=\fR\fIDIRECTORY\fR .RS 4 The \fIDIRECTORY\fR search path to load modules from\&. The default is \fI/usr/share/ansible\fR\&. This can also be set with the ANSIBLE_LIBRARY environment variable\&. .RE .PP \fB\-o\fR, \fB\-\-one\-line\fR .RS 4 Try to output everything on one line\&. .RE .PP \fB\-P\fR \fINUM\fR, \fB\-\-poll=\fR\fINUM\fR .RS 4 Poll a background job every \fINUM\fR seconds\&. Requires \fB\-B\fR\&. .RE .PP \fB\-\-private\-key=\fR\fIPRIVATE_KEY_FILE\fR .RS 4 Use this file to authenticate the connection\&. .RE .PP \fB\-S\fR, \fB\-\-su\fR .RS 4 Run operations with su (deprecated, use become)\&. .RE .PP \fB\-R\fR \fISU_USER\fR, \fB\-\-su\-user=\fR\fISU_USER\fR .RS 4 Run operations with su as this user (default=root) (deprecated, use become)\&. .RE .PP \fB\-s\fR, \fB\-\-sudo\fR .RS 4 Run the command as the user given by \-u and sudo to root (deprecated, use become)\&. .RE .PP \fB\-\-ssh\-common\-args=\fR\fI\*(Aq\-o ProxyCommand="ssh \-W %h:%p \&..." \&...\fR\*(Aq .RS 4 Add the specified arguments to any sftp/scp/ssh command\-line\&. Useful to set a ProxyCommand to use a jump host, but any arguments that are accepted by all three programs may be specified\&. .RE .PP \fB\-\-sftp\-extra\-args=\fR\fI\*(Aq\-f \&...\fR\*(Aq .RS 4 Add the specified arguments to any sftp command\-line\&. .RE .PP \fB\-\-scp\-extra\-args=\fR\fI\*(Aq\-l \&...\fR\*(Aq .RS 4 Add the specified arguments to any scp command\-line\&. .RE .PP \fB\-\-ssh\-extra\-args=\fR\fI\*(Aq\-R \&...\fR\*(Aq .RS 4 Add the specified arguments to any ssh command\-line\&. .RE .PP \fB\-U\fR \fISUDO_USERNAME\fR, \fB\-\-sudo\-user=\fR\fISUDO_USERNAME\fR .RS 4 Sudo to \fISUDO_USERNAME\fR, default is root (deprecated, use become)\&. .RE .PP \fB\-t\fR \fIDIRECTORY\fR, \fB\-\-tree=\fR\fIDIRECTORY\fR .RS 4 Save contents in this output \fIDIRECTORY\fR, with the results saved in a file named after each host\&. .RE .PP \fB\-T\fR \fISECONDS\fR, \fB\-\-timeout=\fR\fISECONDS\fR .RS 4 Connection timeout to use when trying to talk to hosts, in \fISECONDS\fR\&.
.RE .PP \fB\-u\fR \fIUSERNAME\fR, \fB\-\-user=\fR\fIUSERNAME\fR .RS 4 Use this \fIUSERNAME\fR to login to the target host, instead of the current user\&. .RE .PP \fB\-\-vault\-password\-file=\fR\fIVAULT_PASSWORD_FILE\fR .RS 4 A file containing the vault password to be used during the decryption of vault encrypted files\&. Be sure to keep this file secured if it is used\&. If the file is executable, it will be run and its standard output will be used as the password\&. .RE .PP \fB\-v\fR, \fB\-\-verbose\fR .RS 4 Verbose mode, more output from successful actions will be shown\&. Give up to three times for more output\&. .RE .PP \fB\-\-version\fR .RS 4 Show program version number and exit\&. .RE .SH "INVENTORY" .sp Ansible stores the hosts it can potentially operate on in an inventory\&. This can be an ini\-like file, a script, directory or a list\&. The ini syntax is one host per line\&. Group headers are allowed and are included on their own line, enclosed in square brackets that start the line\&. .sp Ranges of hosts are also supported\&. For more information and additional options, see the documentation on http://docs\&.ansible\&.com/\&. .SH "ENVIRONMENT" .sp The following environment variables may be specified\&. .sp ANSIBLE_INVENTORY \(em Override the default ansible inventory file .sp ANSIBLE_LIBRARY \(em Override the default ansible module library path .sp ANSIBLE_CONFIG \(em Override the default ansible config file .sp Many more are available for most options in ansible\&.cfg .SH "FILES" .sp /etc/ansible/hosts \(em Default inventory file .sp /usr/share/ansible/ \(em Default module library .sp /etc/ansible/ansible\&.cfg \(em Config file, used if present .sp ~/\&.ansible\&.cfg \(em User config file, overrides the default config if present .SH "AUTHOR" .sp Ansible was originally written by Michael DeHaan\&. See the AUTHORS file for a complete list of contributors\&. .SH "COPYRIGHT" .sp Copyright \(co 2012, Michael DeHaan Ansible is released under the terms of the GPLv3 License\&. .SH "SEE ALSO" .sp \fBansible\-playbook\fR(1), \fBansible\-pull\fR(1), \fBansible\-doc\fR(1), \fBansible\-vault\fR(1), \fBansible\-galaxy\fR(1) .sp Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible .SH "AUTHOR" .PP \fB:doctype:manpage\fR .RS 4 Author. .RE ansible-2.1.1.0/docs/man/man1/ansible.1.asciidoc.in0000664000175400017540000001520612746444466022661 0ustar jenkinsjenkins00000000000000ansible(1) ========= :doctype:manpage :man source: Ansible :man version: %VERSION% :man manual: System administration commands NAME ---- ansible - run a task on a target host(s) SYNOPSIS -------- ansible <host-pattern> [-m module_name] [-a args] [options] DESCRIPTION ----------- *Ansible* is an extra-simple tool/framework/API for doing \'remote things'. This is the adhoc command that allows for a \'single task playbook' run. ARGUMENTS --------- *host-pattern*:: A name of a group in the inventory, a shell-like glob selecting hosts in inventory or any combination of the two separated by commas. OPTIONS ------- *-a* \'_ARGUMENTS_', *--args=*\'_ARGUMENTS_':: The 'ARGUMENTS' to pass to the module. *-b*, *--become*:: Use privilege escalation (specific one depends on become_method), this does not imply prompting for passwords. *-K*, *--ask-become-pass*:: Ask for privilege escalation password. *-k*, *--ask-pass*:: Prompt for the connection password, if it is needed for the transport used.
For example, using ssh and not having a key-based authentication with ssh-agent. *--ask-su-pass*:: Prompt for su password, used with --su (deprecated, use become). *--ask-sudo-pass*:: Prompt for the password to use with --sudo, if any (deprecated, use become). *--ask-vault-pass*:: Prompt for vault password. *-B* 'NUM', *--background=*'NUM':: Run commands in the background, killing the task after 'NUM' seconds. *--become-method=*'BECOME_METHOD':: Privilege escalation method to use (default=sudo), valid choices: [ sudo | su | pbrun | pfexec | runas | doas | dzdo ] *--become-user=*'BECOME_USER':: Run operations as this user (default=root). *-C*, *--check*:: Do not make any changes on the remote system, but test resources to see what might have changed. Note this can not scan all possible resource types and is only a simulation. *-c* 'CONNECTION', *--connection=*'CONNECTION':: Connection type to use. Most common options are 'paramiko' (SSH), 'ssh', 'winrm' and 'local'. 'local' is mostly useful for crontab or kickstarts. *-e* 'EXTRA_VARS', *--extra-vars=*'EXTRA_VARS':: Extra variables to inject into a playbook, in key=value key=value format or as quoted YAML/JSON (hashes and arrays). To load variables from a file, specify the file preceded by @ (e.g. @vars.yml). *-f* 'NUM', *--forks=*'NUM':: Level of parallelism. 'NUM' is specified as an integer, the default is 5. *-h*, *--help*:: Show help message and exit. *-i* 'PATH', *--inventory=*'PATH':: The 'PATH' to the inventory, which defaults to '/etc/ansible/hosts'. Alternatively you can use a comma-separated list of hosts or a single host with a trailing comma 'host,'. *-l* 'SUBSET', *--limit=*'SUBSET':: Further limits the selected host/group patterns. You can prefix it with '~' to indicate that the pattern is a regex. *--list-hosts*:: Outputs a list of matching hosts; does not execute anything else. *-m* 'NAME', *--module-name=*'NAME':: Execute the module called 'NAME'. *-M* 'DIRECTORY', *--module-path=*'DIRECTORY':: The 'DIRECTORY' search path to load modules from. The default is '/usr/share/ansible'. This can also be set with the ANSIBLE_LIBRARY environment variable. *-o*, *--one-line*:: Try to output everything on one line. *-P* 'NUM', *--poll=*'NUM':: Poll a background job every 'NUM' seconds. Requires *-B*. *--private-key=*'PRIVATE_KEY_FILE':: Use this file to authenticate the connection. *-S*, *--su*:: Run operations with su (deprecated, use become). *-R* 'SU_USER', *--su-user=*'SU_USER':: Run operations with su as this user (default=root) (deprecated, use become). *-s*, *--sudo*:: Run the command as the user given by -u and sudo to root (deprecated, use become). *--ssh-common-args=*''-o ProxyCommand="ssh -W %h:%p ..." ...'':: Add the specified arguments to any sftp/scp/ssh command-line. Useful to set a ProxyCommand to use a jump host, but any arguments that are accepted by all three programs may be specified. *--sftp-extra-args=*''-f ...'':: Add the specified arguments to any sftp command-line. *--scp-extra-args=*''-l ...'':: Add the specified arguments to any scp command-line. *--ssh-extra-args=*''-R ...'':: Add the specified arguments to any ssh command-line. *-U* 'SUDO_USERNAME', *--sudo-user=*'SUDO_USERNAME':: Sudo to 'SUDO_USERNAME', default is root (deprecated, use become). *-t* 'DIRECTORY', *--tree=*'DIRECTORY':: Save contents in this output 'DIRECTORY', with the results saved in a file named after each host. *-T* 'SECONDS', *--timeout=*'SECONDS':: Connection timeout to use when trying to talk to hosts, in 'SECONDS'.
*-u* 'USERNAME', *--user=*'USERNAME':: Use this 'USERNAME' to login to the target host, instead of the current user. *--vault-password-file=*'VAULT_PASSWORD_FILE':: A file containing the vault password to be used during the decryption of vault encrypted files. Be sure to keep this file secured if it is used. If the file is executable, it will be run and its standard output will be used as the password. *-v*, *--verbose*:: Verbose mode, more output from successful actions will be shown. Give up to three times for more output. *--version*:: Show program version number and exit. INVENTORY --------- Ansible stores the hosts it can potentially operate on in an inventory. This can be an ini-like file, a script, directory or a list. The ini syntax is one host per line. Group headers are allowed and are included on their own line, enclosed in square brackets that start the line. Ranges of hosts are also supported. For more information and additional options, see the documentation on http://docs.ansible.com/. ENVIRONMENT ----------- The following environment variables may be specified. ANSIBLE_INVENTORY -- Override the default ansible inventory file ANSIBLE_LIBRARY -- Override the default ansible module library path ANSIBLE_CONFIG -- Override the default ansible config file Many more are available for most options in ansible.cfg FILES ----- /etc/ansible/hosts -- Default inventory file /usr/share/ansible/ -- Default module library /etc/ansible/ansible.cfg -- Config file, used if present ~/.ansible.cfg -- User config file, overrides the default config if present AUTHOR ------ Ansible was originally written by Michael DeHaan. See the AUTHORS file for a complete list of contributors. COPYRIGHT --------- Copyright © 2012, Michael DeHaan Ansible is released under the terms of the GPLv3 License. SEE ALSO -------- *ansible-playbook*(1), *ansible-pull*(1), *ansible-doc*(1), *ansible-vault*(1), *ansible-galaxy*(1) Extensive documentation is available in the documentation site: <http://docs.ansible.com>. IRC and mailing list info can be found in file CONTRIBUTING.md, available in: <https://github.com/ansible/ansible> ansible-2.1.1.0/docs/man/man3/0000775000175400017540000000000012746444530016776 5ustar jenkinsjenkins00000000000000ansible-2.1.1.0/docs/man/man3/.gitdir0000664000175400017540000000000012746444466020257 0ustar jenkinsjenkins00000000000000ansible-2.1.1.0/docs/man/.gitignore0000664000175400017540000000002112746444466020131 0ustar jenkinsjenkins00000000000000*.xml *.asciidoc ansible-2.1.1.0/examples/0000775000175400017540000000000012746444530016253 5ustar jenkinsjenkins00000000000000ansible-2.1.1.0/examples/ansible.cfg0000664000175400017540000003277312746444466020365 0ustar jenkinsjenkins00000000000000# config file for ansible -- http://ansible.com/ # ============================================== # nearly all parameters can be overridden in ansible-playbook # or with command line flags. ansible will read ANSIBLE_CONFIG, # ansible.cfg in the current working directory, .ansible.cfg in # the home directory or /etc/ansible/ansible.cfg, whichever it # finds first [defaults] # some basic default values... #inventory = /etc/ansible/hosts #library = /usr/share/my_modules/ #remote_tmp = $HOME/.ansible/tmp #local_tmp = $HOME/.ansible/tmp #forks = 5 #poll_interval = 15 #sudo_user = root #ask_sudo_pass = True #ask_pass = True #transport = smart #remote_port = 22 #module_lang = C #module_set_locale = True # plays will gather facts by default, which contain information about # the remote system.
# # smart - gather by default, but don't regather if already gathered # implicit - gather by default, turn off with gather_facts: False # explicit - do not gather by default, must say gather_facts: True #gathering = implicit # by default retrieve all facts subsets # all - gather all subsets # network - gather min and network facts # hardware - gather hardware facts (longest facts to retrieve) # virtual - gather min and virtual facts # facter - import facts from facter # ohai - import facts from ohai # You can combine them using comma (ex: network,virtual) # You can negate them using ! (ex: !hardware,!facter,!ohai) # A minimal set of facts is always gathered. #gather_subset = all # additional paths to search for roles in, colon separated #roles_path = /etc/ansible/roles # uncomment this to disable SSH key host checking #host_key_checking = False # change the default callback #stdout_callback = skippy # enable additional callbacks #callback_whitelist = timer, mail # Determine whether includes in tasks and handlers are "static" by # default. As of 2.0, includes are dynamic by default. Setting these # values to True will make includes behave more like they did in the # 1.x versions. #task_includes_static = True #handler_includes_static = True # change this for alternative sudo implementations #sudo_exe = sudo # What flags to pass to sudo # WARNING: leaving out the defaults might create unexpected behaviours #sudo_flags = -H -S -n # SSH timeout #timeout = 10 # default user to use for playbooks if user is not specified # (/usr/bin/ansible will use current user as default) #remote_user = root # logging is off by default unless this path is defined # if so defined, consider logrotate #log_path = /var/log/ansible.log # default module name for /usr/bin/ansible #module_name = command # use this shell for commands executed under sudo # you may need to change this to /bin/bash in rare instances # if sudo is constrained #executable = /bin/sh # if inventory variables overlap, does the higher precedence one win # or are hash values merged together? The default is 'replace' but # this can also be set to 'merge'. #hash_behaviour = replace # by default, variables from roles will be visible in the global variable # scope. To prevent this, the following option can be enabled, and only # tasks and handlers within the role will see the variables there #private_role_vars = yes # list any Jinja2 extensions to enable here: #jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n # if set, always use this private key file for authentication, same as # if passing --private-key to ansible or ansible-playbook #private_key_file = /path/to/file # If set, configures the path to the Vault password file as an alternative to # specifying --vault-password-file on the command line. #vault_password_file = /path/to/vault_password_file # format of the string {{ ansible_managed }} available within Jinja2 # templates; it indicates to users editing template files that their edits will be # replaced, substituting proper values for {file}, {host}, {uid} and strftime codes. #ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host} # This short version is better used in templates as it won't flag the file as changed every run. #ansible_managed = Ansible managed: {file} on {host} # by default, ansible-playbook will display "Skipping [host]" if it determines a task # should not be run on a host. Set this to "False" if you don't want to see these "Skipping" # messages.
NOTE: the task header will still be shown regardless of whether or not the # task is skipped. #display_skipped_hosts = True # by default, if a task in a playbook does not include a name: field then # ansible-playbook will construct a header that includes the task's action but # not the task's args. This is a security feature because ansible cannot know # if the *module* considers an argument to be no_log at the time that the # header is printed. If your environment doesn't have a problem securing # stdout from ansible-playbook (or you have manually specified no_log in your # playbook on all of the tasks where you have secret information) then you can # safely set this to True to get more informative messages. #display_args_to_stdout = False # by default (as of 1.3), Ansible will raise errors when attempting to dereference # Jinja2 variables that are not set in templates or action lines. Uncomment this line # to revert the behavior to pre-1.3. #error_on_undefined_vars = False # by default (as of 1.6), Ansible may display warnings based on the configuration of the # system running ansible itself. This may include warnings about 3rd party packages or # other conditions that should be resolved if possible. # to disable these warnings, set the following value to False: #system_warnings = True # by default (as of 1.4), Ansible may display deprecation warnings for language # features that should no longer be used and will be removed in future versions. # to disable these warnings, set the following value to False: #deprecation_warnings = True # (as of 1.8), Ansible can optionally warn when usage of the shell and # command module appear to be simplified by using a default Ansible module # instead. These warnings can be silenced by adjusting the following # setting or adding warn=yes or warn=no to the end of the command line # parameter string. This will for example suggest using the git module # instead of shelling out to the git command. # command_warnings = False # set plugin path directories here, separate with colons #action_plugins = /usr/share/ansible/plugins/action #callback_plugins = /usr/share/ansible/plugins/callback #connection_plugins = /usr/share/ansible/plugins/connection #lookup_plugins = /usr/share/ansible/plugins/lookup #vars_plugins = /usr/share/ansible/plugins/vars #filter_plugins = /usr/share/ansible/plugins/filter #test_plugins = /usr/share/ansible/plugins/test #strategy_plugins = /usr/share/ansible/plugins/strategy # by default callbacks are not loaded for /bin/ansible, enable this if you # want, for example, a notification or logging callback to also apply to # /bin/ansible runs #bin_ansible_callbacks = False # don't like cows? that's unfortunate. # set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1 #nocows = 1 # set which cowsay stencil you'd like to use by default. When set to 'random', # a random stencil will be selected for each task. The selection will be filtered # against the `cow_whitelist` option below. #cow_selection = default #cow_selection = random # when using the 'random' option for cowsay, stencils will be restricted to this list. # it should be formatted as a comma-separated list with no spaces between names. # NOTE: line continuations here are for formatting purposes only, as the INI parser # in python does not support them. 
#cow_whitelist=bud-frogs,bunny,cheese,daemon,default,dragon,elephant-in-snake,elephant,eyes,\ # hellokitty,kitty,luke-koala,meow,milk,moofasa,moose,ren,sheep,small,stegosaurus,\ # stimpy,supermilker,three-eyes,turkey,turtle,tux,udder,vader-koala,vader,www # don't like colors either? # set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1 #nocolor = 1 # if set to a persistent type (not 'memory', for example 'redis') fact values # from previous runs in Ansible will be stored. This may be useful when # wanting to use, for example, IP information from one group of servers # without having to talk to them in the same playbook run to get their # current IP information. #fact_caching = memory # retry files # When a playbook fails by default a .retry file will be created in ~/ # You can disable this feature by setting retry_files_enabled to False # and you can change the location of the files by setting retry_files_save_path #retry_files_enabled = False #retry_files_save_path = ~/.ansible-retry # squash actions # Ansible can optimise actions that call modules with list parameters # when looping. Instead of calling the module once per with_ item, the # module is called once with all items at once. Currently this only works # under limited circumstances, and only with parameters named 'name'. #squash_actions = apk,apt,dnf,package,pacman,pkgng,yum,zypper # prevents logging of task data, off by default #no_log = False # prevents logging of tasks, but only on the targets, data is still logged on the master/controller #no_target_syslog = False # controls whether Ansible will raise an error or warning if a task has no # choice but to create world readable temporary files to execute a module on # the remote machine. This option is False by default for security. Users may # turn this on to have behaviour more like Ansible prior to 2.1.x. See # https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user # for more secure ways to fix this than enabling this option. #allow_world_readable_tmpfiles = False # controls the compression level of variables sent to # worker processes. At the default of 0, no compression # is used. This value must be an integer from 0 to 9. #var_compression_level = 9 # controls what compression method is used for new-style ansible modules when # they are sent to the remote system. The compression types depend on having # support compiled into both the controller's python and the client's python. # The names should match with the python Zipfile compression types: # * ZIP_STORED (no compression. available everywhere) # * ZIP_DEFLATED (uses zlib, the default) # These values may be set per host via the ansible_module_compression inventory # variable #module_compression = 'ZIP_DEFLATED' # This controls the cutoff point (in bytes) on --diff for files # set to 0 for unlimited (RAM may suffer!). #max_diff_size = 1048576 [privilege_escalation] #become=True #become_method=sudo #become_user=root #become_ask_pass=False [paramiko_connection] # uncomment this line to cause the paramiko connection plugin to not record new host # keys encountered. Increases performance on new host additions. Setting works independently of the # host key checking setting above. #record_host_keys=False # by default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this # line to disable this behaviour. 
#pty=False

[ssh_connection]

# ssh arguments to use
# Leaving off ControlPersist will result in poor performance, so use
# paramiko on older platforms rather than removing it
#ssh_args = -o ControlMaster=auto -o ControlPersist=60s

# The path to use for the ControlPath sockets. This defaults to
# "%(directory)s/ansible-ssh-%%h-%%p-%%r", however on some systems with
# very long hostnames or very long path names (caused by long user names or
# deeply nested home directories) this can exceed the character limit on
# file socket names (108 characters for most platforms). In that case, you
# may wish to shorten the string below.
#
# Example:
# control_path = %(directory)s/%%h-%%r
#control_path = %(directory)s/ansible-ssh-%%h-%%p-%%r

# Enabling pipelining reduces the number of SSH operations required to
# execute a module on the remote server. This can result in a significant
# performance improvement when enabled, however when using "sudo:" you must
# first disable 'requiretty' in /etc/sudoers
#
# By default, this option is disabled to preserve compatibility with
# sudoers configurations that have requiretty (the default on many distros).
#
#pipelining = False

# if True, make ansible use scp if the connection type is ssh
# (default is sftp)
#scp_if_ssh = True

# if False, sftp will not use batch mode to transfer files. This may make some
# types of file transfer failures impossible to catch, however, and batch mode
# should only be disabled if your version of sftp has problems with it
#sftp_batch_mode = False

[accelerate]
#accelerate_port = 5099
#accelerate_timeout = 30
#accelerate_connect_timeout = 5.0

# The daemon timeout is measured in minutes. This time is measured
# from the last activity to the accelerate daemon.
#accelerate_daemon_timeout = 30

# If set to yes, accelerate_multi_key will allow multiple
# private keys to be uploaded to the daemon, though each user must
# have access to the system via SSH to add a new key. The default
# is "no".
#accelerate_multi_key = yes

[selinux]
# file systems that require special treatment when dealing with security context
# the default behaviour that copies the existing context or uses the user default
# needs to be changed to use the file system dependent context.
#special_context_filesystems=nfs,vboxsf,fuse,ramfs

# Set this to yes to allow libvirt_lxc connections to work without SELinux.
#libvirt_lxc_noseclabel = yes

[colors]
#highlight = white
#verbose = blue
#warn = bright purple
#error = red
#debug = dark gray
#deprecate = purple
#skip = cyan
#unreachable = red
#ok = green
#changed = yellow
#diff_add = green
#diff_remove = red
#diff_lines = cyan
ansible-2.1.1.0/examples/hosts0000664000175400017540000000177012746444466017353 0ustar jenkinsjenkins00000000000000# This is the default ansible 'hosts' file.
#
# It should live in /etc/ansible/hosts
#
#   - Comments begin with the '#' character
#   - Blank lines are ignored
#   - Groups of hosts are delimited by [header] elements
#   - You can enter hostnames or ip addresses
#   - A hostname/ip can be a member of multiple groups

# Ex 1: Ungrouped hosts, specify before any group headers.
## green.example.com
## blue.example.com
## 192.168.100.1
## 192.168.100.10

# Ex 2: A collection of hosts belonging to the 'webservers' group

## [webservers]
## alpha.example.org
## beta.example.org
## 192.168.1.100
## 192.168.1.110

# If you have multiple hosts following a pattern you can specify
# them like this:

## www[001:006].example.com

# Ex 3: A collection of database servers in the 'dbservers' group

## [dbservers]
##
## db01.intranet.mydomain.net
## db02.intranet.mydomain.net
## 10.25.1.56
## 10.25.1.57

# Here's another example of host ranges, this time there are no
# leading 0s:

## db-[99:101]-node.example.com
ansible-2.1.1.0/lib/0000775000175400017540000000000012746444530015203 5ustar jenkinsjenkins00000000000000ansible-2.1.1.0/lib/ansible/0000775000175400017540000000000012746444530016620 5ustar jenkinsjenkins00000000000000ansible-2.1.1.0/lib/ansible/cli/0000775000175400017540000000000012746444530017367 5ustar jenkinsjenkins00000000000000ansible-2.1.1.0/lib/ansible/cli/__init__.py0000664000175400017540000006211712746444466021513 0ustar jenkinsjenkins00000000000000# (c) 2012-2014, Michael DeHaan
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import operator
import optparse
import os
import sys
import time
import yaml
import re
import getpass
import signal
import subprocess

from ansible.release import __version__
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.utils.unicode import to_bytes, to_unicode

try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()


class SortedOptParser(optparse.OptionParser):
    '''Optparser which sorts the options by opt before outputting --help'''

    # FIXME: epilog parsing: OptionParser.format_epilog = lambda self, formatter: self.epilog
    def format_help(self, formatter=None, epilog=None):
        self.option_list.sort(key=operator.methodcaller('get_opt_string'))
        return optparse.OptionParser.format_help(self, formatter=None)


class CLI(object):
    ''' code behind bin/ansible* programs '''

    VALID_ACTIONS = ['No Actions']

    _ITALIC = re.compile(r"I\(([^)]+)\)")
    _BOLD   = re.compile(r"B\(([^)]+)\)")
    _MODULE = re.compile(r"M\(([^)]+)\)")
    _URL    = re.compile(r"U\(([^)]+)\)")
    _CONST  = re.compile(r"C\(([^)]+)\)")

    PAGER = 'less'
    LESS_OPTS = 'FRSX'  # -F (quit-if-one-screen) -R (allow raw ansi control chars)
                        # -S (chop long lines) -X (disable termcap init and de-init)

    def __init__(self, args, callback=None):
        """
        Base init method for all command line programs
        """

        self.args = args
        self.options = None
        self.parser = None
        self.action = None
        self.callback = callback

    def set_action(self):
        """
        Get the action the user wants to execute from the sys argv list.
""" for i in range(0,len(self.args)): arg = self.args[i] if arg in self.VALID_ACTIONS: self.action = arg del self.args[i] break if not self.action: # if no need for action if version/help tmp_options, tmp_args = self.parser.parse_args() if not(hasattr(tmp_options, 'help') and tmp_options.help) or (hasattr(tmp_options, 'version') and tmp_options.version): raise AnsibleOptionsError("Missing required action") def execute(self): """ Actually runs a child defined method using the execute_ pattern """ fn = getattr(self, "execute_%s" % self.action) fn() def parse(self): raise Exception("Need to implement!") def run(self): if self.options.verbosity > 0: if C.CONFIG_FILE: display.display(u"Using %s as config file" % to_unicode(C.CONFIG_FILE)) else: display.display(u"No config file found; using defaults") @staticmethod def ask_vault_passwords(ask_new_vault_pass=False, rekey=False): ''' prompt for vault password and/or password change ''' vault_pass = None new_vault_pass = None try: if rekey or not ask_new_vault_pass: vault_pass = getpass.getpass(prompt="Vault password: ") if ask_new_vault_pass: new_vault_pass = getpass.getpass(prompt="New Vault password: ") new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ") if new_vault_pass != new_vault_pass2: raise AnsibleError("Passwords do not match") except EOFError: pass # enforce no newline chars at the end of passwords if vault_pass: vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip() if new_vault_pass: new_vault_pass = to_bytes(new_vault_pass, errors='strict', nonstring='simplerepr').strip() if ask_new_vault_pass and not rekey: vault_pass = new_vault_pass return vault_pass, new_vault_pass def ask_passwords(self): ''' prompt for connection and become passwords if needed ''' op = self.options sshpass = None becomepass = None become_prompt = '' try: if op.ask_pass: sshpass = getpass.getpass(prompt="SSH password: ") become_prompt = "%s password[defaults to SSH password]: " % op.become_method.upper() if sshpass: sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr') else: become_prompt = "%s password: " % op.become_method.upper() if op.become_ask_pass: becomepass = getpass.getpass(prompt=become_prompt) if op.ask_pass and becomepass == '': becomepass = sshpass if becomepass: becomepass = to_bytes(becomepass) except EOFError: pass return (sshpass, becomepass) def normalize_become_options(self): ''' this keeps backwards compatibility with sudo/su self.options ''' self.options.become_ask_pass = self.options.become_ask_pass or self.options.ask_sudo_pass or self.options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS self.options.become_user = self.options.become_user or self.options.sudo_user or self.options.su_user or C.DEFAULT_BECOME_USER if self.options.become: pass elif self.options.sudo: self.options.become = True self.options.become_method = 'sudo' elif self.options.su: self.options.become = True self.options.become_method = 'su' def validate_conflicts(self, vault_opts=False, runas_opts=False, fork_opts=False): ''' check for conflicting options ''' op = self.options if vault_opts: # Check for vault related conflicts if (op.ask_vault_pass and op.vault_password_file): self.parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive") if runas_opts: # Check for privilege escalation conflicts if (op.su or op.su_user) and (op.sudo or op.sudo_user) or \ (op.su or op.su_user) and (op.become or op.become_user) or \ (op.sudo or op.sudo_user) and (op.become or op.become_user): 
self.parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') " "and su arguments ('-su', '--su-user', and '--ask-su-pass') " "and become arguments ('--become', '--become-user', and '--ask-become-pass')" " are exclusive of each other") if fork_opts: if op.forks < 1: self.parser.error("The number of processes (--forks) must be >= 1") @staticmethod def expand_tilde(option, opt, value, parser): setattr(parser.values, option.dest, os.path.expanduser(value)) @staticmethod def expand_paths(option, opt, value, parser): """optparse action callback to convert a PATH style string arg to a list of path strings. For ex, cli arg of '-p /blip/foo:/foo/bar' would be split on the default os.pathsep and the option value would be set to the list ['/blip/foo', '/foo/bar']. Each path string in the list will also have '~/' values expand via os.path.expanduser().""" path_entries = value.split(os.pathsep) expanded_path_entries = [os.path.expanduser(path_entry) for path_entry in path_entries] setattr(parser.values, option.dest, expanded_path_entries) @staticmethod def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False, module_opts=False, async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, inventory_opts=False, epilog=None, fork_opts=False, runas_prompt_opts=False): ''' create an options parser for most ansible scripts ''' # TODO: implement epilog parsing # OptionParser.format_epilog = lambda self, formatter: self.epilog # base opts parser = SortedOptParser(usage, version=CLI.version("%prog")) parser.add_option('-v','--verbose', dest='verbosity', default=0, action="count", help="verbose mode (-vvv for more, -vvvv to enable connection debugging)") if inventory_opts: parser.add_option('-i', '--inventory-file', dest='inventory', help="specify inventory host path (default=%s) or comma separated host list." 
% C.DEFAULT_HOST_LIST, default=C.DEFAULT_HOST_LIST, action="callback", callback=CLI.expand_tilde, type=str) parser.add_option('--list-hosts', dest='listhosts', action='store_true', help='outputs a list of matching hosts; does not execute anything else') parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset', help='further limit selected hosts to an additional pattern') if module_opts: parser.add_option('-M', '--module-path', dest='module_path', default=None, help="specify path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH, action="callback", callback=CLI.expand_tilde, type=str) if runtask_opts: parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", help="set additional variables as key=value or YAML/JSON", default=[]) if fork_opts: parser.add_option('-f','--forks', dest='forks', default=C.DEFAULT_FORKS, type='int', help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS) if vault_opts: parser.add_option('--ask-vault-pass', default=C.DEFAULT_ASK_VAULT_PASS, dest='ask_vault_pass', action='store_true', help='ask for vault password') parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE, dest='vault_password_file', help="vault password file", action="callback", callback=CLI.expand_tilde, type=str) parser.add_option('--new-vault-password-file', dest='new_vault_password_file', help="new vault password file for rekey", action="callback", callback=CLI.expand_tilde, type=str) parser.add_option('--output', default=None, dest='output_file', help='output file name for encrypt or decrypt; use - for stdout', action="callback", callback=CLI.expand_tilde, type=str) if subset_opts: parser.add_option('-t', '--tags', dest='tags', default='all', help="only run plays and tasks tagged with these values") parser.add_option('--skip-tags', dest='skip_tags', help="only run plays and tasks whose tags do not match these values") if output_opts: parser.add_option('-o', '--one-line', dest='one_line', action='store_true', help='condense output') parser.add_option('-t', '--tree', dest='tree', default=None, help='log output to this directory') if connect_opts: connect_group = optparse.OptionGroup(parser, "Connection Options", "control as whom and how to connect to hosts") connect_group.add_option('-k', '--ask-pass', default=C.DEFAULT_ASK_PASS, dest='ask_pass', action='store_true', help='ask for connection password') connect_group.add_option('--private-key','--key-file', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file', help='use this file to authenticate the connection') connect_group.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user', help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER) connect_group.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT, help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT) connect_group.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', dest='timeout', help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT) connect_group.add_option('--ssh-common-args', default='', dest='ssh_common_args', help="specify common arguments to pass to sftp/scp/ssh (e.g. ProxyCommand)") connect_group.add_option('--sftp-extra-args', default='', dest='sftp_extra_args', help="specify extra arguments to pass to sftp only (e.g. -f, -l)") connect_group.add_option('--scp-extra-args', default='', dest='scp_extra_args', help="specify extra arguments to pass to scp only (e.g. 
-l)") connect_group.add_option('--ssh-extra-args', default='', dest='ssh_extra_args', help="specify extra arguments to pass to ssh only (e.g. -R)") parser.add_option_group(connect_group) runas_group = None rg = optparse.OptionGroup(parser, "Privilege Escalation Options", "control how and which user you become as on target hosts") if runas_opts: runas_group = rg # priv user defaults to root later on to enable detecting when this option was given here runas_group.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo', help="run operations with sudo (nopasswd) (deprecated, use become)") runas_group.add_option('-U', '--sudo-user', dest='sudo_user', default=None, help='desired sudo user (default=root) (deprecated, use become)') runas_group.add_option('-S', '--su', default=C.DEFAULT_SU, action='store_true', help='run operations with su (deprecated, use become)') runas_group.add_option('-R', '--su-user', default=None, help='run operations with su as this user (default=%s) (deprecated, use become)' % C.DEFAULT_SU_USER) # consolidated privilege escalation (become) runas_group.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become', help="run operations with become (does not imply password prompting)") runas_group.add_option('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD, type='choice', choices=C.BECOME_METHODS, help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS))) runas_group.add_option('--become-user', default=None, dest='become_user', type='string', help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER) if runas_opts or runas_prompt_opts: if not runas_group: runas_group = rg runas_group.add_option('--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true', help='ask for sudo password (deprecated, use become)') runas_group.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true', help='ask for su password (deprecated, use become)') runas_group.add_option('-K', '--ask-become-pass', default=False, dest='become_ask_pass', action='store_true', help='ask for privilege escalation password') if runas_group: parser.add_option_group(runas_group) if async_opts: parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int', dest='poll_interval', help="set the poll interval if using -B (default=%s)" % C.DEFAULT_POLL_INTERVAL) parser.add_option('-B', '--background', dest='seconds', type='int', default=0, help='run asynchronously, failing after X seconds (default=N/A)') if check_opts: parser.add_option("-C", "--check", default=False, dest='check', action='store_true', help="don't make any changes; instead, try to predict some of the changes that may occur") parser.add_option('--syntax-check', dest='syntax', action='store_true', help="perform a syntax check on the playbook, but do not execute it") parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true', help="when changing (small) files and templates, show the differences in those files; works great with --check") if meta_opts: parser.add_option('--force-handlers', default=C.DEFAULT_FORCE_HANDLERS, dest='force_handlers', action='store_true', help="run handlers even if a task fails") parser.add_option('--flush-cache', dest='flush_cache', action='store_true', help="clear the fact cache") return parser @staticmethod def version(prog): ''' return ansible version 
''' result = "{0} {1}".format(prog, __version__) gitinfo = CLI._gitinfo() if gitinfo: result = result + " {0}".format(gitinfo) result += "\n config file = %s" % C.CONFIG_FILE if C.DEFAULT_MODULE_PATH is None: cpath = "Default w/o overrides" else: cpath = C.DEFAULT_MODULE_PATH result = result + "\n configured module search path = %s" % cpath return result @staticmethod def version_info(gitinfo=False): ''' return full ansible version info ''' if gitinfo: # expensive call, user with care ansible_version_string = CLI.version('') else: ansible_version_string = __version__ ansible_version = ansible_version_string.split()[0] ansible_versions = ansible_version.split('.') for counter in range(len(ansible_versions)): if ansible_versions[counter] == "": ansible_versions[counter] = 0 try: ansible_versions[counter] = int(ansible_versions[counter]) except: pass if len(ansible_versions) < 3: for counter in range(len(ansible_versions), 3): ansible_versions.append(0) return {'string': ansible_version_string.strip(), 'full': ansible_version, 'major': ansible_versions[0], 'minor': ansible_versions[1], 'revision': ansible_versions[2]} @staticmethod def _git_repo_info(repo_path): ''' returns a string containing git branch, commit id and commit date ''' result = None if os.path.exists(repo_path): # Check if the .git is a file. If it is a file, it means that we are in a submodule structure. if os.path.isfile(repo_path): try: gitdir = yaml.safe_load(open(repo_path)).get('gitdir') # There is a possibility the .git file to have an absolute path. if os.path.isabs(gitdir): repo_path = gitdir else: repo_path = os.path.join(repo_path[:-4], gitdir) except (IOError, AttributeError): return '' f = open(os.path.join(repo_path, "HEAD")) line = f.readline().rstrip("\n") if line.startswith("ref:"): branch_path = os.path.join(repo_path, line[5:]) else: branch_path = None f.close() if branch_path and os.path.exists(branch_path): branch = '/'.join(line.split('/')[2:]) f = open(branch_path) commit = f.readline()[:10] f.close() else: # detached HEAD commit = line[:10] branch = 'detached HEAD' branch_path = os.path.join(repo_path, "HEAD") date = time.localtime(os.stat(branch_path).st_mtime) if time.daylight == 0: offset = time.timezone else: offset = time.altzone result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit, time.strftime("%Y/%m/%d %H:%M:%S", date), int(offset / -36)) else: result = '' return result @staticmethod def _gitinfo(): basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..') repo_path = os.path.join(basedir, '.git') result = CLI._git_repo_info(repo_path) submodules = os.path.join(basedir, '.gitmodules') if not os.path.exists(submodules): return result f = open(submodules) for line in f: tokens = line.strip().split(' ') if tokens[0] == 'path': submodule_path = tokens[2] submodule_info = CLI._git_repo_info(os.path.join(basedir, submodule_path, '.git')) if not submodule_info: submodule_info = ' not found - use git submodule update --init ' + submodule_path result += "\n {0}: {1}".format(submodule_path, submodule_info) f.close() return result def pager(self, text): ''' find reasonable way to display text ''' # this is a much simpler form of what is in pydoc.py if not sys.stdout.isatty(): display.display(text) elif 'PAGER' in os.environ: if sys.platform == 'win32': display.display(text) else: self.pager_pipe(text, os.environ['PAGER']) elif subprocess.call('(less --version) &> /dev/null', shell = True) == 0: self.pager_pipe(text, 'less') else: display.display(text) @staticmethod def 
pager_pipe(text, cmd):
        ''' pipe text through a pager '''
        if 'LESS' not in os.environ:
            os.environ['LESS'] = CLI.LESS_OPTS
        try:
            cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
            cmd.communicate(input=to_bytes(text))
        except IOError:
            pass
        except KeyboardInterrupt:
            pass

    @classmethod
    def tty_ify(cls, text):

        t = cls._ITALIC.sub("`" + r"\1" + "'", text)    # I(word) => `word'
        t = cls._BOLD.sub("*" + r"\1" + "*", t)         # B(word) => *word*
        t = cls._MODULE.sub("[" + r"\1" + "]", t)       # M(word) => [word]
        t = cls._URL.sub(r"\1", t)                      # U(word) => word
        t = cls._CONST.sub("`" + r"\1" + "'", t)        # C(word) => `word'

        return t

    @staticmethod
    def read_vault_password_file(vault_password_file, loader):
        """
        Read a vault password from a file or if executable, execute the script and
        retrieve password from STDOUT
        """

        this_path = os.path.realpath(os.path.expanduser(vault_password_file))
        if not os.path.exists(this_path):
            raise AnsibleError("The vault password file %s was not found" % this_path)

        if loader.is_executable(this_path):
            try:
                # STDERR not captured to make it easier for users to prompt for input in their scripts
                p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
            except OSError as e:
                # this_path is a single path string, so it is interpolated directly
                raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, remove the executable bit from the file." % (this_path, e))
            stdout, stderr = p.communicate()
            if p.returncode != 0:
                # stderr is not captured above, so only the return code can be reported
                raise AnsibleError("Vault password script %s returned non-zero (%s)" % (this_path, p.returncode))
            vault_pass = stdout.strip('\r\n')
        else:
            try:
                f = open(this_path, "rb")
                vault_pass = f.read().strip()
                f.close()
            except (OSError, IOError) as e:
                raise AnsibleError("Could not read vault password file %s: %s" % (this_path, e))

        return vault_pass

    def get_opt(self, k, defval=""):
        """
        Returns an option from an Optparse values instance.
        """
        try:
            data = getattr(self.options, k)
        except:
            return defval
        # FIXME: Can this be removed if cli and/or constants ensures it's a
        # list?
        if k == "roles_path":
            if os.pathsep in data:
                data = data.split(os.pathsep)[0]
        return data
ansible-2.1.1.0/lib/ansible/cli/adhoc.py0000664000175400017540000001617512746444466021035 0ustar jenkinsjenkins00000000000000# (c) 2012, Michael DeHaan
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
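# Example usage (illustrative sketch; the invocations below are assumed from
# the option definitions that follow, not taken from this file): AdHocCLI is
# what runs when bin/ansible is invoked directly, e.g.
#
#   ansible all -m ping
#   ansible webservers -m command -a "uptime" -f 10
#
# It parses these options, wraps the requested module call in an implicit
# one-task play, and hands that play to the TaskQueueManager.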
from __future__ import (absolute_import, division, print_function) __metaclass__ = type ######################################################## import os from ansible import constants as C from ansible.cli import CLI from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.executor.task_queue_manager import TaskQueueManager from ansible.inventory import Inventory from ansible.parsing.dataloader import DataLoader from ansible.parsing.splitter import parse_kv from ansible.playbook.play import Play from ansible.plugins import get_all_plugin_loaders from ansible.utils.vars import load_extra_vars from ansible.utils.vars import load_options_vars from ansible.utils.unicode import to_unicode from ansible.vars import VariableManager try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() ######################################################## class AdHocCLI(CLI): ''' code behind ansible ad-hoc cli''' def parse(self): ''' create an options parser for bin/ansible ''' self.parser = CLI.base_parser( usage='%prog [options]', runas_opts=True, inventory_opts=True, async_opts=True, output_opts=True, connect_opts=True, check_opts=True, runtask_opts=True, vault_opts=True, fork_opts=True, module_opts=True, ) # options unique to ansible ad-hoc self.parser.add_option('-a', '--args', dest='module_args', help="module arguments", default=C.DEFAULT_MODULE_ARGS) self.parser.add_option('-m', '--module-name', dest='module_name', help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME, default=C.DEFAULT_MODULE_NAME) self.options, self.args = self.parser.parse_args(self.args[1:]) if len(self.args) != 1: raise AnsibleOptionsError("Missing target hosts") display.verbosity = self.options.verbosity self.validate_conflicts(runas_opts=True, vault_opts=True, fork_opts=True) return True def _play_ds(self, pattern, async, poll): check_raw = self.options.module_name in ('command', 'shell', 'script', 'raw') return dict( name = "Ansible Ad-Hoc", hosts = pattern, gather_facts = 'no', tasks = [ dict(action=dict(module=self.options.module_name, args=parse_kv(self.options.module_args, check_raw=check_raw)), async=async, poll=poll) ] ) def run(self): ''' use Runner lib to do SSH things ''' super(AdHocCLI, self).run() # only thing left should be host pattern pattern = to_unicode(self.args[0], errors='strict') # ignore connection password cause we are local if self.options.connection == "local": self.options.ask_pass = False sshpass = None becomepass = None vault_pass = None self.normalize_become_options() (sshpass, becomepass) = self.ask_passwords() passwords = { 'conn_pass': sshpass, 'become_pass': becomepass } loader = DataLoader() if self.options.vault_password_file: # read vault_pass from a file vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader=loader) loader.set_vault_password(vault_pass) elif self.options.ask_vault_pass: vault_pass = self.ask_vault_passwords()[0] loader.set_vault_password(vault_pass) variable_manager = VariableManager() variable_manager.extra_vars = load_extra_vars(loader=loader, options=self.options) variable_manager.options_vars = load_options_vars(self.options) inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=self.options.inventory) variable_manager.set_inventory(inventory) no_hosts = False if len(inventory.list_hosts()) == 0: # Empty inventory display.warning("provided hosts list is empty, only localhost is available") no_hosts = True 
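# Example (illustrative sketch, assuming default option values such as
# poll_interval=15 and seconds=0): for `ansible web -m ping`, the _play_ds()
# helper above returns roughly
#
#   {'name': 'Ansible Ad-Hoc',
#    'hosts': u'web',
#    'gather_facts': 'no',
#    'tasks': [{'action': {'module': 'ping', 'args': {}},
#               'async': 0, 'poll': 15}]}
#
# i.e. every ad-hoc invocation is executed as a single-task play.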
inventory.subset(self.options.subset) hosts = inventory.list_hosts(pattern) if len(hosts) == 0 and no_hosts is False: # Invalid limit raise AnsibleError("Specified hosts and/or --limit does not match any hosts") if self.options.listhosts: display.display(' hosts (%d):' % len(hosts)) for host in hosts: display.display(' %s' % host) return 0 if self.options.module_name in C.MODULE_REQUIRE_ARGS and not self.options.module_args: err = "No argument passed to %s module" % self.options.module_name if pattern.endswith(".yml"): err = err + ' (did you mean to run ansible-playbook?)' raise AnsibleOptionsError(err) # dynamically load any plugins from the playbook directory for name, obj in get_all_plugin_loaders(): if obj.subdir: plugin_path = os.path.join('.', obj.subdir) if os.path.isdir(plugin_path): obj.add_directory(plugin_path) play_ds = self._play_ds(pattern, self.options.seconds, self.options.poll_interval) play = Play().load(play_ds, variable_manager=variable_manager, loader=loader) if self.callback: cb = self.callback elif self.options.one_line: cb = 'oneline' else: cb = 'minimal' run_tree=False if self.options.tree: C.DEFAULT_CALLBACK_WHITELIST.append('tree') C.TREE_DIR = self.options.tree run_tree=True # now create a task queue manager to execute the play self._tqm = None try: self._tqm = TaskQueueManager( inventory=inventory, variable_manager=variable_manager, loader=loader, options=self.options, passwords=passwords, stdout_callback=cb, run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS, run_tree=run_tree, ) result = self._tqm.run(play) finally: if self._tqm: self._tqm.cleanup() if loader: loader.cleanup_all_tmp_files() return result ansible-2.1.1.0/lib/ansible/cli/console.py0000664000175400017540000003643112746444466021422 0ustar jenkinsjenkins00000000000000# (c) 2014, Nandor Sivok # (c) 2016, Redhat Inc # # ansible-console is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ansible-console is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # from __future__ import (absolute_import, division, print_function) __metaclass__ = type ######################################################## # ansible-console is an interactive REPL shell for ansible # with built-in tab completion for all the documented modules # # Available commands: # cd - change host/group (you can use host patterns eg.: app*.dc*:!app01*) # list - list available hosts in the current path # forks - change fork # become - become # ! 
- forces shell module instead of the ansible module (!yum update -y) import atexit import cmd import getpass import readline import os import sys from ansible import constants as C from ansible.cli import CLI from ansible.errors import AnsibleError from ansible.executor.task_queue_manager import TaskQueueManager from ansible.inventory import Inventory from ansible.parsing.dataloader import DataLoader from ansible.parsing.splitter import parse_kv from ansible.playbook.play import Play from ansible.vars import VariableManager from ansible.utils import module_docs from ansible.utils.color import stringc from ansible.utils.unicode import to_unicode, to_str from ansible.plugins import module_loader try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class ConsoleCLI(CLI, cmd.Cmd): modules = [] def __init__(self, args): super(ConsoleCLI, self).__init__(args) self.intro = 'Welcome to the ansible console.\nType help or ? to list commands.\n' self.groups = [] self.hosts = [] self.pattern = None self.variable_manager = None self.loader = None self.passwords = dict() self.modules = None cmd.Cmd.__init__(self) def parse(self): self.parser = CLI.base_parser( usage='%prog [options]', runas_opts=True, inventory_opts=True, connect_opts=True, check_opts=True, vault_opts=True, fork_opts=True, module_opts=True, ) # options unique to shell self.parser.add_option('--step', dest='step', action='store_true', help="one-step-at-a-time: confirm each task before running") self.parser.set_defaults(cwd='*') self.options, self.args = self.parser.parse_args(self.args[1:]) display.verbosity = self.options.verbosity self.validate_conflicts(runas_opts=True, vault_opts=True, fork_opts=True) return True def get_names(self): return dir(self) def cmdloop(self): try: cmd.Cmd.cmdloop(self) except KeyboardInterrupt: self.do_exit(self) def set_prompt(self): login_user = self.options.remote_user or getpass.getuser() self.selected = self.inventory.list_hosts(self.options.cwd) prompt = "%s@%s (%d)[f:%s]" % (login_user, self.options.cwd, len(self.selected), self.options.forks) if self.options.become and self.options.become_user in [None, 'root']: prompt += "# " color = C.COLOR_ERROR else: prompt += "$ " color = C.COLOR_HIGHLIGHT self.prompt = stringc(prompt, color) def list_modules(self): modules = set() if self.options.module_path is not None: for i in self.options.module_path.split(os.pathsep): module_loader.add_directory(i) module_paths = module_loader._get_paths() for path in module_paths: if path is not None: modules.update(self._find_modules_in_path(path)) return modules def _find_modules_in_path(self, path): if os.path.isdir(path): for module in os.listdir(path): if module.startswith('.'): continue elif os.path.isdir(module): self._find_modules_in_path(module) elif module.startswith('__'): continue elif any(module.endswith(x) for x in C.BLACKLIST_EXTS): continue elif module in C.IGNORE_FILES: continue elif module.startswith('_'): fullpath = '/'.join([path,module]) if os.path.islink(fullpath): # avoids aliases continue module = module.replace('_', '', 1) module = os.path.splitext(module)[0] # removes the extension yield module def default(self, arg, forceshell=False): """ actually runs modules """ if arg.startswith("#"): return False if not self.options.cwd: display.error("No host found") return False if arg.split()[0] in self.modules: module = arg.split()[0] module_args = ' '.join(arg.split()[1:]) else: module = 'shell' module_args = arg if forceshell is True: 
module = 'shell' module_args = arg self.options.module_name = module result = None try: check_raw = self.options.module_name in ('command', 'shell', 'script', 'raw') play_ds = dict( name = "Ansible Shell", hosts = self.options.cwd, gather_facts = 'no', tasks = [ dict(action=dict(module=module, args=parse_kv(module_args, check_raw=check_raw)))] ) play = Play().load(play_ds, variable_manager=self.variable_manager, loader=self.loader) except Exception as e: display.error(u"Unable to build command: %s" % to_unicode(e)) return False try: cb = 'minimal' #FIXME: make callbacks configurable # now create a task queue manager to execute the play self._tqm = None try: self._tqm = TaskQueueManager( inventory=self.inventory, variable_manager=self.variable_manager, loader=self.loader, options=self.options, passwords=self.passwords, stdout_callback=cb, run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS, run_tree=False, ) result = self._tqm.run(play) finally: if self._tqm: self._tqm.cleanup() if self.loader: self.loader.cleanup_all_tmp_files() if result is None: display.error("No hosts found") return False except KeyboardInterrupt: display.error('User interrupted execution') return False except Exception as e: display.error(to_unicode(e)) #FIXME: add traceback in very very verbose mode return False def emptyline(self): return def do_shell(self, arg): """ You can run shell commands through the shell module. eg.: shell ps uax | grep java | wc -l shell killall python shell halt -n You can use the ! to force the shell module. eg.: !ps aux | grep java | wc -l """ self.default(arg, True) def do_forks(self, arg): """Set the number of forks""" if not arg: display.display('Usage: forks ') return self.options.forks = int(arg) self.set_prompt() do_serial = do_forks def do_verbosity(self, arg): """Set verbosity level""" if not arg: display.display('Usage: verbosity ') else: display.verbosity = int(arg) display.v('verbosity level set to %s' % arg) def do_cd(self, arg): """ Change active host/group. You can use hosts patterns as well eg.: cd webservers cd webservers:dbservers cd webservers:!phoenix cd webservers:&staging cd webservers:dbservers:&staging:!phoenix """ if not arg: self.options.cwd = '*' elif arg == '..': try: self.options.cwd = self.inventory.groups_for_host(self.options.cwd)[1].name except Exception: self.options.cwd = '' elif arg in '/*': self.options.cwd = 'all' elif self.inventory.get_hosts(arg): self.options.cwd = arg else: display.display("no host matched") self.set_prompt() def do_list(self, arg): """List the hosts in the current group""" if arg == 'groups': for group in self.groups: display.display(group) else: for host in self.selected: display.display(host.name) def do_become(self, arg): """Toggle whether plays run with become""" if arg: self.options.become = C.mk_boolean(arg) display.v("become changed to %s" % self.options.become) self.set_prompt() else: display.display("Please specify become value, e.g. `become yes`") def do_remote_user(self, arg): """Given a username, set the remote user plays are run by""" if arg: self.options.remote_user = arg self.set_prompt() else: display.display("Please specify a remote user, e.g. `remote_user root`") def do_become_user(self, arg): """Given a username, set the user that plays are run by when using become""" if arg: self.options.become_user = arg else: display.display("Please specify a user, e.g. 
`become_user jenkins`") display.v("Current user is %s" % self.options.become_user) self.set_prompt() def do_become_method(self, arg): """Given a become_method, set the privilege escalation method when using become""" if arg: self.options.become_method = arg display.v("become_method changed to %s" % self.options.become_method) else: display.display("Please specify a become_method, e.g. `become_method su`") def do_exit(self, args): """Exits from the console""" sys.stdout.write('\n') return -1 do_EOF = do_exit def helpdefault(self, module_name): if module_name in self.modules: in_path = module_loader.find_plugin(module_name) if in_path: oc, a, _ = module_docs.get_docstring(in_path) if oc: display.display(oc['short_description']) display.display('Parameters:') for opt in oc['options'].keys(): display.display(' ' + stringc(opt, C.COLOR_HIGHLIGHT) + ' ' + oc['options'][opt]['description'][0]) else: display.error('No documentation found for %s.' % module_name) else: display.error('%s is not a valid command, use ? to list all valid commands.' % module_name) def complete_cd(self, text, line, begidx, endidx): mline = line.partition(' ')[2] offs = len(mline) - len(text) if self.options.cwd in ('all','*','\\'): completions = self.hosts + self.groups else: completions = [x.name for x in self.inventory.list_hosts(self.options.cwd)] return [to_str(s)[offs:] for s in completions if to_str(s).startswith(to_str(mline))] def completedefault(self, text, line, begidx, endidx): if line.split()[0] in self.modules: mline = line.split(' ')[-1] offs = len(mline) - len(text) completions = self.module_args(line.split()[0]) return [s[offs:] + '=' for s in completions if s.startswith(mline)] def module_args(self, module_name): in_path = module_loader.find_plugin(module_name) oc, a, _ = module_docs.get_docstring(in_path) return oc['options'].keys() def run(self): super(ConsoleCLI, self).run() sshpass = None becomepass = None vault_pass = None # hosts if len(self.args) != 1: self.pattern = 'all' else: self.pattern = self.args[0] self.options.cwd = self.pattern # dynamically add modules as commands self.modules = self.list_modules() for module in self.modules: setattr(self, 'do_' + module, lambda arg, module=module: self.default(module + ' ' + arg)) setattr(self, 'help_' + module, lambda module=module: self.helpdefault(module)) self.normalize_become_options() (sshpass, becomepass) = self.ask_passwords() self.passwords = { 'conn_pass': sshpass, 'become_pass': becomepass } self.loader = DataLoader() if self.options.vault_password_file: # read vault_pass from a file vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader=self.loader) self.loader.set_vault_password(vault_pass) elif self.options.ask_vault_pass: vault_pass = self.ask_vault_passwords()[0] self.loader.set_vault_password(vault_pass) self.variable_manager = VariableManager() self.inventory = Inventory(loader=self.loader, variable_manager=self.variable_manager, host_list=self.options.inventory) self.variable_manager.set_inventory(self.inventory) no_hosts = False if len(self.inventory.list_hosts()) == 0: # Empty inventory no_hosts = True display.warning("provided hosts list is empty, only localhost is available") self.inventory.subset(self.options.subset) hosts = self.inventory.list_hosts(self.pattern) if len(hosts) == 0 and not no_hosts: raise AnsibleError("Specified hosts and/or --limit does not match any hosts") self.groups = self.inventory.list_groups() self.hosts = [x.name for x in hosts] # This hack is to work around readline issues 
on a mac: # http://stackoverflow.com/a/7116997/541202 if 'libedit' in readline.__doc__: readline.parse_and_bind("bind ^I rl_complete") else: readline.parse_and_bind("tab: complete") histfile = os.path.join(os.path.expanduser("~"), ".ansible-console_history") try: readline.read_history_file(histfile) except IOError: pass atexit.register(readline.write_history_file, histfile) self.set_prompt() self.cmdloop() ansible-2.1.1.0/lib/ansible/cli/doc.py0000664000175400017540000002761712746444466020533 0ustar jenkinsjenkins00000000000000# (c) 2014, James Tanner # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # # ansible-vault is a script that encrypts/decrypts YAML files. See # http://docs.ansible.com/playbooks_vault.html for more details. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import datetime import os import traceback import textwrap from ansible.compat.six import iteritems from ansible import constants as C from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.plugins import module_loader from ansible.cli import CLI from ansible.utils import module_docs try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class DocCLI(CLI): """ Vault command line class """ def __init__(self, args): super(DocCLI, self).__init__(args) self.module_list = [] def parse(self): self.parser = CLI.base_parser( usage='usage: %prog [options] [module...]', epilog='Show Ansible module documentation', module_opts=True, ) self.parser.add_option("-l", "--list", action="store_true", default=False, dest='list_dir', help='List available modules') self.parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet', help='Show playbook snippet for specified module(s)') self.options, self.args = self.parser.parse_args(self.args[1:]) display.verbosity = self.options.verbosity def run(self): super(DocCLI, self).run() if self.options.module_path is not None: for i in self.options.module_path.split(os.pathsep): module_loader.add_directory(i) # list modules if self.options.list_dir: paths = module_loader._get_paths() for path in paths: self.find_modules(path) self.pager(self.get_module_list_text()) return 0 if len(self.args) == 0: raise AnsibleOptionsError("Incorrect options passed") # process command line module list text = '' for module in self.args: try: # if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs filename = module_loader.find_plugin(module, mod_type='.py') if filename is None: display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader))) continue if any(filename.endswith(x) for x in C.BLACKLIST_EXTS): continue try: doc, plainexamples, returndocs = module_docs.get_docstring(filename, verbose=(self.options.verbosity > 0)) except: display.vvv(traceback.print_exc()) display.error("module %s has a documentation error formatting or is missing documentation\nTo 
see exact traceback use -vvv" % module) continue if doc is not None: all_keys = [] for (k,v) in iteritems(doc['options']): all_keys.append(k) all_keys = sorted(all_keys) doc['option_keys'] = all_keys doc['filename'] = filename doc['docuri'] = doc['module'].replace('_', '-') doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d') doc['plainexamples'] = plainexamples doc['returndocs'] = returndocs if self.options.show_snippet: text += self.get_snippet_text(doc) else: text += self.get_man_text(doc) else: # this typically means we couldn't even parse the docstring, not just that the YAML is busted, # probably a quoting issue. raise AnsibleError("Parsing produced an empty object.") except Exception as e: display.vvv(traceback.print_exc()) raise AnsibleError("module %s missing documentation (or could not parse documentation): %s\n" % (module, str(e))) self.pager(text) return 0 def find_modules(self, path): if os.path.isdir(path): for module in os.listdir(path): if module.startswith('.'): continue elif os.path.isdir(module): self.find_modules(module) elif any(module.endswith(x) for x in C.BLACKLIST_EXTS): continue elif module.startswith('__'): continue elif module in C.IGNORE_FILES: continue elif module.startswith('_'): fullpath = '/'.join([path,module]) if os.path.islink(fullpath): # avoids aliases continue module = os.path.splitext(module)[0] # removes the extension self.module_list.append(module) def get_module_list_text(self): columns = display.columns displace = max(len(x) for x in self.module_list) linelimit = columns - displace - 5 text = [] deprecated = [] for module in sorted(set(self.module_list)): if module in module_docs.BLACKLIST_MODULES: continue # if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs filename = module_loader.find_plugin(module, mod_type='.py') if filename is None: continue if filename.endswith(".ps1"): continue if os.path.isdir(filename): continue try: doc, plainexamples, returndocs = module_docs.get_docstring(filename) desc = self.tty_ify(doc.get('short_description', '?')).strip() if len(desc) > linelimit: desc = desc[:linelimit] + '...' 
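# Example output shape (hypothetical rows, for illustration only):
# get_module_list_text() renders `ansible-doc -l` as one row per module, the
# name left-justified to the longest module name and the short description
# truncated to fit the terminal width, e.g.
#
#   copy    Copies files to remote locations.
#   ping    Try to connect to host, verify a usable python and...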
if module.startswith('_'): # Handle deprecated deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc)) else: text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc)) except: raise AnsibleError("module %s has a documentation error formatting or is missing documentation\n" % module) if len(deprecated) > 0: text.append("\nDEPRECATED:") text.extend(deprecated) return "\n".join(text) @staticmethod def print_paths(finder): ''' Returns a string suitable for printing of the search path ''' # Uses a list to get the order right ret = [] for i in finder._get_paths(): if i not in ret: ret.append(i) return os.pathsep.join(ret) def get_snippet_text(self, doc): text = [] desc = CLI.tty_ify(doc['short_description']) text.append("- name: %s" % (desc)) text.append(" action: %s" % (doc['module'])) pad = 31 subdent = ''.join([" " for a in xrange(pad)]) limit = display.columns - pad for o in sorted(doc['options'].keys()): opt = doc['options'][o] desc = CLI.tty_ify(" ".join(opt['description'])) required = opt.get('required', False) if not isinstance(required, bool): raise("Incorrect value for 'Required', a boolean is needed.: %s" % required) if required: s = o + "=" else: s = o text.append(" %-20s # %s" % (s, textwrap.fill(desc, limit, subsequent_indent=subdent))) text.append('') return "\n".join(text) def get_man_text(self, doc): opt_indent=" " text = [] text.append("> %s\n" % doc['module'].upper()) pad = display.columns * 0.20 limit = max(display.columns - int(pad), 70) if isinstance(doc['description'], list): desc = " ".join(doc['description']) else: desc = doc['description'] text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), limit, initial_indent=" ", subsequent_indent=" ")) if 'deprecated' in doc and doc['deprecated'] is not None and len(doc['deprecated']) > 0: text.append("DEPRECATED: \n%s\n" % doc['deprecated']) if 'option_keys' in doc and len(doc['option_keys']) > 0: text.append("Options (= is mandatory):\n") for o in sorted(doc['option_keys']): opt = doc['options'][o] required = opt.get('required', False) if not isinstance(required, bool): raise("Incorrect value for 'Required', a boolean is needed.: %s" % required) if required: opt_leadin = "=" else: opt_leadin = "-" text.append("%s %s" % (opt_leadin, o)) if isinstance(opt['description'], list): desc = " ".join(opt['description']) else: desc = opt['description'] if 'choices' in opt: choices = ", ".join(str(i) for i in opt['choices']) desc = desc + " (Choices: " + choices + ")" if 'default' in opt: default = str(opt['default']) desc = desc + " [Default: " + default + "]" text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), limit, initial_indent=opt_indent, subsequent_indent=opt_indent)) if 'notes' in doc and doc['notes'] and len(doc['notes']) > 0: notes = " ".join(doc['notes']) text.append("Notes:%s\n" % textwrap.fill(CLI.tty_ify(notes), limit-6, initial_indent=" ", subsequent_indent=opt_indent)) if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0: req = ", ".join(doc['requirements']) text.append("Requirements:%s\n" % textwrap.fill(CLI.tty_ify(req), limit-16, initial_indent=" ", subsequent_indent=opt_indent)) if 'examples' in doc and len(doc['examples']) > 0: text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's')) for ex in doc['examples']: text.append("%s\n" % (ex['code'])) if 'plainexamples' in doc and doc['plainexamples'] is not None: text.append("EXAMPLES:") text.append(doc['plainexamples']) if 'returndocs' in doc and doc['returndocs'] is 
not None:
            text.append("RETURN VALUES:")
            text.append(doc['returndocs'])
        text.append('')

        maintainers = set()
        if 'author' in doc:
            if isinstance(doc['author'], basestring):
                maintainers.add(doc['author'])
            else:
                maintainers.update(doc['author'])

        if 'maintainers' in doc:
            if isinstance(doc['maintainers'], basestring):
                maintainers.add(doc['maintainers'])
            else:
                maintainers.update(doc['maintainers'])

        text.append('MAINTAINERS: ' + ', '.join(maintainers))
        text.append('')

        return "\n".join(text)
ansible-2.1.1.0/lib/ansible/cli/galaxy.py0000664000175400017540000007262512746444466021242 0ustar jenkinsjenkins00000000000000########################################################################
#
# (C) 2013, James Cammarata
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#
########################################################################

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os.path
import sys
import yaml
import time

from collections import defaultdict
from jinja2 import Environment

import ansible.constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.login import GalaxyLogin
from ansible.galaxy.token import GalaxyToken
from ansible.playbook.role.requirement import RoleRequirement
from ansible.utils.unicode import to_unicode

try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()


class GalaxyCLI(CLI):

    SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
    VALID_ACTIONS = ("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup")

    def __init__(self, args):
        self.api = None
        self.galaxy = None
        super(GalaxyCLI, self).__init__(args)

    def parse(self):
        ''' create an options parser for bin/ansible-galaxy '''

        self.parser = CLI.base_parser(
            usage = "usage: %%prog [%s] [--help] [options] ..."
% "|".join(self.VALID_ACTIONS), epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) ) self.set_action() # common self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination') self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=C.GALAXY_IGNORE_CERTS, help='Ignore SSL certificate validation errors.') # specific to actions if self.action == "delete": self.parser.set_usage("usage: %prog delete [options] github_user github_repo") elif self.action == "import": self.parser.set_usage("usage: %prog import [options] github_user github_repo") self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True, help='Don\'t wait for import results.') self.parser.add_option('--branch', dest='reference', help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)') self.parser.add_option('--status', dest='check_status', action='store_true', default=False, help='Check the status of the most recent import request for given github_user/github_repo.') elif self.action == "info": self.parser.set_usage("usage: %prog info [options] role_name[,version]") elif self.action == "init": self.parser.set_usage("usage: %prog init [options] role_name") self.parser.add_option('-p', '--init-path', dest='init_path', default="./", help='The path in which the skeleton role will be created. The default is the current working directory.') elif self.action == "install": self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]") self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False, help='Ignore errors and continue with the next specified role.') self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies') self.parser.add_option('-r', '--role-file', dest='role_file', help='A file containing a list of roles to be imported') elif self.action == "remove": self.parser.set_usage("usage: %prog remove role1 role2 ...") elif self.action == "list": self.parser.set_usage("usage: %prog list [role_name]") elif self.action == "login": self.parser.set_usage("usage: %prog login [options]") self.parser.add_option('--github-token', dest='token', default=None, help='Identify with github token rather than username and password.') elif self.action == "search": self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] [--author username]") self.parser.add_option('--platforms', dest='platforms', help='list of OS platforms to filter by') self.parser.add_option('--galaxy-tags', dest='tags', help='list of galaxy tags to filter by') self.parser.add_option('--author', dest='author', help='GitHub username') elif self.action == "setup": self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret") self.parser.add_option('--remove', dest='remove_id', default=None, help='Remove the integration matching the provided ID value. 
Use --list to see ID values.') self.parser.add_option('--list', dest="setup_list", action='store_true', default=False, help='List all of your integrations.') # options that apply to more than one action if self.action in ['init', 'info']: self.parser.add_option( '--offline', dest='offline', default=False, action='store_true', help="Don't query the galaxy API when creating roles") if self.action not in ("delete","import","init","login","setup"): # NOTE: while the option type=str, the default is a list, and the # callback will set the value to a list. self.parser.add_option('-p', '--roles-path', dest='roles_path', action="callback", callback=CLI.expand_paths, type=str, default=C.DEFAULT_ROLES_PATH, help='The path to the directory containing your roles. The default is the roles_path configured in your ansible.cfg file (/etc/ansible/roles if not configured)') if self.action in ("init","install"): self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role') self.options, self.args = self.parser.parse_args() display.verbosity = self.options.verbosity self.galaxy = Galaxy(self.options) return True def run(self): super(GalaxyCLI, self).run() self.api = GalaxyAPI(self.galaxy) self.execute() def exit_without_ignore(self, rc=1): """ Exits with the specified return code unless the option --ignore-errors was specified """ if not self.get_opt("ignore_errors", False): raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.') def _display_role_info(self, role_info): text = [u"", u"Role: %s" % to_unicode(role_info['name'])] text.append(u"\tdescription: %s" % role_info.get('description', '')) for k in sorted(role_info.keys()): if k in self.SKIP_INFO_KEYS: continue if isinstance(role_info[k], dict): text.append(u"\t%s:" % (k)) for key in sorted(role_info[k].keys()): if key in self.SKIP_INFO_KEYS: continue text.append(u"\t\t%s: %s" % (key, role_info[k][key])) else: text.append(u"\t%s: %s" % (k, role_info[k])) return u'\n'.join(text) ############################ # execute actions ############################ def execute_init(self): """ Executes the init action, which creates the skeleton framework of a role that complies with the galaxy metadata format. """ init_path = self.get_opt('init_path', './') force = self.get_opt('force', False) offline = self.get_opt('offline', False) role_name = self.args.pop(0).strip() if self.args else None if not role_name: raise AnsibleOptionsError("- no role name specified for init") role_path = os.path.join(init_path, role_name) if os.path.exists(role_path): if os.path.isfile(role_path): raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path) elif not force: raise AnsibleError("- the directory %s already exists.\n" "you can use --force to re-initialize this directory,\n" "however it will reset any main.yml files that may have\n" "been modified there already."
% role_path) # create default README.md if not os.path.exists(role_path): os.makedirs(role_path) readme_path = os.path.join(role_path, "README.md") f = open(readme_path, "wb") f.write(self.galaxy.default_readme) f.close() # create default .travis.yml travis = Environment().from_string(self.galaxy.default_travis).render() f = open(os.path.join(role_path, '.travis.yml'), 'w') f.write(travis) f.close() for dir in GalaxyRole.ROLE_DIRS: dir_path = os.path.join(init_path, role_name, dir) main_yml_path = os.path.join(dir_path, 'main.yml') # create the directory if it doesn't exist already if not os.path.exists(dir_path): os.makedirs(dir_path) # now create the main.yml file for that directory if dir == "meta": # create a skeleton meta/main.yml with a valid galaxy_info # datastructure in place, plus with all of the available # platforms included (but commented out), the galaxy_tags # list, and the dependencies section platforms = [] if not offline: platforms = self.api.get_list("platforms") or [] # group the list of platforms from the api based # on their names, with the release field being # appended to a list of versions platform_groups = defaultdict(list) for platform in platforms: platform_groups[platform['name']].append(platform['release']) platform_groups[platform['name']].sort() inject = dict( author = 'your name', description = 'your description', company = 'your company (optional)', license = 'license (GPLv2, CC-BY, etc)', issue_tracker_url = 'http://example.com/issue/tracker', min_ansible_version = '1.2', platforms = platform_groups, ) rendered_meta = Environment().from_string(self.galaxy.default_meta).render(inject) f = open(main_yml_path, 'w') f.write(rendered_meta) f.close() elif dir == "tests": # create tests/test.yml inject = dict( role_name = role_name ) playbook = Environment().from_string(self.galaxy.default_test).render(inject) f = open(os.path.join(dir_path, 'test.yml'), 'w') f.write(playbook) f.close() # create tests/inventory f = open(os.path.join(dir_path, 'inventory'), 'w') f.write('localhost') f.close() elif dir not in ('files','templates'): # just write a (mostly) empty YAML file for main.yml f = open(main_yml_path, 'w') f.write('---\n# %s file for %s\n' % (dir, role_name)) f.close() display.display("- %s was created successfully" % role_name) def execute_info(self): """ Executes the info action. This action prints out detailed information about an installed role as well as info available from the galaxy API. """ if len(self.args) == 0: # the user needs to specify a role raise AnsibleOptionsError("- you must specify a user/role name") roles_path = self.get_opt("roles_path") data = '' for role in self.args: role_info = {'path': roles_path} gr = GalaxyRole(self.galaxy, role) install_info = gr.install_info if install_info: if 'version' in install_info: install_info['installed_version'] = install_info['version'] del install_info['version'] role_info.update(install_info) remote_data = False if not self.options.offline: remote_data = self.api.lookup_role_by_name(role, False) if remote_data: role_info.update(remote_data) if gr.metadata: role_info.update(gr.metadata) req = RoleRequirement() role_spec = req.role_yaml_parse({'role': role}) if role_spec: role_info.update(role_spec) data = self._display_role_info(role_info) ### FIXME: This is broken in both 1.9 and 2.0 as # _display_role_info() always returns something if not data: data = u"\n- the role %s was not found" % role self.pager(data) def execute_install(self): """ Executes the installation action.
The args list contains the roles to be installed, unless -r was specified. The list of roles can be a name (which will be downloaded via the galaxy API and github), or it can be a local .tar.gz file. """ role_file = self.get_opt("role_file", None) if len(self.args) == 0 and role_file is None: # the user needs to specify one of either --role-file # or specify a single user/role name raise AnsibleOptionsError("- you must specify a user/role name or a roles file") elif len(self.args) == 1 and role_file is not None: # using a role file is mutually exclusive of specifying # the role name on the command line raise AnsibleOptionsError("- please specify a user/role name, or a roles file, but not both") no_deps = self.get_opt("no_deps", False) force = self.get_opt('force', False) roles_left = [] if role_file: try: f = open(role_file, 'r') if role_file.endswith('.yaml') or role_file.endswith('.yml'): try: required_roles = yaml.safe_load(f.read()) except Exception as e: raise AnsibleError("Unable to load data from the requirements file: %s" % role_file) if required_roles is None: raise AnsibleError("No roles found in file: %s" % role_file) for role in required_roles: role = RoleRequirement.role_yaml_parse(role) display.vvv('found role %s in yaml file' % str(role)) if 'name' not in role and 'scm' not in role: raise AnsibleError("Must specify name or src for role") roles_left.append(GalaxyRole(self.galaxy, **role)) else: display.deprecated("going forward only the yaml format will be supported") # roles listed in a file, one per line for rline in f.readlines(): if rline.startswith("#") or rline.strip() == '': continue display.debug('found role %s in text file' % str(rline)) role = RoleRequirement.role_yaml_parse(rline.strip()) roles_left.append(GalaxyRole(self.galaxy, **role)) f.close() except (IOError, OSError) as e: display.error('Unable to open %s: %s' % (role_file, str(e))) else: # roles were specified directly, so we'll just go out and grab them # (and their dependencies, unless the user doesn't want us to). for rname in self.args: role = RoleRequirement.role_yaml_parse(rname.strip()) roles_left.append(GalaxyRole(self.galaxy, **role)) for role in roles_left: display.vvv('Installing role %s ' % role.name) # query the galaxy API for the role data if role.install_info is not None and not force: display.display('- %s is already installed, skipping.' % role.name) continue try: installed = role.install() except AnsibleError as e: display.warning("- %s was NOT installed successfully: %s " % (role.name, str(e))) self.exit_without_ignore() continue # install dependencies, if we want them if not no_deps and installed: role_dependencies = role.metadata.get('dependencies') or [] for dep in role_dependencies: display.debug('Installing dep %s' % dep) dep_req = RoleRequirement() dep_info = dep_req.role_yaml_parse(dep) dep_role = GalaxyRole(self.galaxy, **dep_info) if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None: # we know we can skip this, as it's not going to # be found on galaxy.ansible.com continue if dep_role.install_info is None or force: if dep_role not in roles_left: display.display('- adding dependency: %s' % dep_role.name) roles_left.append(dep_role) else: display.display('- dependency %s already pending installation.' % dep_role.name) else: display.display('- dependency %s is already installed, skipping.' % dep_role.name) if not installed: display.warning("- %s was NOT installed successfully."
% role.name) self.exit_without_ignore() return 0 def execute_remove(self): """ Executes the remove action. The args list contains the list of roles to be removed. This list can contain more than one role. """ if len(self.args) == 0: raise AnsibleOptionsError('- you must specify at least one role to remove.') for role_name in self.args: role = GalaxyRole(self.galaxy, role_name) try: if role.remove(): display.display('- successfully removed %s' % role_name) else: display.display('- %s is not installed, skipping.' % role_name) except Exception as e: raise AnsibleError("Failed to remove role %s: %s" % (role_name, str(e))) return 0 def execute_list(self): """ Executes the list action. The args list can contain zero or one role. If one is specified, only that role will be shown, otherwise all roles in the specified directory will be shown. """ if len(self.args) > 1: raise AnsibleOptionsError("- please specify only one role to list, or specify no roles to see a full list") if len(self.args) == 1: # show only the requested role, if it exists name = self.args.pop() gr = GalaxyRole(self.galaxy, name) if gr.metadata: install_info = gr.install_info version = None if install_info: version = install_info.get("version", None) if not version: version = "(unknown version)" # show some more info about single roles here display.display("- %s, %s" % (name, version)) else: display.display("- the role %s was not found" % name) else: # show all valid roles in the roles_path directory roles_path = self.get_opt('roles_path') for path in roles_path: role_path = os.path.expanduser(path) if not os.path.exists(role_path): raise AnsibleOptionsError("- the path %s does not exist. Please specify a valid path with --roles-path" % role_path) elif not os.path.isdir(role_path): raise AnsibleOptionsError("- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % role_path) path_files = os.listdir(role_path) for path_file in path_files: gr = GalaxyRole(self.galaxy, path_file) if gr.metadata: install_info = gr.install_info version = None if install_info: version = install_info.get("version", None) if not version: version = "(unknown version)" display.display("- %s, %s" % (path_file, version)) return 0 def execute_search(self): page_size = 1000 search = None if len(self.args): terms = [] for i in range(len(self.args)): terms.append(self.args.pop()) search = '+'.join(terms[::-1]) if not search and not self.options.platforms and not self.options.tags and not self.options.author: raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.") response = self.api.search_roles(search, platforms=self.options.platforms, tags=self.options.tags, author=self.options.author, page_size=page_size) if response['count'] == 0: display.display("No roles match your search.", color=C.COLOR_ERROR) return True data = [u''] if response['count'] > page_size: data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size)) else: data.append(u"Found %d roles matching your search:" % response['count']) max_len = [] for role in response['results']: max_len.append(len(role['username'] + '.'
+ role['name'])) name_len = max(max_len) format_str = u" %%-%ds %%s" % name_len data.append(u'') data.append(format_str % (u"Name", u"Description")) data.append(format_str % (u"----", u"-----------")) for role in response['results']: data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description'])) data = u'\n'.join(data) self.pager(data) return True def execute_login(self): """ Verify the user's identity via GitHub and retrieve an auth token from Galaxy. """ # Authenticate with GitHub and retrieve a token if self.options.token is None: login = GalaxyLogin(self.galaxy) github_token = login.create_github_token() else: github_token = self.options.token galaxy_response = self.api.authenticate(github_token) if self.options.token is None: # Remove the token we created login.remove_github_token() # Store the Galaxy token token = GalaxyToken() token.set(galaxy_response['token']) display.display("Successfully logged into Galaxy as %s" % galaxy_response['username']) return 0 def execute_import(self): """ Import a role into Galaxy """ colors = { 'INFO': 'normal', 'WARNING': C.COLOR_WARN, 'ERROR': C.COLOR_ERROR, 'SUCCESS': C.COLOR_OK, 'FAILED': C.COLOR_ERROR, } if len(self.args) < 2: raise AnsibleError("Expected a github_username and github_repository. Use --help.") github_repo = self.args.pop() github_user = self.args.pop() if self.options.check_status: task = self.api.get_import_task(github_user=github_user, github_repo=github_repo) else: # Submit an import request task = self.api.create_import_task(github_user, github_repo, reference=self.options.reference) if len(task) > 1: # found multiple roles associated with github_user/github_repo display.display("WARNING: More than one Galaxy role associated with GitHub repo %s/%s." % (github_user,github_repo), color='yellow') display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED) for t in task: display.display('%s.%s' % (t['summary_fields']['role']['namespace'],t['summary_fields']['role']['name']), color=C.COLOR_CHANGED) display.display(u'\n' + "To properly namespace this role, remove each of the above and re-import %s/%s from scratch" % (github_user,github_repo), color=C.COLOR_CHANGED) return 0 # found a single role as expected display.display("Successfully submitted import request %d" % task[0]['id']) if not self.options.wait: display.display("Role name: %s" % task[0]['summary_fields']['role']['name']) display.display("Repo: %s/%s" % (task[0]['github_user'],task[0]['github_repo'])) if self.options.check_status or self.options.wait: # Get the status of the import msg_list = [] finished = False while not finished: task = self.api.get_import_task(task_id=task[0]['id']) for msg in task[0]['summary_fields']['task_messages']: if msg['id'] not in msg_list: display.display(msg['message_text'], color=colors[msg['message_type']]) msg_list.append(msg['id']) if task[0]['state'] in ['SUCCESS', 'FAILED']: finished = True else: time.sleep(10) return 0 def execute_setup(self): """ Set up an integration from GitHub or Travis """ if self.options.setup_list: # List existing integration secrets secrets = self.api.list_secrets() if len(secrets) == 0: # None found display.display("No integrations found.") return 0 display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK) display.display("---------- ---------- ----------", color=C.COLOR_OK) for secret in secrets: display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'], secret['github_repo']),color=C.COLOR_OK) return 0 if
self.options.remove_id: # Remove a secret self.api.remove_secret(self.options.remove_id) display.display("Secret removed. Integrations using this secret will no longer work.", color=C.COLOR_OK) return 0 if len(self.args) < 4: raise AnsibleError("Missing one or more arguments. Expecting: source github_user github_repo secret") secret = self.args.pop() github_repo = self.args.pop() github_user = self.args.pop() source = self.args.pop() resp = self.api.add_secret(source, github_user, github_repo, secret) display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo'])) return 0 def execute_delete(self): """ Delete a role from galaxy.ansible.com """ if len(self.args) < 2: raise AnsibleError("Missing one or more arguments. Expected: github_user github_repo") github_repo = self.args.pop() github_user = self.args.pop() resp = self.api.delete_role(github_user, github_repo) if len(resp['deleted_roles']) > 1: display.display("Deleted the following roles:") display.display("ID User Name") display.display("------ --------------- ----------") for role in resp['deleted_roles']: display.display("%-8s %-15s %s" % (role.id,role.namespace,role.name)) display.display(resp['status']) return True ansible-2.1.1.0/lib/ansible/cli/playbook.py0000664000175400017540000002215512746444466021572 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # (c) 2012, Michael DeHaan # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see .
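# A minimal usage sketch (hypothetical argv, not part of this file): like the
# other CLI classes in this package, the PlaybookCLI class below is driven by
# a thin bin/ wrapper, which constructs it with argv, parses options, and uses
# the return value of run() as the process exit code.
#
#   from ansible.cli.playbook import PlaybookCLI
#
#   cli = PlaybookCLI(['ansible-playbook', '-i', 'hosts', 'site.yml'])
#   cli.parse()               # populates cli.options / cli.args
#   exit_code = cli.run()     # 0 on success, non-zero on error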
######################################################## from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import stat from ansible.cli import CLI from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.executor.playbook_executor import PlaybookExecutor from ansible.inventory import Inventory from ansible.parsing.dataloader import DataLoader from ansible.playbook.block import Block from ansible.playbook.play_context import PlayContext from ansible.utils.vars import load_extra_vars from ansible.utils.vars import load_options_vars from ansible.vars import VariableManager try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() #--------------------------------------------------------------------------------------------------- class PlaybookCLI(CLI): ''' code behind ansible playbook cli''' def parse(self): # create parser for CLI options parser = CLI.base_parser( usage = "%prog playbook.yml", connect_opts=True, meta_opts=True, runas_opts=True, subset_opts=True, check_opts=True, inventory_opts=True, runtask_opts=True, vault_opts=True, fork_opts=True, module_opts=True, ) # ansible playbook specific opts parser.add_option('--list-tasks', dest='listtasks', action='store_true', help="list all tasks that would be executed") parser.add_option('--list-tags', dest='listtags', action='store_true', help="list all available tags") parser.add_option('--step', dest='step', action='store_true', help="one-step-at-a-time: confirm each task before running") parser.add_option('--start-at-task', dest='start_at_task', help="start the playbook at the task matching this name") self.options, self.args = parser.parse_args(self.args[1:]) self.parser = parser if len(self.args) == 0: raise AnsibleOptionsError("You must specify a playbook file to run") display.verbosity = self.options.verbosity self.validate_conflicts(runas_opts=True, vault_opts=True, fork_opts=True) def run(self): super(PlaybookCLI, self).run() # Manage passwords sshpass = None becomepass = None vault_pass = None passwords = {} # don't deal with privilege escalation or passwords when we don't need to if not self.options.listhosts and not self.options.listtasks and not self.options.listtags and not self.options.syntax: self.normalize_become_options() (sshpass, becomepass) = self.ask_passwords() passwords = { 'conn_pass': sshpass, 'become_pass': becomepass } loader = DataLoader() if self.options.vault_password_file: # read vault_pass from a file vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader=loader) loader.set_vault_password(vault_pass) elif self.options.ask_vault_pass: vault_pass = self.ask_vault_passwords()[0] loader.set_vault_password(vault_pass) # initial error check, to make sure all specified playbooks are accessible # before we start running anything through the playbook executor for playbook in self.args: if not os.path.exists(playbook): raise AnsibleError("the playbook: %s could not be found" % playbook) if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)): raise AnsibleError("the playbook: %s does not appear to be a file" % playbook) # create the variable manager, which will be shared throughout # the code, ensuring a consistent view of global variables variable_manager = VariableManager() variable_manager.extra_vars = load_extra_vars(loader=loader, options=self.options)
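        # Precedence note: extra vars passed via -e/--extra-vars win over all
        # other variable sources. load_extra_vars() accepts key=value pairs,
        # inline JSON/YAML, and @file includes, e.g. (hypothetical values):
        #
        #   ansible-playbook site.yml -e "deploy_env=staging"
        #   ansible-playbook site.yml -e '{"deploy_env": "staging"}'
        #   ansible-playbook site.yml -e @vars/staging.yml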
variable_manager.options_vars = load_options_vars(self.options) # create the inventory, and filter it based on the subset specified (if any) inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=self.options.inventory) variable_manager.set_inventory(inventory) # Note: slightly wrong, this is written so that implicit localhost # (which is not returned in list_hosts()) is taken into account for # warning if inventory is empty. But it can't be taken into account for # checking if limit doesn't match any hosts. Instead we don't worry about # limit if only implicit localhost was in inventory to start with. # # Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts()) no_hosts = False if len(inventory.list_hosts()) == 0: # Empty inventory display.warning("provided hosts list is empty, only localhost is available") no_hosts = True inventory.subset(self.options.subset) if len(inventory.list_hosts()) == 0 and no_hosts is False: # Invalid limit raise AnsibleError("Specified --limit does not match any hosts") # create the playbook executor, which manages running the plays via a task queue manager pbex = PlaybookExecutor(playbooks=self.args, inventory=inventory, variable_manager=variable_manager, loader=loader, options=self.options, passwords=passwords) results = pbex.run() if isinstance(results, list): for p in results: display.display('\nplaybook: %s' % p['playbook']) for idx, play in enumerate(p['plays']): msg = "\n play #%d (%s): %s" % (idx + 1, ','.join(play.hosts), play.name) mytags = set(play.tags) msg += '\tTAGS: [%s]' % (','.join(mytags)) if self.options.listhosts: playhosts = set(inventory.get_hosts(play.hosts)) msg += "\n pattern: %s\n hosts (%d):" % (play.hosts, len(playhosts)) for host in playhosts: msg += "\n %s" % host display.display(msg) all_tags = set() if self.options.listtags or self.options.listtasks: taskmsg = '' if self.options.listtasks: taskmsg = ' tasks:\n' def _process_block(b): taskmsg = '' for task in b.block: if isinstance(task, Block): taskmsg += _process_block(task) else: if task.action == 'meta': continue all_tags.update(task.tags) if self.options.listtasks: cur_tags = list(mytags.union(set(task.tags))) cur_tags.sort() if task.name: taskmsg += " %s" % task.get_name() else: taskmsg += " %s" % task.action taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags) return taskmsg all_vars = variable_manager.get_vars(loader=loader, play=play) play_context = PlayContext(play=play, options=self.options) for block in play.compile(): block = block.filter_tagged_tasks(play_context, all_vars) if not block.has_tasks(): continue taskmsg += _process_block(block) if self.options.listtags: cur_tags = list(mytags.union(all_tags)) cur_tags.sort() taskmsg += " TASK TAGS: [%s]\n" % ', '.join(cur_tags) display.display(taskmsg) return 0 else: return results ansible-2.1.1.0/lib/ansible/cli/pull.py0000664000175400017540000002536712746444466020732 0ustar jenkinsjenkins00000000000000# (c) 2012, Michael DeHaan # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details.
# # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . from __future__ import (absolute_import, division, print_function) __metaclass__ = type ######################################################## import datetime import os import platform import random import shutil import socket import sys import time from ansible.errors import AnsibleOptionsError from ansible.cli import CLI from ansible.plugins import module_loader from ansible.utils.cmd_functions import run_cmd try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() ######################################################## class PullCLI(CLI): ''' code behind ansible-pull cli''' DEFAULT_REPO_TYPE = 'git' DEFAULT_PLAYBOOK = 'local.yml' PLAYBOOK_ERRORS = { 1: 'File does not exist', 2: 'File is not readable' } SUPPORTED_REPO_MODULES = ['git'] def parse(self): ''' create an options parser for bin/ansible-pull ''' self.parser = CLI.base_parser( usage='%prog -U [options]', connect_opts=True, vault_opts=True, runtask_opts=True, subset_opts=True, inventory_opts=True, module_opts=True, runas_prompt_opts=True, ) # options unique to pull self.parser.add_option('--purge', default=False, action='store_true', help='purge checkout after playbook run') self.parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true', help='only run the playbook if the repository has been updated') self.parser.add_option('-s', '--sleep', dest='sleep', default=None, help='sleep for random interval (between 0 and n seconds) before starting. This is a useful way to disperse git requests') self.parser.add_option('-f', '--force', dest='force', default=False, action='store_true', help='run the playbook even if the repository could not be updated') self.parser.add_option('-d', '--directory', dest='dest', default=None, help='directory to checkout repository to') self.parser.add_option('-U', '--url', dest='url', default=None, help='URL of the playbook repository') self.parser.add_option('--full', dest='fullclone', action='store_true', help='Do a full clone, instead of a shallow one.') self.parser.add_option('-C', '--checkout', dest='checkout', help='branch/tag/commit to checkout. ' 'Defaults to behavior of repository module.') self.parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true', help='adds the hostkey for the repo url if not already added') self.parser.add_option('-m', '--module-name', dest='module_name', default=self.DEFAULT_REPO_TYPE, help='Repository module name, which ansible will use to check out the repo. Default is %s.' % self.DEFAULT_REPO_TYPE) self.parser.add_option('--verify-commit', dest='verify', default=False, action='store_true', help='verify GPG signature of checked out commit, if it fails abort running the playbook.' ' This needs the corresponding VCS module to support such an operation') # for pull we don't want a default self.parser.set_defaults(inventory=None) self.options, self.args = self.parser.parse_args(self.args[1:]) if not self.options.dest: hostname = socket.getfqdn() # use a hostname dependent directory, in case of $HOME on nfs self.options.dest = os.path.join('~/.ansible/pull', hostname) self.options.dest = os.path.expandvars(os.path.expanduser(self.options.dest)) if self.options.sleep: try: secs = random.randint(0,int(self.options.sleep)) self.options.sleep = secs except ValueError: raise AnsibleOptionsError("%s is not a number."
% self.options.sleep) if not self.options.url: raise AnsibleOptionsError("URL for repository not specified, use -h for help") if self.options.module_name not in self.SUPPORTED_REPO_MODULES: raise AnsibleOptionsError("Unsupported repo module %s, choices are %s" % (self.options.module_name, ','.join(self.SUPPORTED_REPO_MODULES))) display.verbosity = self.options.verbosity self.validate_conflicts(vault_opts=True) def run(self): ''' use Runner lib to do SSH things ''' super(PullCLI, self).run() # log command line now = datetime.datetime.now() display.display(now.strftime("Starting Ansible Pull at %F %T")) display.display(' '.join(sys.argv)) # Build Checkout command # Now construct the ansible command node = platform.node() host = socket.getfqdn() limit_opts = 'localhost,%s,127.0.0.1' % ','.join(set([host, node, host.split('.')[0], node.split('.')[0]])) base_opts = '-c local ' if self.options.verbosity > 0: base_opts += ' -%s' % ''.join([ "v" for x in range(0, self.options.verbosity) ]) # Attempt to use the inventory passed in as an argument # It might not yet have been downloaded so use localhost as default if not self.options.inventory or ( ',' not in self.options.inventory and not os.path.exists(self.options.inventory)): inv_opts = 'localhost,' else: inv_opts = self.options.inventory #FIXME: enable more repo modules hg/svn? if self.options.module_name == 'git': repo_opts = "name=%s dest=%s" % (self.options.url, self.options.dest) if self.options.checkout: repo_opts += ' version=%s' % self.options.checkout if self.options.accept_host_key: repo_opts += ' accept_hostkey=yes' if self.options.private_key_file: repo_opts += ' key_file=%s' % self.options.private_key_file if self.options.verify: repo_opts += ' verify_commit=yes' if not self.options.fullclone: repo_opts += ' depth=1' path = module_loader.find_plugin(self.options.module_name) if path is None: raise AnsibleOptionsError("module '%s' not found.\n" % self.options.module_name) bin_path = os.path.dirname(os.path.abspath(sys.argv[0])) # hardcode local and inventory/host as this is just meant to fetch the repo cmd = '%s/ansible -i "%s" %s -m %s -a "%s" all -l "%s"' % (bin_path, inv_opts, base_opts, self.options.module_name, repo_opts, limit_opts) for ev in self.options.extra_vars: cmd += ' -e "%s"' % ev # Nap? if self.options.sleep: display.display("Sleeping for %d seconds..." % self.options.sleep) time.sleep(self.options.sleep) # RUN the Checkout command display.debug("running ansible with VCS module to checkout repo") display.vvvv('EXEC: %s' % cmd) rc, out, err = run_cmd(cmd, live=True) if rc != 0: if self.options.force: display.warning("Unable to update repository.
Continuing with (forced) run of playbook.") else: return rc elif self.options.ifchanged and '"changed": true' not in out: display.display("Repository has not changed, quitting.") return 0 playbook = self.select_playbook(self.options.dest) if playbook is None: raise AnsibleOptionsError("Could not find a playbook to run.") # Build playbook command cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook) if self.options.vault_password_file: cmd += " --vault-password-file=%s" % self.options.vault_password_file if self.options.inventory: cmd += ' -i "%s"' % self.options.inventory for ev in self.options.extra_vars: cmd += ' -e "%s"' % ev if self.options.ask_sudo_pass or self.options.ask_su_pass or self.options.become_ask_pass: cmd += ' --ask-become-pass' if self.options.skip_tags: cmd += ' --skip-tags "%s"' % self.options.skip_tags if self.options.tags: cmd += ' -t "%s"' % self.options.tags if self.options.subset: cmd += ' -l "%s"' % self.options.subset else: cmd += ' -l "%s"' % limit_opts os.chdir(self.options.dest) # RUN THE PLAYBOOK COMMAND display.debug("running ansible-playbook to do actual work") display.debug('EXEC: %s' % cmd) rc, out, err = run_cmd(cmd, live=True) if self.options.purge: os.chdir('/') try: shutil.rmtree(self.options.dest) except Exception as e: display.error("Failed to remove %s: %s" % (self.options.dest, str(e))) return rc def try_playbook(self, path): if not os.path.exists(path): return 1 if not os.access(path, os.R_OK): return 2 return 0 def select_playbook(self, path): playbook = None if len(self.args) > 0 and self.args[0] is not None: playbook = os.path.join(path, self.args[0]) rc = self.try_playbook(playbook) if rc != 0: display.warning("%s: %s" % (playbook, self.PLAYBOOK_ERRORS[rc])) return None return playbook else: fqdn = socket.getfqdn() hostpb = os.path.join(path, fqdn + '.yml') shorthostpb = os.path.join(path, fqdn.split('.')[0] + '.yml') localpb = os.path.join(path, self.DEFAULT_PLAYBOOK) errors = [] for pb in [hostpb, shorthostpb, localpb]: rc = self.try_playbook(pb) if rc == 0: playbook = pb break else: errors.append("%s: %s" % (pb, self.PLAYBOOK_ERRORS[rc])) if playbook is None: display.warning("\n".join(errors)) return playbook ansible-2.1.1.0/lib/ansible/cli/vault.py0000664000175400017540000001505412746444466021111 0ustar jenkinsjenkins00000000000000# (c) 2014, James Tanner # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # # ansible-vault is a script that encrypts/decrypts YAML files. See # http://docs.ansible.com/playbooks_vault.html for more details. 
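# A minimal sketch (hypothetical file name, not part of this file) of the
# actions the VaultCLI class below implements, as invoked from the shell:
#
#   ansible-vault create secrets.yml     # open $EDITOR, encrypt on save
#   ansible-vault encrypt secrets.yml    # encrypt an existing plaintext file
#   ansible-vault view secrets.yml       # decrypt to a pager; file unchanged
#   ansible-vault rekey secrets.yml      # re-encrypt under a new password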
from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import sys from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.parsing.dataloader import DataLoader from ansible.parsing.vault import VaultEditor from ansible.cli import CLI from ansible.utils.unicode import to_unicode try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class VaultCLI(CLI): """ Vault command line class """ VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "rekey", "view") def __init__(self, args): self.vault_pass = None self.new_vault_pass = None super(VaultCLI, self).__init__(args) def parse(self): self.parser = CLI.base_parser( vault_opts=True, usage = "usage: %%prog [%s] [--help] [options] vaultfile.yml" % "|".join(self.VALID_ACTIONS), epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) ) self.set_action() # options specific to each action if self.action == "create": self.parser.set_usage("usage: %prog create [options] file_name") elif self.action == "decrypt": self.parser.set_usage("usage: %prog decrypt [options] file_name") elif self.action == "edit": self.parser.set_usage("usage: %prog edit [options] file_name") elif self.action == "view": self.parser.set_usage("usage: %prog view [options] file_name") elif self.action == "encrypt": self.parser.set_usage("usage: %prog encrypt [options] file_name") elif self.action == "rekey": self.parser.set_usage("usage: %prog rekey [options] file_name") self.options, self.args = self.parser.parse_args(self.args[1:]) display.verbosity = self.options.verbosity can_output = ['encrypt', 'decrypt'] if self.action not in can_output: if self.options.output_file: raise AnsibleOptionsError("The --output option can be used only with ansible-vault %s" % '/'.join(can_output)) if len(self.args) == 0: raise AnsibleOptionsError("Vault requires at least one filename as a parameter") else: # This restriction should remain in place until it's possible to # load multiple YAML records from a single file; otherwise it's too easy # to create an encrypted file that can't be read back in. In the # meantime, "cat a b c|ansible-vault encrypt --output x" is # a workaround.
if self.options.output_file and len(self.args) > 1: raise AnsibleOptionsError("At most one input file may be used with the --output option") def run(self): super(VaultCLI, self).run() loader = DataLoader() # set default restrictive umask old_umask = os.umask(0o077) if self.options.vault_password_file: # read vault_pass from a file self.vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader) else: newpass = False rekey = False if not self.options.new_vault_password_file: newpass = (self.action in ['create', 'rekey', 'encrypt']) rekey = (self.action == 'rekey') self.vault_pass, self.new_vault_pass = self.ask_vault_passwords(ask_new_vault_pass=newpass, rekey=rekey) if self.options.new_vault_password_file: # for rekey only self.new_vault_pass = CLI.read_vault_password_file(self.options.new_vault_password_file, loader) if not self.vault_pass: raise AnsibleOptionsError("A password is required to use Ansible's Vault") self.editor = VaultEditor(self.vault_pass) self.execute() # and restore umask os.umask(old_umask) def execute_encrypt(self): if len(self.args) == 0 and sys.stdin.isatty(): display.display("Reading plaintext input from stdin", stderr=True) for f in self.args or ['-']: self.editor.encrypt_file(f, output_file=self.options.output_file) if sys.stdout.isatty(): display.display("Encryption successful", stderr=True) def execute_decrypt(self): if len(self.args) == 0 and sys.stdin.isatty(): display.display("Reading ciphertext input from stdin", stderr=True) for f in self.args or ['-']: self.editor.decrypt_file(f, output_file=self.options.output_file) if sys.stdout.isatty(): display.display("Decryption successful", stderr=True) def execute_create(self): if len(self.args) > 1: raise AnsibleOptionsError("ansible-vault create can take only one filename argument") self.editor.create_file(self.args[0]) def execute_edit(self): for f in self.args: self.editor.edit_file(f) def execute_view(self): for f in self.args: # Note: vault should return byte strings because it could encrypt # and decrypt binary files. We are responsible for changing it to # unicode here because we are displaying it and therefore can make # the decision that the display doesn't have to be precisely what # the input was (leave that to decrypt instead) self.pager(to_unicode(self.editor.plaintext(f))) def execute_rekey(self): for f in self.args: if not (os.path.isfile(f)): raise AnsibleError(f + " does not exist") for f in self.args: self.editor.rekey_file(f, self.new_vault_pass) display.display("Rekey successful", stderr=True) ansible-2.1.1.0/lib/ansible/compat/0000775000175400017540000000000012746444530020103 5ustar jenkinsjenkins00000000000000ansible-2.1.1.0/lib/ansible/compat/six/0000775000175400017540000000000012746444530020706 5ustar jenkinsjenkins00000000000000ansible-2.1.1.0/lib/ansible/compat/six/__init__.py0000664000175400017540000000336112746444466023032 0ustar jenkinsjenkins00000000000000# (c) 2014, Toshio Kuratomi # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type ''' Compat six library. RHEL7 has python-six 1.3.0 which is too old ''' # The following makes it easier for us to script updates of the bundled code _BUNDLED_METADATA = { "pypi_name": "six", "version": "1.10.0" } import os.path try: import six as _system_six except ImportError: _system_six = None if _system_six: # If we need some things from even newer versions of six, then we need to # use our bundled copy instead if ( # Added in six-1.8.0 not hasattr(_system_six.moves, 'shlex_quote') or # Added in six-1.4.0 not hasattr(_system_six, 'byte2int') or not hasattr(_system_six, 'add_metaclass') or not hasattr(_system_six.moves, 'urllib') ): _system_six = False if _system_six: six = _system_six else: from . import _six as six six_py_file = '{0}.py'.format(os.path.splitext(six.__file__)[0]) exec(open(six_py_file, 'rb').read()) ansible-2.1.1.0/lib/ansible/compat/six/_six.py0000664000175400017540000007262212746444466022243 0ustar jenkinsjenkins00000000000000"""Utilities for writing code that runs on Python 2 and 3""" # Copyright (c) 2010-2015 Benjamin Peterson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from __future__ import absolute_import import functools import itertools import operator import sys import types __author__ = "Benjamin Peterson " __version__ = "1.10.0" # Useful for very coarse version differentiation. PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 PY34 = sys.version_info[0:2] >= (3, 4) if PY3: string_types = str, integer_types = int, class_types = type, text_type = str binary_type = bytes MAXSIZE = sys.maxsize else: string_types = basestring, integer_types = (int, long) class_types = (type, types.ClassType) text_type = unicode binary_type = str if sys.platform.startswith("java"): # Jython always uses 32 bits. MAXSIZE = int((1 << 31) - 1) else: # It's possible to have sizeof(long) != sizeof(Py_ssize_t). 
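        # The class below probes that case: CPython requires __len__ to return
        # a value that fits in a Py_ssize_t, so calling len() on an object
        # reporting a size of 1 << 31 raises OverflowError when Py_ssize_t is
        # 32 bits wide and succeeds when it is 64 bits wide, which selects the
        # right MAXSIZE without trusting sizeof(long).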
class X(object): def __len__(self): return 1 << 31 try: len(X()) except OverflowError: # 32-bit MAXSIZE = int((1 << 31) - 1) else: # 64-bit MAXSIZE = int((1 << 63) - 1) del X def _add_doc(func, doc): """Add documentation to a function.""" func.__doc__ = doc def _import_module(name): """Import module, returning the module after the last dot.""" __import__(name) return sys.modules[name] class _LazyDescr(object): def __init__(self, name): self.name = name def __get__(self, obj, tp): result = self._resolve() setattr(obj, self.name, result) # Invokes __set__. try: # This is a bit ugly, but it avoids running this again by # removing this descriptor. delattr(obj.__class__, self.name) except AttributeError: pass return result class MovedModule(_LazyDescr): def __init__(self, name, old, new=None): super(MovedModule, self).__init__(name) if PY3: if new is None: new = name self.mod = new else: self.mod = old def _resolve(self): return _import_module(self.mod) def __getattr__(self, attr): _module = self._resolve() value = getattr(_module, attr) setattr(self, attr, value) return value class _LazyModule(types.ModuleType): def __init__(self, name): super(_LazyModule, self).__init__(name) self.__doc__ = self.__class__.__doc__ def __dir__(self): attrs = ["__doc__", "__name__"] attrs += [attr.name for attr in self._moved_attributes] return attrs # Subclasses should override this _moved_attributes = [] class MovedAttribute(_LazyDescr): def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): super(MovedAttribute, self).__init__(name) if PY3: if new_mod is None: new_mod = name self.mod = new_mod if new_attr is None: if old_attr is None: new_attr = name else: new_attr = old_attr self.attr = new_attr else: self.mod = old_mod if old_attr is None: old_attr = name self.attr = old_attr def _resolve(self): module = _import_module(self.mod) return getattr(module, self.attr) class _SixMetaPathImporter(object): """ A meta path importer to import six.moves and its submodules. This class implements a PEP302 finder and loader. It should be compatible with Python 2.5 and all existing versions of Python3 """ def __init__(self, six_module_name): self.name = six_module_name self.known_modules = {} def _add_module(self, mod, *fullnames): for fullname in fullnames: self.known_modules[self.name + "." + fullname] = mod def _get_module(self, fullname): return self.known_modules[self.name + "." + fullname] def find_module(self, fullname, path=None): if fullname in self.known_modules: return self return None def __get_module(self, fullname): try: return self.known_modules[fullname] except KeyError: raise ImportError("This loader does not know module " + fullname) def load_module(self, fullname): try: # in case of a reload return sys.modules[fullname] except KeyError: pass mod = self.__get_module(fullname) if isinstance(mod, MovedModule): mod = mod._resolve() else: mod.__loader__ = self sys.modules[fullname] = mod return mod def is_package(self, fullname): """ Return true, if the named module is a package. 
We need this method to get correct spec objects with Python 3.4 (see PEP451) """ return hasattr(self.__get_module(fullname), "__path__") def get_code(self, fullname): """Return None Required, if is_package is implemented""" self.__get_module(fullname) # eventually raises ImportError return None get_source = get_code # same as get_code _importer = _SixMetaPathImporter(__name__) class _MovedItems(_LazyModule): """Lazy loading of moved objects""" __path__ = [] # mark as package _moved_attributes = [ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), MovedAttribute("intern", "__builtin__", "sys"), MovedAttribute("map", "itertools", "builtins", "imap", "map"), MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), MovedAttribute("reduce", "__builtin__", "functools"), MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), MovedAttribute("StringIO", "StringIO", "io"), MovedAttribute("UserDict", "UserDict", "collections"), MovedAttribute("UserList", "UserList", "collections"), MovedAttribute("UserString", "UserString", "collections"), MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), MovedModule("builtins", "__builtin__"), MovedModule("configparser", "ConfigParser"), MovedModule("copyreg", "copy_reg"), MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), MovedModule("http_cookies", "Cookie", "http.cookies"), MovedModule("html_entities", "htmlentitydefs", "html.entities"), MovedModule("html_parser", "HTMLParser", "html.parser"), MovedModule("http_client", "httplib", "http.client"), MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), MovedModule("cPickle", "cPickle", "pickle"), MovedModule("queue", "Queue"), MovedModule("reprlib", "repr"), MovedModule("socketserver", "SocketServer"), MovedModule("_thread", "thread", "_thread"), MovedModule("tkinter", "Tkinter"), MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), MovedModule("tkinter_tix", "Tix", "tkinter.tix"), MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), MovedModule("tkinter_colorchooser", 
"tkColorChooser", "tkinter.colorchooser"), MovedModule("tkinter_commondialog", "tkCommonDialog", "tkinter.commondialog"), MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), MovedModule("tkinter_font", "tkFont", "tkinter.font"), MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", "tkinter.simpledialog"), MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), ] # Add windows specific modules. if sys.platform == "win32": _moved_attributes += [ MovedModule("winreg", "_winreg"), ] for attr in _moved_attributes: setattr(_MovedItems, attr.name, attr) if isinstance(attr, MovedModule): _importer._add_module(attr, "moves." + attr.name) del attr _MovedItems._moved_attributes = _moved_attributes moves = _MovedItems(__name__ + ".moves") _importer._add_module(moves, "moves") class Module_six_moves_urllib_parse(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_parse""" _urllib_parse_moved_attributes = [ MovedAttribute("ParseResult", "urlparse", "urllib.parse"), MovedAttribute("SplitResult", "urlparse", "urllib.parse"), MovedAttribute("parse_qs", "urlparse", "urllib.parse"), MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), MovedAttribute("urldefrag", "urlparse", "urllib.parse"), MovedAttribute("urljoin", "urlparse", "urllib.parse"), MovedAttribute("urlparse", "urlparse", "urllib.parse"), MovedAttribute("urlsplit", "urlparse", "urllib.parse"), MovedAttribute("urlunparse", "urlparse", "urllib.parse"), MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), MovedAttribute("quote", "urllib", "urllib.parse"), MovedAttribute("quote_plus", "urllib", "urllib.parse"), MovedAttribute("unquote", "urllib", "urllib.parse"), MovedAttribute("unquote_plus", "urllib", "urllib.parse"), MovedAttribute("urlencode", "urllib", "urllib.parse"), MovedAttribute("splitquery", "urllib", "urllib.parse"), MovedAttribute("splittag", "urllib", "urllib.parse"), MovedAttribute("splituser", "urllib", "urllib.parse"), MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), MovedAttribute("uses_params", "urlparse", "urllib.parse"), MovedAttribute("uses_query", "urlparse", "urllib.parse"), MovedAttribute("uses_relative", "urlparse", "urllib.parse"), ] for attr in _urllib_parse_moved_attributes: setattr(Module_six_moves_urllib_parse, attr.name, attr) del attr Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes _importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), "moves.urllib_parse", "moves.urllib.parse") class Module_six_moves_urllib_error(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_error""" _urllib_error_moved_attributes = [ MovedAttribute("URLError", "urllib2", "urllib.error"), MovedAttribute("HTTPError", "urllib2", "urllib.error"), MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), ] for attr in _urllib_error_moved_attributes: setattr(Module_six_moves_urllib_error, attr.name, attr) del attr Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes 
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), "moves.urllib_error", "moves.urllib.error") class Module_six_moves_urllib_request(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_request""" _urllib_request_moved_attributes = [ MovedAttribute("urlopen", "urllib2", "urllib.request"), MovedAttribute("install_opener", "urllib2", "urllib.request"), MovedAttribute("build_opener", "urllib2", "urllib.request"), MovedAttribute("pathname2url", "urllib", "urllib.request"), MovedAttribute("url2pathname", "urllib", "urllib.request"), MovedAttribute("getproxies", "urllib", "urllib.request"), MovedAttribute("Request", "urllib2", "urllib.request"), MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), MovedAttribute("BaseHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), MovedAttribute("FileHandler", "urllib2", "urllib.request"), MovedAttribute("FTPHandler", "urllib2", "urllib.request"), MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), MovedAttribute("urlretrieve", "urllib", "urllib.request"), MovedAttribute("urlcleanup", "urllib", "urllib.request"), MovedAttribute("URLopener", "urllib", "urllib.request"), MovedAttribute("FancyURLopener", "urllib", "urllib.request"), MovedAttribute("proxy_bypass", "urllib", "urllib.request"), ] for attr in _urllib_request_moved_attributes: setattr(Module_six_moves_urllib_request, attr.name, attr) del attr Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes _importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), "moves.urllib_request", "moves.urllib.request") class Module_six_moves_urllib_response(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_response""" _urllib_response_moved_attributes = [ MovedAttribute("addbase", "urllib", "urllib.response"), MovedAttribute("addclosehook", "urllib", "urllib.response"), MovedAttribute("addinfo", "urllib", "urllib.response"), MovedAttribute("addinfourl", "urllib", "urllib.response"), ] for attr in _urllib_response_moved_attributes: setattr(Module_six_moves_urllib_response, attr.name, attr) del attr Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes _importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), "moves.urllib_response", "moves.urllib.response") class Module_six_moves_urllib_robotparser(_LazyModule): """Lazy loading of moved objects 
in six.moves.urllib_robotparser""" _urllib_robotparser_moved_attributes = [ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), ] for attr in _urllib_robotparser_moved_attributes: setattr(Module_six_moves_urllib_robotparser, attr.name, attr) del attr Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes _importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), "moves.urllib_robotparser", "moves.urllib.robotparser") class Module_six_moves_urllib(types.ModuleType): """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" __path__ = [] # mark as package parse = _importer._get_module("moves.urllib_parse") error = _importer._get_module("moves.urllib_error") request = _importer._get_module("moves.urllib_request") response = _importer._get_module("moves.urllib_response") robotparser = _importer._get_module("moves.urllib_robotparser") def __dir__(self): return ['parse', 'error', 'request', 'response', 'robotparser'] _importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), "moves.urllib") def add_move(move): """Add an item to six.moves.""" setattr(_MovedItems, move.name, move) def remove_move(name): """Remove item from six.moves.""" try: delattr(_MovedItems, name) except AttributeError: try: del moves.__dict__[name] except KeyError: raise AttributeError("no such move, %r" % (name,)) if PY3: _meth_func = "__func__" _meth_self = "__self__" _func_closure = "__closure__" _func_code = "__code__" _func_defaults = "__defaults__" _func_globals = "__globals__" else: _meth_func = "im_func" _meth_self = "im_self" _func_closure = "func_closure" _func_code = "func_code" _func_defaults = "func_defaults" _func_globals = "func_globals" try: advance_iterator = next except NameError: def advance_iterator(it): return it.next() next = advance_iterator try: callable = callable except NameError: def callable(obj): return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) if PY3: def get_unbound_function(unbound): return unbound create_bound_method = types.MethodType def create_unbound_method(func, cls): return func Iterator = object else: def get_unbound_function(unbound): return unbound.im_func def create_bound_method(func, obj): return types.MethodType(func, obj, obj.__class__) def create_unbound_method(func, cls): return types.MethodType(func, None, cls) class Iterator(object): def next(self): return type(self).__next__(self) callable = callable _add_doc(get_unbound_function, """Get the function out of a possibly unbound function""") get_method_function = operator.attrgetter(_meth_func) get_method_self = operator.attrgetter(_meth_self) get_function_closure = operator.attrgetter(_func_closure) get_function_code = operator.attrgetter(_func_code) get_function_defaults = operator.attrgetter(_func_defaults) get_function_globals = operator.attrgetter(_func_globals) if PY3: def iterkeys(d, **kw): return iter(d.keys(**kw)) def itervalues(d, **kw): return iter(d.values(**kw)) def iteritems(d, **kw): return iter(d.items(**kw)) def iterlists(d, **kw): return iter(d.lists(**kw)) viewkeys = operator.methodcaller("keys") viewvalues = operator.methodcaller("values") viewitems = operator.methodcaller("items") else: def iterkeys(d, **kw): return d.iterkeys(**kw) def itervalues(d, **kw): return d.itervalues(**kw) def iteritems(d, **kw): return d.iteritems(**kw) def iterlists(d, **kw): return d.iterlists(**kw) viewkeys = operator.methodcaller("viewkeys") viewvalues = 
operator.methodcaller("viewvalues") viewitems = operator.methodcaller("viewitems") _add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") _add_doc(itervalues, "Return an iterator over the values of a dictionary.") _add_doc(iteritems, "Return an iterator over the (key, value) pairs of a dictionary.") _add_doc(iterlists, "Return an iterator over the (key, [values]) pairs of a dictionary.") if PY3: def b(s): return s.encode("latin-1") def u(s): return s unichr = chr import struct int2byte = struct.Struct(">B").pack del struct byte2int = operator.itemgetter(0) indexbytes = operator.getitem iterbytes = iter import io StringIO = io.StringIO BytesIO = io.BytesIO _assertCountEqual = "assertCountEqual" if sys.version_info[1] <= 1: _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" else: _assertRaisesRegex = "assertRaisesRegex" _assertRegex = "assertRegex" else: def b(s): return s # Workaround for standalone backslash def u(s): return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") unichr = unichr int2byte = chr def byte2int(bs): return ord(bs[0]) def indexbytes(buf, i): return ord(buf[i]) iterbytes = functools.partial(itertools.imap, ord) import StringIO StringIO = BytesIO = StringIO.StringIO _assertCountEqual = "assertItemsEqual" _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" _add_doc(b, """Byte literal""") _add_doc(u, """Text literal""") def assertCountEqual(self, *args, **kwargs): return getattr(self, _assertCountEqual)(*args, **kwargs) def assertRaisesRegex(self, *args, **kwargs): return getattr(self, _assertRaisesRegex)(*args, **kwargs) def assertRegex(self, *args, **kwargs): return getattr(self, _assertRegex)(*args, **kwargs) if PY3: exec_ = getattr(moves.builtins, "exec") def reraise(tp, value, tb=None): if value is None: value = tp() if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value else: def exec_(_code_, _globs_=None, _locs_=None): """Execute code in a namespace.""" if _globs_ is None: frame = sys._getframe(1) _globs_ = frame.f_globals if _locs_ is None: _locs_ = frame.f_locals del frame elif _locs_ is None: _locs_ = _globs_ exec("""exec _code_ in _globs_, _locs_""") exec_("""def reraise(tp, value, tb=None): raise tp, value, tb """) if sys.version_info[:2] == (3, 2): exec_("""def raise_from(value, from_value): if from_value is None: raise value raise value from from_value """) elif sys.version_info[:2] > (3, 2): exec_("""def raise_from(value, from_value): raise value from from_value """) else: def raise_from(value, from_value): raise value print_ = getattr(moves.builtins, "print", None) if print_ is None: def print_(*args, **kwargs): """The new-style print function for Python 2.4 and 2.5.""" fp = kwargs.pop("file", sys.stdout) if fp is None: return def write(data): if not isinstance(data, basestring): data = str(data) # If the file has an encoding, encode unicode with it. 
if (isinstance(fp, file) and isinstance(data, unicode) and fp.encoding is not None): errors = getattr(fp, "errors", None) if errors is None: errors = "strict" data = data.encode(fp.encoding, errors) fp.write(data) want_unicode = False sep = kwargs.pop("sep", None) if sep is not None: if isinstance(sep, unicode): want_unicode = True elif not isinstance(sep, str): raise TypeError("sep must be None or a string") end = kwargs.pop("end", None) if end is not None: if isinstance(end, unicode): want_unicode = True elif not isinstance(end, str): raise TypeError("end must be None or a string") if kwargs: raise TypeError("invalid keyword arguments to print()") if not want_unicode: for arg in args: if isinstance(arg, unicode): want_unicode = True break if want_unicode: newline = unicode("\n") space = unicode(" ") else: newline = "\n" space = " " if sep is None: sep = space if end is None: end = newline for i, arg in enumerate(args): if i: write(sep) write(arg) write(end) if sys.version_info[:2] < (3, 3): _print = print_ def print_(*args, **kwargs): fp = kwargs.get("file", sys.stdout) flush = kwargs.pop("flush", False) _print(*args, **kwargs) if flush and fp is not None: fp.flush() _add_doc(reraise, """Reraise an exception.""") if sys.version_info[0:2] < (3, 4): def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES): def wrapper(f): f = functools.wraps(wrapped, assigned, updated)(f) f.__wrapped__ = wrapped return f return wrapper else: wraps = functools.wraps def with_metaclass(meta, *bases): """Create a base class with a metaclass.""" # This requires a bit of explanation: the basic idea is to make a dummy # metaclass for one level of class instantiation that replaces itself with # the actual metaclass. class metaclass(meta): def __new__(cls, name, this_bases, d): return meta(name, bases, d) return type.__new__(metaclass, 'temporary_class', (), {}) def add_metaclass(metaclass): """Class decorator for creating a class with a metaclass.""" def wrapper(cls): orig_vars = cls.__dict__.copy() slots = orig_vars.get('__slots__') if slots is not None: if isinstance(slots, str): slots = [slots] for slots_var in slots: orig_vars.pop(slots_var) orig_vars.pop('__dict__', None) orig_vars.pop('__weakref__', None) return metaclass(cls.__name__, cls.__bases__, orig_vars) return wrapper def python_2_unicode_compatible(klass): """ A decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3 it does nothing. To support Python 2 and 3 with a single code base, define a __str__ method returning text and apply this decorator to the class. """ if PY2: if '__str__' not in klass.__dict__: raise ValueError("@python_2_unicode_compatible cannot be applied " "to %s because it doesn't define __str__()." % klass.__name__) klass.__unicode__ = klass.__str__ klass.__str__ = lambda self: self.__unicode__().encode('utf-8') return klass # Complete the moves implementation. # This code is at the end of this module to speed up module loading. # Turn this module into a package. __path__ = [] # required for PEP 302 and PEP 451 __package__ = __name__ # see PEP 366 @ReservedAssignment if globals().get("__spec__") is not None: __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable # Remove other six meta path importers, since they cause problems. This can # happen if six is removed from sys.modules and then reloaded. (Setuptools does # this for some reason.) 
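# (Illustrative aside, not part of upstream six -- a hedged sketch of the reload scenario the cleanup below guards against:
#     import ansible.compat.six             # registers one _SixMetaPathImporter
#     sys.modules.pop('ansible.compat.six') # e.g. what setuptools sometimes does
#     import ansible.compat.six             # re-executes this file, appending a second importer
# Without the de-duplication that follows, both importer instances would linger on sys.meta_path.)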
if sys.meta_path: for i, importer in enumerate(sys.meta_path): # Here's some real nastiness: Another "instance" of the six module might # be floating around. Therefore, we can't use isinstance() to check for # the six meta path importer, since the other six instance will have # inserted an importer with different class. if (type(importer).__name__ == "_SixMetaPathImporter" and importer.name == __name__): del sys.meta_path[i] break del i, importer # Finally, add the importer to the meta path import hook. sys.meta_path.append(_importer) ansible-2.1.1.0/lib/ansible/compat/tests/0000775000175400017540000000000012746444530021245 5ustar jenkinsjenkins00000000000000ansible-2.1.1.0/lib/ansible/compat/tests/__init__.py0000664000175400017540000000236412746444466023373 0ustar jenkinsjenkins00000000000000# (c) 2014, Toshio Kuratomi # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type ''' This module contains things that are only needed for compat in the testsuites, not in ansible itself. If you are not installing the test suite, you can safely remove this subdirectory. ''' # # Compat for python2.7 # # One unittest needs to import builtins via __import__() so we need to have # the string that represents it try: import __builtin__ except ImportError: BUILTINS = 'builtins' else: BUILTINS = '__builtin__' ansible-2.1.1.0/lib/ansible/compat/tests/mock.py0000664000175400017540000000233112746444466022557 0ustar jenkinsjenkins00000000000000# (c) 2014, Toshio Kuratomi # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type ''' Compat module for Python3.x's unittest.mock module ''' # Python 2.7 # Note: Could use the pypi mock library on python3.x as well as python2.x. 
# It is the same as the python3 stdlib mock library try: from unittest.mock import * except ImportError: # Python 2 try: from mock import * except ImportError: print('You need the mock library installed on python2.x to run tests') ansible-2.1.1.0/lib/ansible/compat/tests/unittest.py0000664000175400017540000000217312746444466023511 0ustar jenkinsjenkins00000000000000# (c) 2014, Toshio Kuratomi # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type ''' Compat module for Python2.7's unittest module ''' import sys # Python 2.6 if sys.version_info < (2, 7): try: # Need unittest2 on python2.6 from unittest2 import * except ImportError: print('You need unittest2 installed on python2.6.x to run tests') else: from unittest import * ansible-2.1.1.0/lib/ansible/compat/__init__.py0000664000175400017540000000210012746444466022215 0ustar jenkinsjenkins00000000000000# (c) 2014, Toshio Kuratomi # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type ''' Compat library for ansible. This contains compatibility definitions for older python. When we need to import a module differently depending on python version, do it here. Then in the code we can simply import from compat in order to get what we want. ''' ansible-2.1.1.0/lib/ansible/config/0000775000175400017540000000000012746444530020065 5ustar jenkinsjenkins00000000000000ansible-2.1.1.0/lib/ansible/config/__init__.py0000664000175400017540000000150112746444466022203 0ustar jenkinsjenkins00000000000000# (c) 2012-2014, Michael DeHaan # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see .
# Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type ansible-2.1.1.0/lib/ansible/errors/0000775000175400017540000000000012746444530020134 5ustar jenkinsjenkins00000000000000ansible-2.1.1.0/lib/ansible/errors/__init__.py0000664000175400017540000001730312746444466022251 0ustar jenkinsjenkins00000000000000# (c) 2012-2014, Michael DeHaan # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.errors.yaml_strings import ( YAML_POSITION_DETAILS, YAML_COMMON_UNQUOTED_VARIABLE_ERROR, YAML_COMMON_DICT_ERROR, YAML_COMMON_UNQUOTED_COLON_ERROR, YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR, YAML_COMMON_UNBALANCED_QUOTES_ERROR ) from ansible.utils.unicode import to_unicode, to_str class AnsibleError(Exception): ''' This is the base class for all errors raised from Ansible code, and can be instantiated with several optional parameters beyond the error message to control whether detailed information is displayed when the error occurred while parsing a data file of some kind. Usage: raise AnsibleError('some message here', obj=obj, show_content=True) Where "obj" is some subclass of ansible.parsing.yaml.objects.AnsibleBaseYAMLObject, which should be returned by the DataLoader() class. ''' def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False): # we import this here to prevent an import loop problem, # since the objects code also imports ansible.errors from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject self._obj = obj self._show_content = show_content if obj and isinstance(obj, AnsibleBaseYAMLObject): extended_error = self._get_extended_error() if extended_error and not suppress_extended_error: self.message = '%s\n\n%s' % (to_str(message), to_str(extended_error)) else: self.message = '%s' % to_str(message) else: self.message = '%s' % to_str(message) def __str__(self): return self.message def __repr__(self): return self.message def _get_error_lines_from_file(self, file_name, line_number): ''' Returns the line in the file which corresponds to the reported error location, as well as the line preceding it (if the error did not occur on the first line), to provide context to the error. ''' target_line = '' prev_line = '' with open(file_name, 'r') as f: lines = f.readlines() target_line = lines[line_number] if line_number > 0: prev_line = lines[line_number - 1] return (target_line, prev_line) def _get_extended_error(self): ''' Given an object reporting the location of the exception in a file, return detailed information regarding it including: * the line which caused the error as well as the one preceding it * causes and suggested remedies for common syntax errors If this error was created with show_content=False, the reporting of content is suppressed, as the file contents may be sensitive (i.e. vault data).
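For illustration only (a hedged sketch, not verbatim output), an unbalanced-quote mistake might be rendered roughly as:

    The offending line appears to be:

        foo: "bad" "wolf"
             ^ here

followed by the matching YAML_COMMON_* hint from ansible.errors.yaml_strings.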
''' error_message = '' try: (src_file, line_number, col_number) = self._obj.ansible_pos error_message += YAML_POSITION_DETAILS % (src_file, line_number, col_number) if src_file not in ('', '') and self._show_content: (target_line, prev_line) = self._get_error_lines_from_file(src_file, line_number - 1) target_line = to_unicode(target_line) prev_line = to_unicode(prev_line) if target_line: stripped_line = target_line.replace(" ","") arrow_line = (" " * (col_number-1)) + "^ here" #header_line = ("=" * 73) error_message += "\nThe offending line appears to be:\n\n%s\n%s\n%s\n" % (prev_line.rstrip(), target_line.rstrip(), arrow_line) # common error/remediation checking here: # check for unquoted vars starting lines if ('{{' in target_line and '}}' in target_line) and ('"{{' not in target_line or "'{{" not in target_line): error_message += YAML_COMMON_UNQUOTED_VARIABLE_ERROR # check for common dictionary mistakes elif ":{{" in stripped_line and "}}" in stripped_line: error_message += YAML_COMMON_DICT_ERROR # check for common unquoted colon mistakes elif len(target_line) and len(target_line) > 1 and len(target_line) > col_number and target_line[col_number] == ":" and target_line.count(':') > 1: error_message += YAML_COMMON_UNQUOTED_COLON_ERROR # otherwise, check for some common quoting mistakes else: parts = target_line.split(":") if len(parts) > 1: middle = parts[1].strip() match = False unbalanced = False if middle.startswith("'") and not middle.endswith("'"): match = True elif middle.startswith('"') and not middle.endswith('"'): match = True if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and target_line.count("'") > 2 or target_line.count('"') > 2: unbalanced = True if match: error_message += YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR if unbalanced: error_message += YAML_COMMON_UNBALANCED_QUOTES_ERROR except (IOError, TypeError): error_message += '\n(could not open file to display line)' except IndexError: error_message += '\n(specified line no longer in file, maybe it changed?)' return error_message class AnsibleOptionsError(AnsibleError): ''' bad or incomplete options passed ''' pass class AnsibleParserError(AnsibleError): ''' something was detected early that is wrong about a playbook or data file ''' pass class AnsibleInternalError(AnsibleError): ''' internal safeguards tripped, something happened in the code that should never happen ''' pass class AnsibleRuntimeError(AnsibleError): ''' ansible had a problem while running a playbook ''' pass class AnsibleModuleError(AnsibleRuntimeError): ''' a module failed somehow ''' pass class AnsibleConnectionFailure(AnsibleRuntimeError): ''' the transport / connection_plugin had a fatal error ''' pass class AnsibleFilterError(AnsibleRuntimeError): ''' a templating failure ''' pass class AnsibleLookupError(AnsibleRuntimeError): ''' a lookup failure ''' pass class AnsibleCallbackError(AnsibleRuntimeError): ''' a callback failure ''' pass class AnsibleUndefinedVariable(AnsibleRuntimeError): ''' a templating failure ''' pass class AnsibleFileNotFound(AnsibleRuntimeError): ''' a file missing failure ''' pass ansible-2.1.1.0/lib/ansible/errors/yaml_strings.py0000664000175400017540000000653312746444466023240 0ustar jenkinsjenkins00000000000000# (c) 2012-2014, Michael DeHaan # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) 
any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type __all__ = [ 'YAML_SYNTAX_ERROR', 'YAML_POSITION_DETAILS', 'YAML_COMMON_DICT_ERROR', 'YAML_COMMON_UNQUOTED_VARIABLE_ERROR', 'YAML_COMMON_UNQUOTED_COLON_ERROR', 'YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR', 'YAML_COMMON_UNBALANCED_QUOTES_ERROR', ] YAML_SYNTAX_ERROR = """\ Syntax Error while loading YAML. """ YAML_POSITION_DETAILS = """\ The error appears to have been in '%s': line %s, column %s, but may be elsewhere in the file depending on the exact syntax problem. """ YAML_COMMON_DICT_ERROR = """\ This one looks easy to fix. YAML thought it was looking for the start of a hash/dictionary and was confused to see a second "{". Most likely this was meant to be an ansible template evaluation instead, so we have to give the parser a small hint that we wanted a string instead. The solution here is to just quote the entire value. For instance, if the original line was: app_path: {{ base_path }}/foo It should be written as: app_path: "{{ base_path }}/foo" """ YAML_COMMON_UNQUOTED_VARIABLE_ERROR = """\ We could be wrong, but this one looks like it might be an issue with missing quotes. Always quote template expression brackets when they start a value. For instance: with_items: - {{ foo }} Should be written as: with_items: - "{{ foo }}" """ YAML_COMMON_UNQUOTED_COLON_ERROR = """\ This one looks easy to fix. There seems to be an extra unquoted colon in the line and this is confusing the parser. It was only expecting to find one free colon. The solution is just add some quotes around the colon, or quote the entire line after the first colon. For instance, if the original line was: copy: src=file.txt dest=/path/filename:with_colon.txt It can be written as: copy: src=file.txt dest='/path/filename:with_colon.txt' Or: copy: 'src=file.txt dest=/path/filename:with_colon.txt' """ YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR = """\ This one looks easy to fix. It seems that there is a value started with a quote, and the YAML parser is expecting to see the line ended with the same kind of quote. For instance: when: "ok" in result.stdout Could be written as: when: '"ok" in result.stdout' Or equivalently: when: "'ok' in result.stdout" """ YAML_COMMON_UNBALANCED_QUOTES_ERROR = """\ We could be wrong, but this one looks like it might be an issue with unbalanced quotes. If starting a value with a quote, make sure the line ends with the same set of quotes. 
For instance this arbitrary example: foo: "bad" "wolf" Could be written as: foo: '"bad" "wolf"' """ ansible-2.1.1.0/lib/ansible/executor/0000775000175400017540000000000012746444530020456 5ustar jenkinsjenkins00000000000000ansible-2.1.1.0/lib/ansible/executor/process/0000775000175400017540000000000012746444530022134 5ustar jenkinsjenkins00000000000000ansible-2.1.1.0/lib/ansible/executor/process/__init__.py0000664000175400017540000000150212746444466024253 0ustar jenkinsjenkins00000000000000# (c) 2012-2014, Michael DeHaan # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type ansible-2.1.1.0/lib/ansible/executor/process/result.py0000664000175400017540000002011312746444466024031 0ustar jenkinsjenkins00000000000000# (c) 2012-2014, Michael DeHaan # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.compat.six.moves import queue from ansible.compat.six import iteritems, text_type from ansible.vars import strip_internal_keys import multiprocessing import time import traceback # TODO: not needed if we use the cryptography library with its default RNG # engine HAS_ATFORK=True try: from Crypto.Random import atfork except ImportError: HAS_ATFORK=False try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() __all__ = ['ResultProcess'] class ResultProcess(multiprocessing.Process): ''' The result worker thread, which reads results from the results queue and fires off callbacks/etc. as necessary. 
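A minimal usage sketch (wiring assumed here; in practice the TaskQueueManager constructs the queues and owns this process):

    rp = ResultProcess(final_q, workers)  # workers: [(WorkerProcess, result_queue), ...]
    rp.start()
    # ... worker processes push TaskResult objects onto their result queues ...
    rp.terminate()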
''' def __init__(self, final_q, workers): # takes the final results queue and the list of (worker process, result queue) pairs: self._final_q = final_q self._workers = workers self._cur_worker = 0 self._terminated = False super(ResultProcess, self).__init__() def _send_result(self, result): display.debug(u"sending result: %s" % ([text_type(x) for x in result],)) self._final_q.put(result) display.debug("done sending result") def _read_worker_result(self): result = None starting_point = self._cur_worker while True: (worker_prc, rslt_q) = self._workers[self._cur_worker] self._cur_worker += 1 if self._cur_worker >= len(self._workers): self._cur_worker = 0 try: if not rslt_q.empty(): display.debug("worker %d has data to read" % self._cur_worker) result = rslt_q.get() display.debug("got a result from worker %d: %s" % (self._cur_worker, result)) break except queue.Empty: pass if self._cur_worker == starting_point: break return result def terminate(self): self._terminated = True super(ResultProcess, self).terminate() def run(self): ''' The main process loop, which reads from the results queue indefinitely and sends callbacks/etc. when results are received. ''' if HAS_ATFORK: atfork() while True: try: result = self._read_worker_result() if result is None: time.sleep(0.0001) continue # send callbacks for 'non final' results if '_ansible_retry' in result._result: self._send_result(('v2_runner_retry', result)) continue elif '_ansible_item_result' in result._result: if result.is_failed() or result.is_unreachable(): self._send_result(('v2_runner_item_on_failed', result)) elif result.is_skipped(): self._send_result(('v2_runner_item_on_skipped', result)) else: self._send_result(('v2_runner_item_on_ok', result)) if 'diff' in result._result: self._send_result(('v2_on_file_diff', result)) continue clean_copy = strip_internal_keys(result._result) if 'invocation' in clean_copy: del clean_copy['invocation'] # if this task is registering a result, do it now if result._task.register: self._send_result(('register_host_var', result._host, result._task, clean_copy)) # send callbacks, execute other options based on the result status # TODO: this should all be cleaned up and probably moved to a sub-function. # the fact that this sometimes sends a TaskResult and other times # sends a raw dictionary back may be confusing, but the result vs. # results implementation for tasks with loops should be cleaned up # better than this if result.is_unreachable(): self._send_result(('host_unreachable', result)) elif result.is_failed(): self._send_result(('host_task_failed', result)) elif result.is_skipped(): self._send_result(('host_task_skipped', result)) else: if result._task.loop: # this task had a loop, and has more than one result, so # loop over all of them instead of a single result result_items = result._result.get('results', []) else: result_items = [ result._result ] for result_item in result_items: # if this task is notifying a handler, do it now if '_ansible_notify' in result_item: if result.is_changed(): # The shared dictionary for notified handlers is a proxy, which # does not detect when sub-objects within the proxy are modified.
# So, per the docs, we reassign the list so the proxy picks up and # notifies all other threads for notify in result_item['_ansible_notify']: self._send_result(('notify_handler', result, notify)) if 'add_host' in result_item: # this task added a new host (add_host module) self._send_result(('add_host', result_item)) elif 'add_group' in result_item: # this task added a new group (group_by module) self._send_result(('add_group', result._host, result_item)) elif 'ansible_facts' in result_item: # if this task is registering facts, do that now loop_var = 'item' if result._task.loop_control: loop_var = result._task.loop_control.get('loop_var') or 'item' item = result_item.get(loop_var, None) if result._task.action == 'include_vars': for (key, value) in iteritems(result_item['ansible_facts']): self._send_result(('set_host_var', result._host, result._task, item, key, value)) else: self._send_result(('set_host_facts', result._host, result._task, item, result_item['ansible_facts'])) # finally, send the ok for this task self._send_result(('host_task_ok', result)) except queue.Empty: pass except (KeyboardInterrupt, SystemExit, IOError, EOFError): break except: # TODO: we should probably send a proper callback here instead of # simply dumping a stack trace on the screen traceback.print_exc() break ansible-2.1.1.0/lib/ansible/executor/process/worker.py0000664000175400017540000001233612746444466024034 0ustar jenkinsjenkins00000000000000# (c) 2012-2014, Michael DeHaan # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.compat.six.moves import queue import json import multiprocessing import os import signal import sys import time import traceback import zlib from jinja2.exceptions import TemplateNotFound # TODO: not needed if we use the cryptography library with its default RNG # engine HAS_ATFORK=True try: from Crypto.Random import atfork except ImportError: HAS_ATFORK=False from ansible.errors import AnsibleError, AnsibleConnectionFailure from ansible.executor.task_executor import TaskExecutor from ansible.executor.task_result import TaskResult from ansible.playbook.handler import Handler from ansible.playbook.task import Task from ansible.vars.unsafe_proxy import AnsibleJSONUnsafeDecoder from ansible.utils.unicode import to_unicode try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() __all__ = ['WorkerProcess'] class WorkerProcess(multiprocessing.Process): ''' The worker thread class, which uses TaskExecutor to run tasks read from a job queue and pushes results into a results queue for reading later. 
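A minimal usage sketch (the arguments are assumed to be prepared by a strategy plugin, which normally constructs these workers):

    wp = WorkerProcess(rslt_q, task_vars, host, task, play_context,
                       loader, variable_manager, shared_loader_obj)
    wp.start()  # run() executes the task and puts a TaskResult on rslt_q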
''' def __init__(self, rslt_q, task_vars, host, task, play_context, loader, variable_manager, shared_loader_obj): super(WorkerProcess, self).__init__() # store the results queue and the per-task context handed in by the strategy: self._rslt_q = rslt_q self._task_vars = task_vars self._host = host self._task = task self._play_context = play_context self._loader = loader self._variable_manager = variable_manager self._shared_loader_obj = shared_loader_obj # dupe stdin, if we have one self._new_stdin = sys.stdin try: fileno = sys.stdin.fileno() if fileno is not None: try: self._new_stdin = os.fdopen(os.dup(fileno)) except OSError: # couldn't dupe stdin, most likely because it's # not a valid file descriptor, so we just rely on # using the one that was passed in pass except ValueError: # couldn't get stdin's fileno, so we just carry on pass def run(self): ''' Called when the process is started. Runs the task via TaskExecutor and pushes the resulting TaskResult onto the results queue. ''' if HAS_ATFORK: atfork() try: # execute the task and build a TaskResult from the result display.debug("running TaskExecutor() for %s/%s" % (self._host, self._task)) executor_result = TaskExecutor( self._host, self._task, self._task_vars, self._play_context, self._new_stdin, self._loader, self._shared_loader_obj, self._rslt_q ).run() display.debug("done running TaskExecutor() for %s/%s" % (self._host, self._task)) self._host.vars = dict() self._host.groups = [] task_result = TaskResult(self._host, self._task, executor_result) # put the result on the result queue display.debug("sending task result") self._rslt_q.put(task_result) display.debug("done sending task result") except AnsibleConnectionFailure: self._host.vars = dict() self._host.groups = [] task_result = TaskResult(self._host, self._task, dict(unreachable=True)) self._rslt_q.put(task_result, block=False) except Exception as e: if not isinstance(e, (IOError, EOFError, KeyboardInterrupt, SystemExit)) or isinstance(e, TemplateNotFound): try: self._host.vars = dict() self._host.groups = [] task_result = TaskResult(self._host, self._task, dict(failed=True, exception=to_unicode(traceback.format_exc()), stdout='')) self._rslt_q.put(task_result, block=False) except: display.debug(u"WORKER EXCEPTION: %s" % to_unicode(e)) display.debug(u"WORKER TRACEBACK: %s" % to_unicode(traceback.format_exc())) display.debug("WORKER PROCESS EXITING") ansible-2.1.1.0/lib/ansible/executor/__init__.py0000664000175400017540000000150212746444466022575 0ustar jenkinsjenkins00000000000000# (c) 2012-2014, Michael DeHaan # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see .
# Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type ansible-2.1.1.0/lib/ansible/executor/module_common.py0000664000175400017540000010165312746444466023703 0ustar jenkinsjenkins00000000000000# (c) 2013-2014, Michael DeHaan # (c) 2015 Toshio Kuratomi # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import ast import base64 import imp import json import os import shlex import zipfile from io import BytesIO # from Ansible from ansible.release import __version__, __author__ from ansible import constants as C from ansible.errors import AnsibleError from ansible.utils.unicode import to_bytes, to_unicode # Must import strategy and use write_locks from there # If we import write_locks directly then we end up binding a # variable to the object and then it never gets updated. from ansible.plugins import strategy try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() REPLACER = b"#<>" REPLACER_VERSION = b"\"<>\"" REPLACER_COMPLEX = b"\"<>\"" REPLACER_WINDOWS = b"# POWERSHELL_COMMON" REPLACER_JSONARGS = b"<>" REPLACER_SELINUX = b"<>" # We could end up writing out parameters with unicode characters so we need to # specify an encoding for the python source file ENCODING_STRING = u'# -*- coding: utf-8 -*-' # we've moved the module_common relative to the snippets, so fix the path _SNIPPET_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils') # ****************************************************************************** ZIPLOADER_TEMPLATE = u'''%(shebang)s %(coding)s ZIPLOADER_WRAPPER = True # For test-module script to tell this is a ZIPLOADER_WRAPPER # This code is part of Ansible, but is an independent component. # The code in this particular templatable string, and this templatable string # only, is BSD licensed. Modules which end up using this snippet, which is # dynamically combined together by Ansible still belong to the author of the # module, and they may assign their own license to the complete work. # # Copyright (c), James Cammarata, 2016 # Copyright (c), Toshio Kuratomi, 2016 # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. 
# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os import sys import base64 import shutil import zipfile import tempfile import subprocess if sys.version_info < (3,): bytes = str PY3 = False else: unicode = str PY3 = True try: # Python-2.6+ from io import BytesIO as IOStream except ImportError: # Python < 2.6 from StringIO import StringIO as IOStream ZIPDATA = """%(zipdata)s""" def invoke_module(module, modlib_path, json_params): pythonpath = os.environ.get('PYTHONPATH') if pythonpath: os.environ['PYTHONPATH'] = ':'.join((modlib_path, pythonpath)) else: os.environ['PYTHONPATH'] = modlib_path p = subprocess.Popen([%(interpreter)s, module], env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) (stdout, stderr) = p.communicate(json_params) if not isinstance(stderr, (bytes, unicode)): stderr = stderr.read() if not isinstance(stdout, (bytes, unicode)): stdout = stdout.read() if PY3: sys.stderr.buffer.write(stderr) sys.stdout.buffer.write(stdout) else: sys.stderr.write(stderr) sys.stdout.write(stdout) return p.returncode def debug(command, zipped_mod, json_params): # The code here normally doesn't run. It's only used for debugging on the # remote machine. # # The subcommands in this function make it easier to debug ziploader # modules. Here's the basic steps: # # Run ansible with the environment variable: ANSIBLE_KEEP_REMOTE_FILES=1 and -vvv # to save the module file remotely:: # $ ANSIBLE_KEEP_REMOTE_FILES=1 ansible host1 -m ping -a 'data=october' -vvv # # Part of the verbose output will tell you where on the remote machine the # module was written to:: # [...] # SSH: EXEC ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o KbdInteractiveAuthentication=no -o # PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o ConnectTimeout=10 -o # ControlPath=/home/badger/.ansible/cp/ansible-ssh-%%h-%%p-%%r -tt rhel7 '/bin/sh -c '"'"'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 # LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping'"'"'' # [...] # # Login to the remote machine and run the module file via from the previous # step with the explode subcommand to extract the module payload into # source files:: # $ ssh host1 # $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping explode # Module expanded into: # /home/badger/.ansible/tmp/ansible-tmp-1461173408.08-279692652635227/ansible # # You can now edit the source files to instrument the code or experiment with # different parameter values. 
When you're ready to run the code you've modified # (instead of the code from the actual zipped module), use the execute subcommand like this:: # $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping execute # Okay to use __file__ here because we're running from a kept file basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'debug_dir') args_path = os.path.join(basedir, 'args') script_path = os.path.join(basedir, 'ansible_module_%(ansible_module)s.py') if command == 'explode': # transform the ZIPDATA into an exploded directory of code and then # print the path to the code. This is an easy way for people to look # at the code on the remote machine for debugging it in that # environment z = zipfile.ZipFile(zipped_mod) for filename in z.namelist(): if filename.startswith('/'): raise Exception('Something wrong with this module zip file: should not contain absolute paths') dest_filename = os.path.join(basedir, filename) if dest_filename.endswith(os.path.sep) and not os.path.exists(dest_filename): os.makedirs(dest_filename) else: directory = os.path.dirname(dest_filename) if not os.path.exists(directory): os.makedirs(directory) f = open(dest_filename, 'w') f.write(z.read(filename)) f.close() # write the args file f = open(args_path, 'w') f.write(json_params) f.close() print('Module expanded into:') print('%%s' %% basedir) exitcode = 0 elif command == 'execute': # Execute the exploded code instead of executing the module from the # embedded ZIPDATA. This allows people to easily run their modified # code on the remote machine to see how changes will affect it. # This differs slightly from default Ansible execution of Python modules # as it passes the arguments to the module via a file instead of stdin. # Set pythonpath to the debug dir pythonpath = os.environ.get('PYTHONPATH') if pythonpath: os.environ['PYTHONPATH'] = ':'.join((basedir, pythonpath)) else: os.environ['PYTHONPATH'] = basedir p = subprocess.Popen([%(interpreter)s, script_path, args_path], env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) (stdout, stderr) = p.communicate() if not isinstance(stderr, (bytes, unicode)): stderr = stderr.read() if not isinstance(stdout, (bytes, unicode)): stdout = stdout.read() if PY3: sys.stderr.buffer.write(stderr) sys.stdout.buffer.write(stdout) else: sys.stderr.write(stderr) sys.stdout.write(stdout) return p.returncode elif command == 'excommunicate': # This attempts to run the module in-process (by importing a main # function and then calling it). It is not the way ansible generally # invokes the module so it won't work in every case. It is here to # aid certain debuggers which work better when the code doesn't change # from one process to another but there may be problems that occur # when using this that are only artifacts of how we're invoking here, # not actual bugs (as they don't affect the real way that we invoke # ansible modules) # stub the args and python path sys.argv = ['%(ansible_module)s', args_path] sys.path.insert(0, basedir) from ansible_module_%(ansible_module)s import main main() print('WARNING: Module returned to wrapper instead of exiting') sys.exit(1) else: print('WARNING: Unknown debug command. 
Doing nothing.') exitcode = 0 return exitcode if __name__ == '__main__': # # See comments in the debug() method for information on debugging # ZIPLOADER_PARAMS = %(params)s if PY3: ZIPLOADER_PARAMS = ZIPLOADER_PARAMS.encode('utf-8') try: # There's a race condition with the controller removing the # remote_tmpdir and this module executing under async. So we cannot # store this in remote_tmpdir (use system tempdir instead) temp_path = tempfile.mkdtemp(prefix='ansible_') zipped_mod = os.path.join(temp_path, 'ansible_modlib.zip') modlib = open(zipped_mod, 'wb') modlib.write(base64.b64decode(ZIPDATA)) modlib.close() if len(sys.argv) == 2: exitcode = debug(sys.argv[1], zipped_mod, ZIPLOADER_PARAMS) else: z = zipfile.ZipFile(zipped_mod, mode='r') module = os.path.join(temp_path, 'ansible_module_%(ansible_module)s.py') f = open(module, 'wb') f.write(z.read('ansible_module_%(ansible_module)s.py')) f.close() # When installed via setuptools (including python setup.py install), # ansible may be installed with an easy-install.pth file. That file # may load the system-wide install of ansible rather than the one in # the module. sitecustomize is the only way to override that setting. z = zipfile.ZipFile(zipped_mod, mode='a') # py3: zipped_mod will be text, py2: it's bytes. Need bytes at the end z = zipfile.ZipFile(zipped_mod, mode='a') sitecustomize = u'import sys\\nsys.path.insert(0,"%%s")\\n' %% zipped_mod sitecustomize = sitecustomize.encode('utf-8') z.writestr('sitecustomize.py', sitecustomize) z.close() exitcode = invoke_module(module, zipped_mod, ZIPLOADER_PARAMS) finally: try: shutil.rmtree(temp_path) except OSError: # tempdir creation probably failed pass sys.exit(exitcode) ''' def _strip_comments(source): # Strip comments and blank lines from the wrapper buf = [] for line in source.splitlines(): l = line.strip() if not l or l.startswith(u'#'): continue buf.append(line) return u'\n'.join(buf) if C.DEFAULT_KEEP_REMOTE_FILES: # Keep comments when KEEP_REMOTE_FILES is set. That way users will see # the comments with some nice usage instructions ACTIVE_ZIPLOADER_TEMPLATE = ZIPLOADER_TEMPLATE else: # ZIPLOADER_TEMPLATE stripped of comments for smaller over the wire size ACTIVE_ZIPLOADER_TEMPLATE = _strip_comments(ZIPLOADER_TEMPLATE) class ModuleDepFinder(ast.NodeVisitor): # Caveats: # This code currently does not handle: # * relative imports from py2.6+ from . import urls IMPORT_PREFIX_SIZE = len('ansible.module_utils.') def __init__(self, *args, **kwargs): """ Walk the ast tree for the python module. 
Save submodule[.submoduleN][.identifier] into self.submodules self.submodules will end up with tuples like: - ('basic',) - ('urls', 'fetch_url') - ('database', 'postgres') - ('database', 'postgres', 'quote') It's up to calling code to determine whether the final element of the dotted strings are module names or something else (function, class, or variable names) """ super(ModuleDepFinder, self).__init__(*args, **kwargs) self.submodules = set() def visit_Import(self, node): # import ansible.module_utils.MODLIB[.MODLIBn] [as asname] for alias in (a for a in node.names if a.name.startswith('ansible.module_utils.')): py_mod = alias.name[self.IMPORT_PREFIX_SIZE:] self.submodules.add((py_mod,)) self.generic_visit(node) def visit_ImportFrom(self, node): if node.module.startswith('ansible.module_utils'): where_from = node.module[self.IMPORT_PREFIX_SIZE:] if where_from: # from ansible.module_utils.MODULE1[.MODULEn] import IDENTIFIER [as asname] # from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [as asname] # from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [,IDENTIFIER] [as asname] py_mod = tuple(where_from.split('.')) for alias in node.names: self.submodules.add(py_mod + (alias.name,)) else: # from ansible.module_utils import MODLIB [,MODLIB2] [as asname] for alias in node.names: self.submodules.add((alias.name,)) self.generic_visit(node) def _slurp(path): if not os.path.exists(path): raise AnsibleError("imported module support code does not exist at %s" % os.path.abspath(path)) fd = open(path, 'rb') data = fd.read() fd.close() return data def _get_shebang(interpreter, task_vars, args=tuple()): """ Note not stellar API: Returns None instead of always returning a shebang line. Doing it this way allows the caller to decide to use the shebang it read from the file rather than trust that we reformatted what they already have correctly. """ interpreter_config = u'ansible_%s_interpreter' % os.path.basename(interpreter).strip() if interpreter_config not in task_vars: return (None, interpreter) interpreter = task_vars[interpreter_config].strip() shebang = u'#!' + interpreter if args: shebang = shebang + u' ' + u' '.join(args) return (shebang, interpreter) def recursive_finder(name, data, py_module_names, py_module_cache, zf): """ Using ModuleDepFinder, make sure we have all of the module_utils files that the module its module_utils files needs. """ # Parse the module and find the imports of ansible.module_utils tree = ast.parse(data) finder = ModuleDepFinder() finder.visit(tree) # # Determine what imports that we've found are modules (vs class, function. # variable names) for packages # normalized_modules = set() # Loop through the imports that we've found to normalize them # Exclude paths that match with paths we've already processed # (Have to exclude them a second time once the paths are processed) for py_module_name in finder.submodules.difference(py_module_names): module_info = None # Check whether either the last or the second to last identifier is # a module name for idx in (1, 2): if len(py_module_name) < idx: break try: module_info = imp.find_module(py_module_name[-idx], [os.path.join(_SNIPPET_PATH, *py_module_name[:-idx])]) break except ImportError: continue # Could not find the module. Construct a helpful error message. if module_info is None: msg = ['Could not find imported module support code for %s. 
Looked for' % name] if idx == 2: msg.append('either %s or %s' % (py_module_name[-1], py_module_name[-2])) else: msg.append(py_module_name[-1]) raise AnsibleError(' '.join(msg)) if idx == 2: # We've determined that the last portion was an identifier and # thus, not part of the module name py_module_name = py_module_name[:-1] # If not already processed then we've got work to do if py_module_name not in py_module_names: # If not in the cache, then read the file into the cache # We already have a file handle for the module open so it makes # sense to read it now if py_module_name not in py_module_cache: if module_info[2][2] == imp.PKG_DIRECTORY: # Read the __init__.py instead of the module file as this is # a python package py_module_cache[py_module_name + ('__init__',)] = _slurp(os.path.join(os.path.join(_SNIPPET_PATH, *py_module_name), '__init__.py')) normalized_modules.add(py_module_name + ('__init__',)) else: py_module_cache[py_module_name] = module_info[0].read() module_info[0].close() normalized_modules.add(py_module_name) # Make sure that all the packages that this module is a part of # are also added for i in range(1, len(py_module_name)): py_pkg_name = py_module_name[:-i] + ('__init__',) if py_pkg_name not in py_module_names: normalized_modules.add(py_pkg_name) py_module_cache[py_pkg_name] = _slurp('%s.py' % os.path.join(_SNIPPET_PATH, *py_pkg_name)) # # iterate through all of the ansible.module_utils* imports that we haven't # already checked for new imports # # set of modules that we haven't added to the zipfile unprocessed_py_module_names = normalized_modules.difference(py_module_names) for py_module_name in unprocessed_py_module_names: py_module_path = os.path.join(*py_module_name) py_module_file_name = '%s.py' % py_module_path zf.writestr(os.path.join("ansible/module_utils", py_module_file_name), py_module_cache[py_module_name]) # Add the names of the files we're scheduling to examine in the loop to # py_module_names so that we don't re-examine them in the next pass # through recursive_finder() py_module_names.update(unprocessed_py_module_names) for py_module_file in unprocessed_py_module_names: recursive_finder(py_module_file, py_module_cache[py_module_file], py_module_names, py_module_cache, zf) # Save memory; the file won't have to be read again for this ansible module. del py_module_cache[py_module_file] def _find_snippet_imports(module_name, module_data, module_path, module_args, task_vars, module_compression): """ Given the source of the module, convert it to a Jinja2 template to insert module code and return whether it's a new or old style module. """ module_substyle = module_style = 'old' # module_style is something important to calling code (ActionBase). It # determines how arguments are formatted (json vs k=v) and whether # a separate arguments file needs to be sent over the wire. # module_substyle is extra information that's useful internally. It tells # us what we have to look to substitute in the module files and whether # we're using module replacer or ziploader to format the module itself. if REPLACER in module_data: # Do REPLACER before from ansible.module_utils because we need make sure # we substitute "from ansible.module_utils basic" for REPLACER module_style = 'new' module_substyle = 'python' module_data = module_data.replace(REPLACER, b'from ansible.module_utils.basic import *') elif b'from ansible.module_utils.' 
def _find_snippet_imports(module_name, module_data, module_path, module_args, task_vars, module_compression):
    """
    Given the source of the module, convert it to a Jinja2 template to insert
    module code and return whether it's a new or old style module.
    """
    module_substyle = module_style = 'old'

    # module_style is something important to calling code (ActionBase).  It
    # determines how arguments are formatted (json vs k=v) and whether
    # a separate arguments file needs to be sent over the wire.
    # module_substyle is extra information that's useful internally.  It tells
    # us what we have to look for to substitute in the module files and
    # whether we're using module replacer or ziploader to format the module
    # itself.
    if REPLACER in module_data:
        # Do REPLACER before from ansible.module_utils because we need to
        # make sure we substitute "from ansible.module_utils.basic" for
        # REPLACER
        module_style = 'new'
        module_substyle = 'python'
        module_data = module_data.replace(REPLACER, b'from ansible.module_utils.basic import *')
    elif b'from ansible.module_utils.' in module_data:
        module_style = 'new'
        module_substyle = 'python'
    elif REPLACER_WINDOWS in module_data:
        module_style = 'new'
        module_substyle = 'powershell'
    elif REPLACER_JSONARGS in module_data:
        module_style = 'new'
        module_substyle = 'jsonargs'
    elif b'WANT_JSON' in module_data:
        module_substyle = module_style = 'non_native_want_json'

    shebang = None
    # Neither old-style nor non_native_want_json modules should be modified
    # except for the shebang line (Done by modify_module)
    if module_style in ('old', 'non_native_want_json'):
        return module_data, module_style, shebang

    output = BytesIO()
    py_module_names = set()
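    # --- Illustrative summary (not part of the original source) ---
    # The marker-to-(module_style, module_substyle) mapping implemented by
    # the chain above; the marker byte strings are defined elsewhere in
    # this file:
    #
    #     REPLACER                      -> ('new', 'python')
    #     b'from ansible.module_utils.' -> ('new', 'python')
    #     REPLACER_WINDOWS              -> ('new', 'powershell')
    #     REPLACER_JSONARGS             -> ('new', 'jsonargs')
    #     b'WANT_JSON'                  -> ('non_native_want_json', 'non_native_want_json')
    #     none of the above             -> ('old', 'old'), returned unmodified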
Use the "unexpected # module" lock instead display.debug('ZIPLOADER: Using generic lock for %s' % module_name) lock = strategy.action_write_locks[None] display.debug('ZIPLOADER: Acquiring lock') with lock: display.debug('ZIPLOADER: Lock acquired: %s' % id(lock)) # Check that no other process has created this while we were # waiting for the lock if not os.path.exists(cached_module_filename): display.debug('ZIPLOADER: Creating module') # Create the module zip data zipoutput = BytesIO() zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method) zf.writestr('ansible/__init__.py', b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\ntry:\n from ansible.release import __version__,__author__\nexcept ImportError:\n __version__="' + to_bytes(__version__) + b'"\n __author__="' + to_bytes(__author__) + b'"\n') zf.writestr('ansible/module_utils/__init__.py', b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n') zf.writestr('ansible_module_%s.py' % module_name, module_data) py_module_cache = { ('__init__',): b'' } recursive_finder(module_name, module_data, py_module_names, py_module_cache, zf) zf.close() zipdata = base64.b64encode(zipoutput.getvalue()) # Write the assembled module to a temp file (write to temp # so that no one looking for the file reads a partially # written file) if not os.path.exists(lookup_path): # Note -- if we have a global function to setup, that would # be a better place to run this os.mkdir(lookup_path) display.debug('ZIPLOADER: Writing module') with open(cached_module_filename + '-part', 'w') as f: f.write(zipdata) # Rename the file into its final position in the cache so # future users of this module can read it off the # filesystem instead of constructing from scratch. display.debug('ZIPLOADER: Renaming module') os.rename(cached_module_filename + '-part', cached_module_filename) display.debug('ZIPLOADER: Done creating module') if zipdata is None: display.debug('ZIPLOADER: Reading module after lock') # Another process wrote the file while we were waiting for # the write lock. Go ahead and read the data from disk # instead of re-creating it. try: zipdata = open(cached_module_filename, 'rb').read() except IOError: raise AnsibleError('A different worker process failed to create module file. Look at traceback for that process for debugging information.') # Fool the check later... I think we should just remove the check py_module_names.add(('basic',)) shebang, interpreter = _get_shebang(u'/usr/bin/python', task_vars) if shebang is None: shebang = u'#!/usr/bin/python' executable = interpreter.split(u' ', 1) if len(executable) == 2 and executable[0].endswith(u'env'): # Handle /usr/bin/env python style interpreter settings interpreter = u"'{0}', '{1}'".format(*executable) else: # Still have to enclose the parts of the interpreter in quotes # because we're substituting it into the template as a python # string interpreter = u"'{0}'".format(interpreter) output.write(to_bytes(ACTIVE_ZIPLOADER_TEMPLATE % dict( zipdata=zipdata, ansible_module=module_name, params=python_repred_params, shebang=shebang, interpreter=interpreter, coding=ENCODING_STRING, ))) module_data = output.getvalue() # Sanity check from 1.x days. Maybe too strict. Some custom python # modules that use ziploader may implement their own helpers and not # need basic.py. All the constants that we substituted into basic.py # for module_replacer are now available in other, better ways. 
        # Sanity check from 1.x days.  Maybe too strict.  Some custom python
        # modules that use ziploader may implement their own helpers and not
        # need basic.py.  All the constants that we substituted into basic.py
        # for module_replacer are now available in other, better ways.
        if ('basic',) not in py_module_names:
            raise AnsibleError("missing required import in %s: Did not import ansible.module_utils.basic for boilerplate helper code" % module_path)

    elif module_substyle == 'powershell':
        # Module replacer for jsonargs and windows
        lines = module_data.split(b'\n')
        for line in lines:
            if REPLACER_WINDOWS in line:
                ps_data = _slurp(os.path.join(_SNIPPET_PATH, "powershell.ps1"))
                output.write(ps_data)
                py_module_names.add((b'powershell',))
                continue
            output.write(line + b'\n')
        module_data = output.getvalue()

        module_args_json = to_bytes(json.dumps(module_args))
        module_data = module_data.replace(REPLACER_JSONARGS, module_args_json)

        # Sanity check from 1.x days.  This is currently useless as we only
        # get here if we are going to substitute powershell.ps1 into the
        # module anyway.  Leaving it for when/if we add other powershell
        # module_utils files.
        if (b'powershell',) not in py_module_names:
            raise AnsibleError("missing required import in %s: # POWERSHELL_COMMON" % module_path)

    elif module_substyle == 'jsonargs':
        module_args_json = to_bytes(json.dumps(module_args))

        # these strings could be included in a third-party module but
        # officially they were included in the 'basic' snippet for new-style
        # python modules (which has been replaced with something else in
        # ziploader).  If we remove them from jsonargs-style module replacer
        # then we can remove them everywhere.
        python_repred_args = to_bytes(repr(module_args_json))
        module_data = module_data.replace(REPLACER_VERSION, to_bytes(repr(__version__)))
        module_data = module_data.replace(REPLACER_COMPLEX, python_repred_args)
        module_data = module_data.replace(REPLACER_SELINUX, to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS)))

        # The main event -- substitute the JSON args string into the module
        module_data = module_data.replace(REPLACER_JSONARGS, module_args_json)

    facility = b'syslog.' + to_bytes(task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY), errors='strict')
    module_data = module_data.replace(b'syslog.LOG_USER', facility)

    return (module_data, module_style, shebang)


# ******************************************************************************
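# --- Illustrative sketch (not part of the original source) ---
# A typical call into modify_module() below, as an action plugin might make
# it; the path and argument values are hypothetical:
#
#     (data, style, shebang) = modify_module(
#         'ping', '/usr/share/ansible/modules/ping.py',
#         module_args={'data': 'pong'}, task_vars=task_vars,
#         module_compression='ZIP_DEFLATED')
#     # style is one of 'old', 'new', or 'non_native_want_json'; data is the
#     # complete transferable module source; shebang is bytes or None.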
def modify_module(module_name, module_path, module_args, task_vars=dict(), module_compression='ZIP_STORED'):
    """
    Used to insert chunks of code into modules before transfer rather than
    doing regular python imports.  This allows for more efficient transfer in
    a non-bootstrapping scenario by not moving extra files over the wire and
    also takes care of embedding arguments in the transferred modules.

    This version is done in such a way that local imports can still be used
    in the module code, so IDEs don't have to be aware of what is going on.

    Example:

    from ansible.module_utils.basic import *

       ... will result in the insertion of basic.py into the module
       from the module_utils/ directory in the source tree.

    All modules are required to import at least basic, though there will also
    be other snippets.

    For powershell, there are equivalent conventions like this:

    # POWERSHELL_COMMON

    which results in the inclusion of the common code from powershell.ps1
    """
    with open(module_path, 'rb') as f:

        # read in the module source
        module_data = f.read()

    (module_data, module_style, shebang) = _find_snippet_imports(module_name, module_data, module_path, module_args, task_vars, module_compression)

    if shebang is None:
        lines = module_data.split(b"\n", 1)
        if lines[0].startswith(b"#!"):
            shebang = lines[0].strip()
            args = shlex.split(str(shebang[2:]))
            interpreter = args[0]
            interpreter = to_bytes(interpreter)

            new_shebang = to_bytes(_get_shebang(interpreter, task_vars, args[1:])[0], errors='strict', nonstring='passthru')
            if new_shebang:
                lines[0] = shebang = new_shebang

            if os.path.basename(interpreter).startswith(b'python'):
                lines.insert(1, to_bytes(ENCODING_STRING))
        else:
            # No shebang, assume a binary module?
            pass

        module_data = b"\n".join(lines)
    else:
        shebang = to_bytes(shebang, errors='strict')

    return (module_data, module_style, shebang)
ansible-2.1.1.0/lib/ansible/executor/play_iterator.py0000664000175400017540000006526412746444466023723 0ustar  jenkinsjenkins00000000000000# (c) 2012-2014, Michael DeHaan
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see .

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import fnmatch

from ansible.compat.six import iteritems

from ansible import constants as C

from ansible.errors import AnsibleError
from ansible.playbook.block import Block
from ansible.playbook.task import Task
from ansible.utils.boolean import boolean

__all__ = ['PlayIterator']

try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
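# --- Illustrative sketch (not part of the original source) ---
# A HostState records where a single host is inside the compiled block
# list.  For example (field values hypothetical), a host that failed a task
# and is now running its block's rescue section might look like:
#
#     s = HostState(blocks=play_blocks)
#     s.cur_block = 2
#     s.cur_rescue_task = 1
#     s.run_state = PlayIterator.ITERATING_RESCUE
#     s.fail_state = PlayIterator.FAILED_TASKS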
%s" % ( self.cur_block, self.cur_regular_task, self.cur_rescue_task, self.cur_always_task, self.cur_role, _run_state_to_string(self.run_state), _failed_state_to_string(self.fail_state), self.pending_setup, self.tasks_child_state, self.rescue_child_state, self.always_child_state, self.did_start_at_task, ) def __eq__(self, other): if not isinstance(other, HostState): return False for attr in ( '_blocks', 'cur_block', 'cur_regular_task', 'cur_rescue_task', 'cur_always_task', 'cur_role', 'run_state', 'fail_state', 'pending_setup', 'cur_dep_chain', 'tasks_child_state', 'rescue_child_state', 'always_child_state' ): if getattr(self, attr) != getattr(other, attr): return False return True def get_current_block(self): return self._blocks[self.cur_block] def copy(self): new_state = HostState(self._blocks) new_state.cur_block = self.cur_block new_state.cur_regular_task = self.cur_regular_task new_state.cur_rescue_task = self.cur_rescue_task new_state.cur_always_task = self.cur_always_task new_state.cur_role = self.cur_role new_state.run_state = self.run_state new_state.fail_state = self.fail_state new_state.pending_setup = self.pending_setup new_state.did_start_at_task = self.did_start_at_task if self.cur_dep_chain is not None: new_state.cur_dep_chain = self.cur_dep_chain[:] if self.tasks_child_state is not None: new_state.tasks_child_state = self.tasks_child_state.copy() if self.rescue_child_state is not None: new_state.rescue_child_state = self.rescue_child_state.copy() if self.always_child_state is not None: new_state.always_child_state = self.always_child_state.copy() return new_state class PlayIterator: # the primary running states for the play iteration ITERATING_SETUP = 0 ITERATING_TASKS = 1 ITERATING_RESCUE = 2 ITERATING_ALWAYS = 3 ITERATING_COMPLETE = 4 # the failure states for the play iteration, which are powers # of 2 as they may be or'ed together in certain circumstances FAILED_NONE = 0 FAILED_SETUP = 1 FAILED_TASKS = 2 FAILED_RESCUE = 4 FAILED_ALWAYS = 8 def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False): self._play = play self._blocks = [] # Default options to gather gather_subset = C.DEFAULT_GATHER_SUBSET # Retrieve subset to gather if self._play.gather_subset is not None: gather_subset = self._play.gather_subset setup_block = Block(play=self._play) setup_task = Task(block=setup_block) setup_task.action = 'setup' setup_task.tags = ['always'] setup_task.args = { 'gather_subset': gather_subset, } setup_task.set_loader(self._play._loader) setup_block.block = [setup_task] setup_block = setup_block.filter_tagged_tasks(play_context, all_vars) self._blocks.append(setup_block) for block in self._play.compile(): new_block = block.filter_tagged_tasks(play_context, all_vars) if new_block.has_tasks(): self._blocks.append(new_block) self._host_states = {} start_at_matched = False for host in inventory.get_hosts(self._play.hosts): self._host_states[host.name] = HostState(blocks=self._blocks) # if the host's name is in the variable manager's fact cache, then set # its _gathered_facts flag to true for smart gathering tests later if host.name in variable_manager._fact_cache: host._gathered_facts = True # if we're looking to start at a specific task, iterate through # the tasks for this host until we find the specified task if play_context.start_at_task is not None and not start_at_done: while True: (s, task) = self.get_next_task_for_host(host, peek=True) if s.run_state == self.ITERATING_COMPLETE: break if task.name == play_context.start_at_task or 
class PlayIterator:

    # the primary running states for the play iteration
    ITERATING_SETUP = 0
    ITERATING_TASKS = 1
    ITERATING_RESCUE = 2
    ITERATING_ALWAYS = 3
    ITERATING_COMPLETE = 4

    # the failure states for the play iteration, which are powers
    # of 2 as they may be or'ed together in certain circumstances
    FAILED_NONE = 0
    FAILED_SETUP = 1
    FAILED_TASKS = 2
    FAILED_RESCUE = 4
    FAILED_ALWAYS = 8

    def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
        self._play = play
        self._blocks = []

        # Default options to gather
        gather_subset = C.DEFAULT_GATHER_SUBSET

        # Retrieve subset to gather
        if self._play.gather_subset is not None:
            gather_subset = self._play.gather_subset

        setup_block = Block(play=self._play)
        setup_task = Task(block=setup_block)
        setup_task.action = 'setup'
        setup_task.tags = ['always']
        setup_task.args = {
            'gather_subset': gather_subset,
        }
        setup_task.set_loader(self._play._loader)
        setup_block.block = [setup_task]

        setup_block = setup_block.filter_tagged_tasks(play_context, all_vars)
        self._blocks.append(setup_block)

        for block in self._play.compile():
            new_block = block.filter_tagged_tasks(play_context, all_vars)
            if new_block.has_tasks():
                self._blocks.append(new_block)

        self._host_states = {}
        start_at_matched = False
        for host in inventory.get_hosts(self._play.hosts):
            self._host_states[host.name] = HostState(blocks=self._blocks)

            # if the host's name is in the variable manager's fact cache, then set
            # its _gathered_facts flag to true for smart gathering tests later
            if host.name in variable_manager._fact_cache:
                host._gathered_facts = True

            # if we're looking to start at a specific task, iterate through
            # the tasks for this host until we find the specified task
            if play_context.start_at_task is not None and not start_at_done:
                while True:
                    (s, task) = self.get_next_task_for_host(host, peek=True)
                    if s.run_state == self.ITERATING_COMPLETE:
                        break
                    if task.name == play_context.start_at_task or fnmatch.fnmatch(task.name, play_context.start_at_task) or \
                       task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
                        start_at_matched = True
                        break
                    else:
                        self.get_next_task_for_host(host)

                # finally, reset the host's state to ITERATING_SETUP
                if start_at_matched:
                    self._host_states[host.name].did_start_at_task = True
                    self._host_states[host.name].run_state = self.ITERATING_SETUP

        if start_at_matched:
            # we have our match, so clear the start_at_task field on the
            # play context to flag that we've started at a task (and future
            # plays won't try to advance)
            play_context.start_at_task = None

        # Extend the play handlers list to include the handlers defined in roles
        self._play.handlers.extend(play.compile_roles_handlers())

    def get_host_state(self, host):
        # Since we're using the PlayIterator to carry forward failed hosts,
        # in the event that a previous host was not in the current inventory
        # we create a stub state for it now
        if host.name not in self._host_states:
            self._host_states[host.name] = HostState(blocks=[])

        return self._host_states[host.name].copy()

    def get_next_task_for_host(self, host, peek=False):

        display.debug("getting the next task for host %s" % host.name)

        s = self.get_host_state(host)

        task = None
        if s.run_state == self.ITERATING_COMPLETE:
            display.debug("host %s is done iterating, returning" % host.name)
            return (s, None)

        old_s = s
        (s, task) = self._get_next_task_from_state(s, host=host, peek=peek)

        def _roles_are_different(ra, rb):
            if ra != rb:
                return True
            else:
                return old_s.cur_dep_chain != task._block.get_dep_chain()

        if task and task._role:
            # if we had a current role, mark that role as completed
            if s.cur_role and _roles_are_different(task._role, s.cur_role) and host.name in s.cur_role._had_task_run and not peek:
                s.cur_role._completed[host.name] = True
            s.cur_role = task._role
            s.cur_dep_chain = task._block.get_dep_chain()

        if not peek:
            self._host_states[host.name] = s

        display.debug("done getting next task for host %s" % host.name)
        display.debug(" ^ task is: %s" % task)
        display.debug(" ^ state is: %s" % s)
        return (s, task)
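    # --- Illustrative summary (not part of the original source) ---
    # The fact-gathering decision made inside _get_next_task_from_state()
    # below, written out as a truth table (gathering is C.DEFAULT_GATHERING):
    #
    #     gathering   play.gather_facts    host._gathered_facts   run setup?
    #     implicit    None or truthy       (ignored)              yes
    #     explicit    True                 (ignored)              yes
    #     smart       None or truthy       False                  yes
    #     smart       None or truthy       True                   no
    #     any other combination                                   no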
    def _get_next_task_from_state(self, state, host, peek):

        task = None

        # try and find the next task, given the current state.
        while True:
            # try to get the current block from the list of blocks, and
            # if we run past the end of the list we know we're done with
            # this block
            try:
                block = state._blocks[state.cur_block]
            except IndexError:
                state.run_state = self.ITERATING_COMPLETE
                return (state, None)

            if state.run_state == self.ITERATING_SETUP:
                # First, we check to see if we were pending setup. If not, this is
                # the first trip through ITERATING_SETUP, so we set the pending_setup
                # flag and try to determine if we do in fact want to gather facts for
                # the specified host.
                if not state.pending_setup:
                    state.pending_setup = True

                    # Gather facts if the default is 'smart' and we have not yet
                    # done it for this host; or if 'explicit' and the play sets
                    # gather_facts to True; or if 'implicit' and the play does
                    # NOT explicitly set gather_facts to False.
                    gathering = C.DEFAULT_GATHERING
                    implied = self._play.gather_facts is None or boolean(self._play.gather_facts)

                    if (gathering == 'implicit' and implied) or \
                       (gathering == 'explicit' and boolean(self._play.gather_facts)) or \
                       (gathering == 'smart' and implied and not host._gathered_facts):
                        # The setup block is always self._blocks[0], as we inject it
                        # during the play compilation in __init__ above.
                        setup_block = self._blocks[0]
                        if setup_block.has_tasks() and len(setup_block.block) > 0:
                            task = setup_block.block[0]
                            if not peek:
                                # mark the host as having gathered facts, because we're
                                # returning the setup task to be executed
                                host.set_gathered_facts(True)
                else:
                    # This is the second trip through ITERATING_SETUP, so we clear
                    # the flag and move onto the next block in the list while setting
                    # the run state to ITERATING_TASKS
                    state.pending_setup = False

                    state.run_state = self.ITERATING_TASKS
                    if not state.did_start_at_task:
                        state.cur_block += 1
                        state.cur_regular_task = 0
                        state.cur_rescue_task = 0
                        state.cur_always_task = 0
                        state.child_state = None

            elif state.run_state == self.ITERATING_TASKS:
                # clear the pending setup flag, since we're past that and it didn't fail
                if state.pending_setup:
                    state.pending_setup = False

                # First, we check for a child task state that is not failed, and if we
                # have one recurse into it for the next task. If we're done with the child
                # state, we clear it and drop back to getting the next task from the list.
                if state.tasks_child_state:
                    (state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, host=host, peek=peek)
                    if self._check_failed_state(state.tasks_child_state):
                        # failed child state, so clear it and move into the rescue portion
                        state.tasks_child_state = None
                        self._set_failed_state(state)
                    else:
                        # get the next task recursively
                        if task is None or state.tasks_child_state.run_state == self.ITERATING_COMPLETE:
                            # we're done with the child state, so clear it and continue
                            # back to the top of the loop to get the next task
                            state.tasks_child_state = None
                            continue
                else:
                    # First here, we check to see if we've failed anywhere down the chain
                    # of states we have, and if so we move onto the rescue portion. Otherwise,
                    # we check to see if we've moved past the end of the list of tasks. If so,
                    # we move into the always portion of the block, otherwise we get the next
                    # task from the list.
                    if self._check_failed_state(state):
                        state.run_state = self.ITERATING_RESCUE
                    elif state.cur_regular_task >= len(block.block):
                        state.run_state = self.ITERATING_ALWAYS
                    else:
                        task = block.block[state.cur_regular_task]
                        # if the current task is actually a child block, create a child
                        # state for us to recurse into on the next pass
                        if isinstance(task, Block) or state.tasks_child_state is not None:
                            state.tasks_child_state = HostState(blocks=[task])
                            state.tasks_child_state.run_state = self.ITERATING_TASKS
                            state.tasks_child_state.cur_role = state.cur_role
                            # since we've created the child state, clear the task
                            # so we can pick up the child state on the next pass
                            task = None
                        state.cur_regular_task += 1
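            # --- Illustrative sketch (not part of the original source) ---
            # Failure flow through a block with all three sections (task
            # names hypothetical):
            #
            #     block:  [t1, t2]  t1 fails -> fail_state |= FAILED_TASKS,
            #                       run_state -> ITERATING_RESCUE
            #     rescue: [r1]      r1 succeeds -> fail_state reset to
            #                       FAILED_NONE, run_state -> ITERATING_ALWAYS
            #     always: [a1]      always runs, then advance to next block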
            elif state.run_state == self.ITERATING_RESCUE:
                # The process here is identical to ITERATING_TASKS, except instead
                # we move into the always portion of the block.
                if state.rescue_child_state:
                    (state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host, peek=peek)
                    if self._check_failed_state(state.rescue_child_state):
                        state.rescue_child_state = None
                        self._set_failed_state(state)
                    else:
                        if task is None or state.rescue_child_state.run_state == self.ITERATING_COMPLETE:
                            state.rescue_child_state = None
                            continue
                else:
                    if state.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE:
                        state.run_state = self.ITERATING_ALWAYS
                    elif state.cur_rescue_task >= len(block.rescue):
                        if len(block.rescue) > 0:
                            state.fail_state = self.FAILED_NONE
                        state.run_state = self.ITERATING_ALWAYS
                    else:
                        task = block.rescue[state.cur_rescue_task]
                        if isinstance(task, Block) or state.rescue_child_state is not None:
                            state.rescue_child_state = HostState(blocks=[task])
                            state.rescue_child_state.run_state = self.ITERATING_TASKS
                            state.rescue_child_state.cur_role = state.cur_role
                            task = None
                        state.cur_rescue_task += 1

            elif state.run_state == self.ITERATING_ALWAYS:
                # And again, the process here is identical to ITERATING_TASKS, except
                # instead we either move onto the next block in the list, or we set the
                # run state to ITERATING_COMPLETE in the event of any errors, or when we
                # have hit the end of the list of blocks.
                if state.always_child_state:
                    (state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host, peek=peek)
                    if self._check_failed_state(state.always_child_state):
                        state.always_child_state = None
                        self._set_failed_state(state)
                    else:
                        if task is None or state.always_child_state.run_state == self.ITERATING_COMPLETE:
                            state.always_child_state = None
                else:
                    if state.cur_always_task >= len(block.always):
                        if state.fail_state != self.FAILED_NONE:
                            state.run_state = self.ITERATING_COMPLETE
                        else:
                            state.cur_block += 1
                            state.cur_regular_task = 0
                            state.cur_rescue_task = 0
                            state.cur_always_task = 0
                            state.run_state = self.ITERATING_TASKS
                            state.tasks_child_state = None
                            state.rescue_child_state = None
                            state.always_child_state = None
                    else:
                        task = block.always[state.cur_always_task]
                        if isinstance(task, Block) or state.always_child_state is not None:
                            state.always_child_state = HostState(blocks=[task])
                            state.always_child_state.run_state = self.ITERATING_TASKS
                            state.always_child_state.cur_role = state.cur_role
                            task = None
                        state.cur_always_task += 1

            elif state.run_state == self.ITERATING_COMPLETE:
                return (state, None)

            # if something above set the task, break out of the loop now
            if task:
                break

        return (state, task)
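    # --- Illustrative sketch (not part of the original source) ---
    # _set_failed_state() below routes a failure to the innermost active
    # child state first; only when there is no child state does it OR in the
    # failure bit and pick the next run_state.  E.g. a failure while
    # ITERATING_TASKS in a block that has a rescue section leaves:
    #
    #     state.fail_state |= PlayIterator.FAILED_TASKS
    #     state.run_state = PlayIterator.ITERATING_RESCUE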
    def _set_failed_state(self, state):
        if state.run_state == self.ITERATING_SETUP:
            state.fail_state |= self.FAILED_SETUP
            state.run_state = self.ITERATING_COMPLETE
        elif state.run_state == self.ITERATING_TASKS:
            if state.tasks_child_state is not None:
                state.tasks_child_state = self._set_failed_state(state.tasks_child_state)
            else:
                state.fail_state |= self.FAILED_TASKS
                if state._blocks[state.cur_block].rescue:
                    state.run_state = self.ITERATING_RESCUE
                elif state._blocks[state.cur_block].always:
                    state.run_state = self.ITERATING_ALWAYS
                else:
                    state.run_state = self.ITERATING_COMPLETE
        elif state.run_state == self.ITERATING_RESCUE:
            if state.rescue_child_state is not None:
                state.rescue_child_state = self._set_failed_state(state.rescue_child_state)
            else:
                state.fail_state |= self.FAILED_RESCUE
                if state._blocks[state.cur_block].always:
                    state.run_state = self.ITERATING_ALWAYS
                else:
                    state.run_state = self.ITERATING_COMPLETE
        elif state.run_state == self.ITERATING_ALWAYS:
            if state.always_child_state is not None:
                state.always_child_state = self._set_failed_state(state.always_child_state)
            else:
                state.fail_state |= self.FAILED_ALWAYS
                state.run_state = self.ITERATING_COMPLETE
        return state

    def mark_host_failed(self, host):
        s = self.get_host_state(host)
        display.debug("marking host %s failed, current state: %s" % (host, s))
        s = self._set_failed_state(s)
        display.debug("^ failed state is now: %s" % s)
        self._host_states[host.name] = s

    def get_failed_hosts(self):
        return dict((host, True) for (host, state) in iteritems(self._host_states) if self._check_failed_state(state))

    def _check_failed_state(self, state):
        if state is None:
            return False
        elif state.fail_state != self.FAILED_NONE:
            if state.run_state == self.ITERATING_RESCUE and state.fail_state & self.FAILED_RESCUE == 0:
                return False
            else:
                return True
        elif state.run_state == self.ITERATING_TASKS and self._check_failed_state(state.tasks_child_state):
            cur_block = self._blocks[state.cur_block]
            if len(cur_block.rescue) > 0 and state.fail_state & self.FAILED_RESCUE == 0:
                return False
            else:
                return True
        elif state.run_state == self.ITERATING_RESCUE and self._check_failed_state(state.rescue_child_state):
            return True
        elif state.run_state == self.ITERATING_ALWAYS and self._check_failed_state(state.always_child_state):
            return True
        return False

    def is_failed(self, host):
        s = self.get_host_state(host)
        return self._check_failed_state(s)
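    # --- Illustrative sketch (not part of the original source) ---
    # fail_state is a bitmask (powers of 2, per the class constants above),
    # so several failure stages can be recorded at once and tested
    # independently:
    #
    #     fs = PlayIterator.FAILED_TASKS | PlayIterator.FAILED_RESCUE  # == 6
    #     bool(fs & PlayIterator.FAILED_RESCUE)                        # True
    #     bool(fs & PlayIterator.FAILED_ALWAYS)                        # False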
    def get_original_task(self, host, task):
        '''
        Finds the task in the task list which matches the UUID of the given task.
        The executor engine serializes/deserializes objects as they are passed through
        the different processes, and not all data structures are preserved. This method
        allows us to find the original task passed into the executor engine.
        '''
        def _search_block(block):
            '''
            helper method to check a block's task lists (block/rescue/always)
            for a given task uuid. If a Block is encountered in the place of a
            task, it will be recursively searched (this happens when a task
            include inserts one or more blocks into a task list).
            '''
            for b in (block.block, block.rescue, block.always):
                for t in b:
                    if isinstance(t, Block):
                        res = _search_block(t)
                        if res:
                            return res
                    elif t._uuid == task._uuid:
                        return t
            return None

        def _search_state(state):
            for block in state._blocks:
                res = _search_block(block)
                if res:
                    return res
            for child_state in (state.tasks_child_state, state.rescue_child_state, state.always_child_state):
                if child_state is not None:
                    res = _search_state(child_state)
                    if res:
                        return res
            return None

        s = self.get_host_state(host)
        res = _search_state(s)
        if res:
            return res

        for block in self._play.handlers:
            res = _search_block(block)
            if res:
                return res

        return None

    def _insert_tasks_into_state(self, state, task_list):
        # if we've failed at all, or if the task list is empty, just return the current state
        if state.fail_state != self.FAILED_NONE and state.run_state not in (self.ITERATING_RESCUE, self.ITERATING_ALWAYS) or not task_list:
            return state

        if state.run_state == self.ITERATING_TASKS:
            if state.tasks_child_state:
                state.tasks_child_state = self._insert_tasks_into_state(state.tasks_child_state, task_list)
            else:
                target_block = state._blocks[state.cur_block].copy(exclude_parent=True)
                before = target_block.block[:state.cur_regular_task]
                after = target_block.block[state.cur_regular_task:]
                target_block.block = before + task_list + after
                state._blocks[state.cur_block] = target_block
        elif state.run_state == self.ITERATING_RESCUE:
            if state.rescue_child_state:
                state.rescue_child_state = self._insert_tasks_into_state(state.rescue_child_state, task_list)
            else:
                target_block = state._blocks[state.cur_block].copy(exclude_parent=True)
                before = target_block.rescue[:state.cur_rescue_task]
                after = target_block.rescue[state.cur_rescue_task:]
                target_block.rescue = before + task_list + after
                state._blocks[state.cur_block] = target_block
        elif state.run_state == self.ITERATING_ALWAYS:
            if state.always_child_state:
                state.always_child_state = self._insert_tasks_into_state(state.always_child_state, task_list)
            else:
                target_block = state._blocks[state.cur_block].copy(exclude_parent=True)
                before = target_block.always[:state.cur_always_task]
                after = target_block.always[state.cur_always_task:]
                target_block.always = before + task_list + after
                state._blocks[state.cur_block] = target_block

        return state

    def add_tasks(self, host, task_list):
        self._host_states[host.name] = self._insert_tasks_into_state(self.get_host_state(host), task_list)
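# --- Illustrative sketch (not part of the original source) ---
# What _insert_tasks_into_state() does to the current block when an include
# adds tasks while ITERATING_TASKS (task names hypothetical):
#
#     block.block == [t1, t2, t3], cur_regular_task == 1
#     add_tasks(host, [i1, i2])
#     # block.block == [t1, i1, i2, t2, t3]; i1 runs next, t2 after that.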
ansible-2.1.1.0/lib/ansible/executor/playbook_executor.py0000664000175400017540000002756612746444466024606 0ustar  jenkinsjenkins00000000000000# (c) 2012-2014, Michael DeHaan
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see .

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os

from ansible.compat.six import string_types

from ansible import constants as C
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.playbook import Playbook
from ansible.template import Templar

from ansible.utils.path import makedirs_safe
from ansible.utils.unicode import to_unicode, to_str

try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()


class PlaybookExecutor:

    '''
    This is the primary class for executing playbooks, and thus the
    basis for bin/ansible-playbook operation.
    '''

    def __init__(self, playbooks, inventory, variable_manager, loader, options, passwords):
        self._playbooks = playbooks
        self._inventory = inventory
        self._variable_manager = variable_manager
        self._loader = loader
        self._options = options
        self.passwords = passwords
        self._unreachable_hosts = dict()

        if options.listhosts or options.listtasks or options.listtags or options.syntax:
            self._tqm = None
        else:
            self._tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, options=options, passwords=self.passwords)

    def run(self):

        '''
        Run the given playbook, based on the settings in the play which
        may limit the runs to serialized groups, etc.
        '''

        result = 0
        entrylist = []
        entry = {}
        try:
            for playbook_path in self._playbooks:
                pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
                self._inventory.set_playbook_basedir(os.path.dirname(playbook_path))

                if self._tqm is None: # we are doing a listing
                    entry = {'playbook': playbook_path}
                    entry['plays'] = []
                else:
                    # make sure the tqm has callbacks loaded
                    self._tqm.load_callbacks()
                    self._tqm.send_callback('v2_playbook_on_start', pb)

                i = 1
                plays = pb.get_plays()
                display.vv(u'%d plays in %s' % (len(plays), to_unicode(playbook_path)))

                for play in plays:
                    if play._included_path is not None:
                        self._loader.set_basedir(play._included_path)
                    else:
                        self._loader.set_basedir(pb._basedir)

                    # clear any filters which may have been applied to the inventory
                    self._inventory.remove_restriction()

                    if play.vars_prompt:
                        for var in play.vars_prompt:
                            vname = var['name']
                            prompt = var.get("prompt", vname)
                            default = var.get("default", None)
                            private = var.get("private", True)
                            confirm = var.get("confirm", False)
                            encrypt = var.get("encrypt", None)
                            salt_size = var.get("salt_size", None)
                            salt = var.get("salt", None)

                            if vname not in self._variable_manager.extra_vars:
                                if self._tqm:
                                    self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default)
                                    play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default)
                                else: # we are either in --list-