magnum-6.1.0/0000775000175100017510000000000013244017675013037 5ustar zuulzuul00000000000000magnum-6.1.0/requirements.txt0000666000175100017510000000404313244017343016316 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. # Despite above warning added by global sync process, please use # ascii betical order. Babel!=2.4.0,>=2.3.4 # BSD PyYAML>=3.10 # MIT SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT WSME>=0.8.0 # MIT WebOb>=1.7.1 # MIT alembic>=0.8.10 # MIT cliff!=2.9.0,>=2.8.0 # Apache-2.0 decorator>=3.4.0 # BSD docker>=2.4.2 # Apache-2.0 enum34>=1.0.4;python_version=='2.7' or python_version=='2.6' or python_version=='3.3' # BSD eventlet!=0.18.3,!=0.20.1,<0.21.0,>=0.18.2 # MIT iso8601>=0.1.11 # MIT jsonpatch!=1.20,>=1.16 # BSD keystoneauth1>=3.3.0 # Apache-2.0 keystonemiddleware>=4.17.0 # Apache-2.0 kubernetes>=4.0.0 # Apache-2.0 marathon!=0.9.1,>=0.8.6 # MIT netaddr>=0.7.18 # BSD oslo.concurrency>=3.25.0 # Apache-2.0 oslo.config>=5.1.0 # Apache-2.0 oslo.context>=2.19.2 # Apache-2.0 oslo.db>=4.27.0 # Apache-2.0 oslo.i18n>=3.15.3 # Apache-2.0 oslo.log>=3.36.0 # Apache-2.0 oslo.messaging>=5.29.0 # Apache-2.0 oslo.middleware>=3.31.0 # Apache-2.0 oslo.policy>=1.30.0 # Apache-2.0 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0 oslo.service!=1.28.1,>=1.24.0 # Apache-2.0 oslo.utils>=3.33.0 # Apache-2.0 oslo.versionedobjects>=1.31.2 # Apache-2.0 oslo.reports>=1.18.0 # Apache-2.0 pbr!=2.1.0,>=2.0.0 # Apache-2.0 pecan!=1.0.2,!=1.0.3,!=1.0.4,!=1.2,>=1.0.0 # BSD pycadf!=2.0.0,>=1.1.0 # Apache-2.0 python-barbicanclient!=4.5.0,!=4.5.1,>=4.0.0 # Apache-2.0 python-glanceclient>=2.8.0 # Apache-2.0 python-heatclient>=1.10.0 # Apache-2.0 python-neutronclient>=6.3.0 # Apache-2.0 python-novaclient>=9.1.0 # Apache-2.0 python-keystoneclient>=3.8.0 # Apache-2.0 requests>=2.14.2 # Apache-2.0 
setuptools!=24.0.0,!=34.0.0,!=34.0.1,!=34.0.2,!=34.0.3,!=34.1.0,!=34.1.1,!=34.2.0,!=34.3.0,!=34.3.1,!=34.3.2,!=36.2.0,>=16.0 # PSF/ZPL six>=1.10.0 # MIT stevedore>=1.20.0 # Apache-2.0 taskflow>=2.16.0 # Apache-2.0 cryptography!=2.0,>=1.9 # BSD/Apache-2.0 Werkzeug>=0.7 # BSD License magnum-6.1.0/devstack/0000775000175100017510000000000013244017675014643 5ustar zuulzuul00000000000000magnum-6.1.0/devstack/README.rst0000666000175100017510000000210213244017334016317 0ustar zuulzuul00000000000000==================== DevStack Integration ==================== This directory contains the files necessary to integrate magnum with devstack. Refer the quickstart guide at http://docs.openstack.org/developer/magnum/dev/quickstart.html for more information on using devstack and magnum. Running devstack with magnum for the first time may take a long time as it needs to download the Fedora Atomic qcow2 image (see http://www.projectatomic.io/download/). To install magnum into devstack, add the following settings to enable the magnum plugin:: cat > /opt/stack/devstack/local.conf << END [[local|localrc]] enable_plugin heat https://github.com/openstack/heat master enable_plugin magnum https://github.com/openstack/magnum master END Additionally, you might need additional Neutron configurations for your environment. Please refer to the devstack documentation [#devstack_neutron]_ for details. Then run devstack normally:: cd /opt/stack/devstack ./stack.sh .. [#devstack_neutron] https://docs.openstack.org/developer/devstack/guides/neutron.html magnum-6.1.0/devstack/plugin.sh0000777000175100017510000000307013244017334016472 0ustar zuulzuul00000000000000# magnum.sh - Devstack extras script to install magnum # Save trace setting XTRACE=$(set +o | grep xtrace) set -o xtrace echo_summary "magnum's plugin.sh was called..." 
source $DEST/magnum/devstack/lib/magnum (set -o posix; set) if is_service_enabled magnum-api magnum-cond; then if [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing magnum" install_magnum MAGNUM_GUEST_IMAGE_URL=${MAGNUM_GUEST_IMAGE_URL:-"https://download.fedoraproject.org/pub/alt/atomic/stable/Fedora-Atomic-27-20180212.2/CloudImages/x86_64/images/Fedora-Atomic-27-20180212.2.x86_64.qcow2"} IMAGE_URLS+=",${MAGNUM_GUEST_IMAGE_URL}" LIBS_FROM_GIT="${LIBS_FROM_GIT},python-magnumclient" install_magnumclient cleanup_magnum elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then echo_summary "Configuring magnum" configure_magnum # Hack a large timeout for now iniset /etc/keystone/keystone.conf token expiration 7200 if is_service_enabled key; then create_magnum_accounts fi elif [[ "$1" == "stack" && "$2" == "extra" ]]; then # Initialize magnum init_magnum magnum_register_image # Start the magnum API and magnum taskmgr components echo_summary "Starting magnum" start_magnum configure_iptables_magnum configure_apache_magnum fi if [[ "$1" == "unstack" ]]; then stop_magnum fi if [[ "$1" == "clean" ]]; then cleanup_magnum fi fi # Restore xtrace $XTRACE magnum-6.1.0/devstack/lib/0000775000175100017510000000000013244017675015411 5ustar zuulzuul00000000000000magnum-6.1.0/devstack/lib/magnum0000666000175100017510000003505213244017334016617 0ustar zuulzuul00000000000000#!/bin/bash # # lib/magnum # Functions to control the configuration and operation of the **magnum** service # Dependencies: # # - ``functions`` file # - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined # - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined # ``stack.sh`` calls the entry points in this order: # # - install_magnum # - configure_magnum # - create_magnum_conf # - init_magnum # - magnum_register_image # - start_magnum # - configure_iptables_magnum # - configure_apache_magnum # - stop_magnum # - cleanup_magnum # Save trace setting XTRACE=$(set +o | grep xtrace) set +o xtrace # 
Defaults # -------- # Set up default directories MAGNUM_REPO=${MAGNUM_REPO:-${GIT_BASE}/openstack/magnum.git} MAGNUM_BRANCH=${MAGNUM_BRANCH:-master} MAGNUM_DIR=$DEST/magnum GITREPO["python-magnumclient"]=${MAGNUMCLIENT_REPO:-${GIT_BASE}/openstack/python-magnumclient.git} GITBRANCH["python-magnumclient"]=${MAGNUMCLIENT_BRANCH:-master} GITDIR["python-magnumclient"]=$DEST/python-magnumclient MAGNUM_STATE_PATH=${MAGNUM_STATE_PATH:=$DATA_DIR/magnum} MAGNUM_AUTH_CACHE_DIR=${MAGNUM_AUTH_CACHE_DIR:-/var/cache/magnum} MAGNUM_CONF_DIR=/etc/magnum MAGNUM_CONF=$MAGNUM_CONF_DIR/magnum.conf MAGNUM_API_PASTE=$MAGNUM_CONF_DIR/api-paste.ini MAGNUM_POLICY=$MAGNUM_CONF_DIR/policy.yaml if is_ssl_enabled_service "magnum" || is_service_enabled tls-proxy; then MAGNUM_SERVICE_PROTOCOL="https" fi # Public facing bits MAGNUM_SERVICE_HOST=${MAGNUM_SERVICE_HOST:-$HOST_IP} MAGNUM_SERVICE_PORT=${MAGNUM_SERVICE_PORT:-9511} MAGNUM_SERVICE_PORT_INT=${MAGNUM_SERVICE_PORT_INT:-19511} MAGNUM_SERVICE_PROTOCOL=${MAGNUM_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} MAGNUM_TRUSTEE_DOMAIN_ADMIN_PASSWORD=${MAGNUM_TRUSTEE_DOMAIN_ADMIN_PASSWORD:-secret} MAGNUM_SWIFT_REGISTRY_CONTAINER=${MAGNUM_SWIFT_REGISTRY_CONTAINER:-docker_registry} # Support entry points installation of console scripts if [[ -d $MAGNUM_DIR/bin ]]; then MAGNUM_BIN_DIR=$MAGNUM_DIR/bin else MAGNUM_BIN_DIR=$(get_python_exec_prefix) fi MAGNUM_CONFIGURE_IPTABLES=${MAGNUM_CONFIGURE_IPTABLES:-True} # Functions # --------- # Test if any magnum services are enabled # is_magnum_enabled function is_magnum_enabled { [[ ,${ENABLED_SERVICES} =~ ,"magnum-" ]] && return 0 return 1 } # cleanup_magnum() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_magnum { sudo rm -rf $MAGNUM_STATE_PATH $MAGNUM_AUTH_CACHE_DIR } # configure_magnum() - Set config files, create data dirs, etc function configure_magnum { # Put config files in ``/etc/magnum`` for everyone to find if [[ ! 
-d $MAGNUM_CONF_DIR ]]; then sudo mkdir -p $MAGNUM_CONF_DIR sudo chown $STACK_USER $MAGNUM_CONF_DIR fi # Rebuild the config file from scratch create_magnum_conf create_api_paste_conf } # create_magnum_accounts() - Set up common required magnum accounts # # Project User Roles # ------------------------------------------------------------------ # SERVICE_PROJECT_NAME magnum service function create_magnum_accounts { create_service_user "magnum" "admin" local magnum_service=$(get_or_create_service "magnum" \ "container-infra" "Container Infrastructure Management Service") get_or_create_endpoint $magnum_service \ "$REGION_NAME" \ "$MAGNUM_SERVICE_PROTOCOL://$MAGNUM_SERVICE_HOST:$MAGNUM_SERVICE_PORT/v1" \ "$MAGNUM_SERVICE_PROTOCOL://$MAGNUM_SERVICE_HOST:$MAGNUM_SERVICE_PORT/v1" \ "$MAGNUM_SERVICE_PROTOCOL://$MAGNUM_SERVICE_HOST:$MAGNUM_SERVICE_PORT/v1" } # create_magnum_conf() - Create a new magnum.conf file function create_magnum_conf { # (Re)create ``magnum.conf`` rm -f $MAGNUM_CONF HOSTNAME=`hostname` iniset $MAGNUM_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" iniset $MAGNUM_CONF DEFAULT transport_url \ "rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$RABBIT_HOST" iniset $MAGNUM_CONF DEFAULT host "$HOSTNAME" iniset $MAGNUM_CONF database connection `database_connection_url magnum` iniset $MAGNUM_CONF api host "$MAGNUM_SERVICE_HOST" if is_service_enabled tls-proxy; then iniset $MAGNUM_CONF api port "$MAGNUM_SERVICE_PORT_INT" iniset $MAGNUM_CONF drivers verify_ca true iniset $MAGNUM_CONF drivers openstack_ca_file $SSL_BUNDLE_FILE else iniset $MAGNUM_CONF api port "$MAGNUM_SERVICE_PORT" iniset $MAGNUM_CONF drivers verify_ca false fi iniset $MAGNUM_CONF oslo_policy policy_file $MAGNUM_POLICY iniset $MAGNUM_CONF keystone_auth auth_type password iniset $MAGNUM_CONF keystone_auth username magnum iniset $MAGNUM_CONF keystone_auth password $SERVICE_PASSWORD iniset $MAGNUM_CONF keystone_auth project_name $SERVICE_PROJECT_NAME iniset $MAGNUM_CONF keystone_auth project_domain_id default 
iniset $MAGNUM_CONF keystone_auth user_domain_id default # FIXME(pauloewerton): keystone_authtoken section is deprecated. Remove it # after deprecation period. iniset $MAGNUM_CONF keystone_authtoken admin_user magnum iniset $MAGNUM_CONF keystone_authtoken admin_password $SERVICE_PASSWORD iniset $MAGNUM_CONF keystone_authtoken admin_tenant_name $SERVICE_PROJECT_NAME configure_auth_token_middleware $MAGNUM_CONF magnum $MAGNUM_AUTH_CACHE_DIR iniset $MAGNUM_CONF keystone_auth auth_url $KEYSTONE_AUTH_URI_V3 iniset $MAGNUM_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_URI_V3 iniset $MAGNUM_CONF keystone_authtoken auth_url $KEYSTONE_AUTH_URI_V3 iniset $MAGNUM_CONF keystone_authtoken auth_version v3 if is_fedora || is_suse; then # magnum defaults to /usr/local/bin, but fedora and suse pip like to # install things in /usr/bin iniset $MAGNUM_CONF DEFAULT bindir "/usr/bin" fi if [ -n "$MAGNUM_STATE_PATH" ]; then iniset $MAGNUM_CONF DEFAULT state_path "$MAGNUM_STATE_PATH" iniset $MAGNUM_CONF oslo_concurrency lock_path "$MAGNUM_STATE_PATH" fi if [ "$SYSLOG" != "False" ]; then iniset $MAGNUM_CONF DEFAULT use_syslog "True" fi # Format logging if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then setup_colorized_logging $MAGNUM_CONF DEFAULT else # Show user_name and project_name instead of user_id and project_id iniset $MAGNUM_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" fi # Register SSL certificates if provided if is_ssl_enabled_service magnum; then ensure_certificates MAGNUM iniset $MAGNUM_CONF DEFAULT ssl_cert_file "$MAGNUM_SSL_CERT" iniset $MAGNUM_CONF DEFAULT ssl_key_file "$MAGNUM_SSL_KEY" iniset $MAGNUM_CONF DEFAULT enabled_ssl_apis "$MAGNUM_ENABLED_APIS" fi if is_service_enabled ceilometer; then iniset $MAGNUM_CONF oslo_messaging_notifications driver "messaging" fi if is_service_enabled barbican; then iniset $MAGNUM_CONF certificates 
cert_manager_type "barbican" else iniset $MAGNUM_CONF certificates cert_manager_type "x509keypair" fi trustee_domain_id=$(get_or_create_domain magnum 'Owns users and projects created by magnum') trustee_domain_admin_id=$(get_or_create_user trustee_domain_admin $MAGNUM_TRUSTEE_DOMAIN_ADMIN_PASSWORD $trustee_domain_id) openstack --os-auth-url $KEYSTONE_SERVICE_URI_V3 \ --os-identity-api-version 3 role add \ --user $trustee_domain_admin_id --domain $trustee_domain_id \ admin iniset $MAGNUM_CONF trust cluster_user_trust True iniset $MAGNUM_CONF trust trustee_domain_name magnum iniset $MAGNUM_CONF trust trustee_domain_admin_name trustee_domain_admin iniset $MAGNUM_CONF trust trustee_domain_admin_password $MAGNUM_TRUSTEE_DOMAIN_ADMIN_PASSWORD iniset $MAGNUM_CONF trust trustee_keystone_interface public iniset $MAGNUM_CONF cinder_client region_name $REGION_NAME if is_service_enabled swift; then iniset $MAGNUM_CONF docker_registry swift_region $REGION_NAME iniset $MAGNUM_CONF docker_registry swift_registry_container $MAGNUM_SWIFT_REGISTRY_CONTAINER fi # Get the default volume type from cinder.conf and set the coresponding # default in magnum.conf default_volume_type=$(iniget /etc/cinder/cinder.conf DEFAULT default_volume_type) iniset $MAGNUM_CONF cinder default_docker_volume_type $default_volume_type iniset $MAGNUM_CONF drivers send_cluster_metrics False } function create_api_paste_conf { # copy api_paste.ini cp $MAGNUM_DIR/etc/magnum/api-paste.ini $MAGNUM_API_PASTE } # create_magnum_cache_dir() - Part of the init_magnum() process function create_magnum_cache_dir { # Create cache dir sudo mkdir -p $MAGNUM_AUTH_CACHE_DIR sudo chown $STACK_USER $MAGNUM_AUTH_CACHE_DIR rm -f $MAGNUM_AUTH_CACHE_DIR/* } # init_magnum() - Initialize databases, etc. function init_magnum { # Only do this step once on the API node for an entire cluster. 
if is_service_enabled $DATABASE_BACKENDS && is_service_enabled magnum-api; then # (Re)create magnum database recreate_database magnum # Migrate magnum database $MAGNUM_BIN_DIR/magnum-db-manage upgrade fi create_magnum_cache_dir } # magnum_register_image - Register heat image for magnum with property os_distro function magnum_register_image { local magnum_image_property="--property os_distro=" local atomic="$(echo $MAGNUM_GUEST_IMAGE_URL | grep -io 'atomic' || true;)" if [ ! -z "$atomic" ]; then magnum_image_property=$magnum_image_property"fedora-atomic" fi local ubuntu="$(echo $MAGNUM_GUEST_IMAGE_URL | grep -io "ubuntu" || true;)" if [ ! -z "$ubuntu" ]; then magnum_image_property=$magnum_image_property"ubuntu" fi local coreos="$(echo $MAGNUM_GUEST_IMAGE_URL | grep -io "coreos" || true;)" if [ ! -z "$coreos" ]; then magnum_image_property=$magnum_image_property"coreos" fi # os_distro property for fedora ironic image local fedora_ironic="$(echo $MAGNUM_GUEST_IMAGE_URL | grep -i "ironic" \ | grep -io "fedora" || true;)" if [ ! 
-z "$fedora_ironic" ]; then magnum_image_property=$magnum_image_property"fedora" fi # get the image name local image_filename=$(basename "$MAGNUM_GUEST_IMAGE_URL") local image_name="" for extension in "tgz" "img" "qcow2" "iso" "vhd" "vhdx" "tar.gz" "img.gz" "img.bz2" "vhd.gz" "vhdx.gz" do if [ $(expr match "${image_filename}" ".*\.${extension}$") -ne 0 ]; then image_name=$(basename "$image_filename" ".${extension}") break fi done if [ -z ${image_name} ]; then echo "Unknown image extension in $image_filename, supported extensions: tgz, img, qcow2, iso, vhd, vhdx, tar.gz, img.gz, img.bz2, vhd.gz, vhdx.gz"; false fi openstack --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT --os-image-api-version 2 image set $image_name $magnum_image_property } # install_magnumclient() - Collect source and prepare function install_magnumclient { if use_library_from_git "python-magnumclient"; then git_clone_by_name "python-magnumclient" setup_dev_lib "python-magnumclient" sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-magnumclient"]}/tools/,/etc/bash_completion.d/}magnum.bash_completion fi } # install_magnum() - Collect source and prepare function install_magnum { git_clone $MAGNUM_REPO $MAGNUM_DIR $MAGNUM_BRANCH setup_develop $MAGNUM_DIR } # start_magnum_api() - Start the API process ahead of other things function start_magnum_api { # Get right service port for testing local service_port=$MAGNUM_SERVICE_PORT local service_protocol=$MAGNUM_SERVICE_PROTOCOL if is_service_enabled tls-proxy; then service_port=$MAGNUM_SERVICE_PORT_INT service_protocol="http" fi run_process magnum-api "$MAGNUM_BIN_DIR/magnum-api" echo "Waiting for magnum-api to start..." if ! 
wait_for_service $SERVICE_TIMEOUT $service_protocol://$MAGNUM_SERVICE_HOST:$service_port; then die $LINENO "magnum-api did not start" fi # Start proxies if enabled if is_service_enabled tls-proxy; then start_tls_proxy magnum '*' $MAGNUM_SERVICE_PORT $MAGNUM_SERVICE_HOST $MAGNUM_SERVICE_PORT_INT & fi } # configure_iptables_magnum() - Configure the IP table rules for Magnum function configure_iptables_magnum { if [ "$MAGNUM_CONFIGURE_IPTABLES" != "False" ]; then ROUTE_TO_INTERNET=$(ip route get 8.8.8.8) OBOUND_DEV=$(echo ${ROUTE_TO_INTERNET#*dev} | awk '{print $1}') sudo iptables -t nat -A POSTROUTING -o $OBOUND_DEV -j MASQUERADE # bay nodes will access magnum-api (port $MAGNUM_SERVICE_PORT) to get CA certificate. sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $MAGNUM_SERVICE_PORT -j ACCEPT || true # allow access to keystone etc (http and https) sudo iptables -I INPUT -d $HOST_IP -p tcp --dport 80 -j ACCEPT || true sudo iptables -I INPUT -d $HOST_IP -p tcp --dport 443 -j ACCEPT || true fi } function configure_apache_magnum { # Set redirection for kubernetes openstack cloud provider # FIXME: When [1] is in kubernetes, we won't need the redirection anymore. 
# [1] https://github.com/gophercloud/gophercloud/pull/423 HTACCESS_PATH=/var/www/html if is_ubuntu; then OVERRIDE_CONF_FILE=/etc/apache2/apache2.conf elif is_fedora; then OVERRIDE_CONF_FILE=/etc/httpd/conf/httpd.conf fi # If horizon is enabled then we need if is_service_enabled horizon; then HTACCESS_PATH=$DEST/horizon/.blackhole sudo tee -a $APACHE_CONF_DIR/horizon.conf < Options Indexes FollowSymLinks AllowOverride all Require all granted EOF else sudo tee -a $OVERRIDE_CONF_FILE < Options Indexes FollowSymLinks AllowOverride all Require all granted EOF fi sudo mkdir -p $HTACCESS_PATH sudo tee $HTACCESS_PATH/.htaccess <`_ * **Documentation:** https://docs.openstack.org/magnum/latest/ * **Source:** http://git.openstack.org/cgit/openstack/magnum * **Blueprints:** https://blueprints.launchpad.net/magnum * **Bugs:** http://bugs.launchpad.net/magnum * **REST Client:** http://git.openstack.org/cgit/openstack/python-magnumclient magnum-6.1.0/specs/0000775000175100017510000000000013244017675014154 5ustar zuulzuul00000000000000magnum-6.1.0/specs/container-networking-model.rst0000666000175100017510000004573613244017334022164 0ustar zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode ================================= Magnum Container Networking Model ================================= Launchpad Blueprint: https://blueprints.launchpad.net/magnum/+spec/extensible-network-model For Magnum to prosper, the project must support a range of networking tools and techniques, while maintaining a simple, developer-focused user experience. The first step in achieving this goal is to standardize the process of allocating networking to containers, while providing an abstraction for supporting various networking capabilities through pluggable back-end implementations. This document recommends using Docker's libnetwork library to implement container networking abstractions and plugins. 
Since libnetwork is not a standard and the container ecosystem is rapidly evolving, the Magnum community should continue evaluating container networking options on a frequent basis. Problem Description =================== The container networking ecosystem is undergoing rapid changes. The networking tools and techniques used in today's container deployments are different than twelve months ago and will continue to evolve. For example, Flannel [6]_, Kubernetes preferred networking implementation, was initially released in July of 2014 and was not considered preferred until early 2015. Furthermore, the various container orchestration engines have not standardized on a container networking implementation and may never. For example, Flannel is the preferred container networking implementation for Kubernetes but not for Docker Swarm. Each container networking implementation comes with its own API abstractions, data model, tooling, etc.. Natively supporting each container networking implementation can be a burden on the Magnum community and codebase. By supporting only a subset of container networking implementations, the project may not be widely adopted or may provide a suboptimal user experience. Lastly, Magnum has limited support for advanced container networking functionality. Magnum instantiates container networks behind the scenes through Heat templates, exposing little-to-no user configurability. Some users require the ability to customize their container environments, including networking details. However, networking needs to "just work" for users that require no networking customizations. Roles ----- The following are roles that the Magnum Container Networking Model takes into consideration. Roles are an important reference point when creating user stories. This is because each role provides different functions and has different requirements. 1. Cloud Provider (CP): Provides standard OpenStack cloud infrastructure services, including the Magnum service. 2. 
Container Service Provider (CSP): Uses Magnum to deliver Containers-as-a-Service (CaaS) to users. CSPs are a consumer of CP services and a CaaS provider to users. 3. Users: Consume Magnum services to provision and manage clustered container environments and deploy apps within the container clusters. The container ecosystem focuses on the developer user type. It is imperative that the Magnum Container Networking Model meets the need of this user type. These roles are not mutually exclusive. For example: 1. A CP can also be a CSP. In this case, the CP/CSP provisions and manages standard OpenStack services, the Magnum service, and provides CaaS services to users. 2. A User can also be a CSP. In this case, the user provisions their own baymodels, bays, etc. from the CP. Definitions ----------- COE Container Orchestration Engine Baymodel An object that stores template information about the bay which is used to create new bays consistently. Bay A Magnum resource that includes at least one host to run containers on, and a COE to manage containers created on hosts within the bay. Pod Is the smallest deployable unit that can be created, scheduled, and managed within Kubernetes. Additional Magnum definitions can be found in the Magnum Developer documentation [2]_. Use Cases ---------- This document does not intend to address each use case. The use cases are provided as reference for the long-term development of the Magnum Container Networking Model. As a User: 1. I need to easily deploy containerized apps in an OpenStack cloud. My user experience should be similar to how I deploy containerized apps outside of an OpenStack cloud. 2. I need to have containers communicate with vm-based apps that use OpenStack networking. 3. I need the option to preserve the container's IP address so I can manage containers by IP's, not just ports. 4. I need to block unwanted traffic to/from my containerized apps. 5. I need the ability for my containerized apps to be highly available. 6. 
I need confidence that my traffic is secure from other tenants traffic. As a CSP: 1. I need to easily deploy a bay for consumption by users. The bay must support the following: A. One or more hosts to run containers. B. The ability to choose between virtual or physical hosts to run containers. C. The ability to automatically provision networking to containers. 2. I need to provide clustering options that support different container/image, formats and technologies. 3. After deploying my initial cluster, I need the ability to provide ongoing management, including: A. The ability to add/change/remove networks that containers connect to. B. The ability to add/change/remove nodes within the cluster. 4. I need to deploy a Bay without admin rights to OpenStack services. 5. I need the freedom to choose different container networking tools and techniques offered by the container ecosystem beyond OpenStack. As a CP: 1. I need to easily and reliably add the Magnum service to my existing OpenStack cloud environment. 2. I need to easily manage (monitor, troubleshoot, etc..) the Magnum service. Including the ability to mirror ports to capture traffic for analysis. 3. I need to make the Magnum services highly-available. 4. I need to make Magnum services highly performant. 5. I need to easily scale-out Magnum services as needed. 6. I need Magnum to be robust regardless of failures within the container orchestration engine. Proposed Changes ================ 1. Currently, Magnum supports Flannel [6]_ as the only multi-host container networking implementation. Although Flannel has become widely accepted for providing networking capabilities to Kubernetes-based container clusters, other networking tools exist and future tools may develop. This document proposes extending Magnum to support specifying a container networking implementation through a combination of user-facing baymodel configuration flags. 
Configuration parameters that are common across Magnum or all networking implementations will be exposed as unique flags. For example, a flag named network-driver can be used to instruct Magnum which network driver to use for implementing a baymodel container/pod network. network driver examples may include: flannel, weave, calico, midonet, netplugin, etc.. Here is an example of creating a baymodel that uses Flannel as the network driver: :: magnum baymodel-create --name k8sbaymodel \ --image-id fedora-21-atomic-5 \ --keypair-id testkey \ --external-network-id 1hsdhs88sddds889 \ --dns-nameserver 8.8.8.8 \ --flavor-id m1.small \ --docker-volume-size 5 \ --coe kubernetes \ --network-driver flannel If no network-driver parameter is supplied by the user, the baymodel is created using the default network driver of the specified Magnum COE. Each COE must support a default network driver and each driver must provide reasonable default configurations that allow users to instantiate a COE without supplying labels. The default network driver for each COE should be consistent with existing Magnum default settings. Where current defaults do not exist, the defaults should be consistent with upstream network driver projects. 2. Each network driver supports a range of configuration parameters that should be observed by Magnum. This document suggests using an attribute named "labels" for supplying driver-specific configuration parameters. Labels consist of one or more arbitrary key/value pairs. 
Here is an example of using labels to change default settings of the Flannel network driver: :: magnum baymodel-create --name k8sbaymodel \ --image-id fedora-21-atomic-5 \ --keypair-id testkey \ --external-network-id ${NIC_ID} \ --dns-nameserver 8.8.8.8 \ --flavor-id m1.small \ --docker-volume-size 5 \ --coe kubernetes \ --network-driver flannel \ --labels flannel_network_cidr=10.0.0.0/8,\ flannel_network_subnetlen=22,\ flannel_backend=vxlan With Magnum's current implementation, this document would support labels for the Kubernetes COE type. However, labels are applicable beyond Kubernetes, as the Docker daemon, images and containers now support labels as a mechanism for providing custom metadata. The labels attribute within Magnum should be extended beyond Kubernetes pods, so a single mechanism can be used to pass arbitrary metadata throughout the entire system. A blueprint [2]_ has been registered to expand the scope of labels for Magnum. This document intends on adhering to the expand-labels-scope blueprint. Note: Support for daemon-labels was added in Docker 1.4.1. Labels for containers and images were introduced in Docker 1.6.0 If the --network-driver flag is specified without any labels, default configuration values of the driver will be used by the baymodel. These defaults are set within the Heat template of the associated COE. Magnum should ignore label keys and/or values not understood by any of the templates during the baymodel operation. Magnum will continue to CRUD bays in the same way: magnum bay-create --name k8sbay --baymodel k8sbaymodel --node-count 1 3. Update python-magnumclient to understand the new Container Networking Model attributes. The client should also be updated to support passing the --labels flag according to the expand-labels-scope blueprint [2]_. 4. Update the conductor template definitions to support the new Container Networking Model attributes. 5. Refactor Heat templates to support the Magnum Container Networking Model. 
Currently, Heat templates embed Flannel-specific configuration within top-level templates. For example, the top-level Kubernetes Heat template [8]_ contains the flannel_network_subnetlen parameter. Network driver specific configurations should be removed from all top-level templates and instead be implemented in one or more template fragments. As it relates to container networking, top-level templates should only expose the labels and generalized parameters such as network-driver. Heat templates, template definitions and definition entry points should be suited for composition, allowing for a range of supported labels. This document intends to follow the refactor-heat-templates blueprint [3]_ to achieve this goal. 6. Update unit and functional tests to support the new attributes of the Magnum Container Networking Model. 7. The spec will not add support for natively managing container networks. Due to each network driver supporting different API operations, this document suggests that Magnum not natively manage container networks at this time and instead leave this job to native tools. References [4]_ [5]_ [6]_ [7]_. provide additional details to common labels operations. 8. Since implementing the expand-labels-scope blueprint [2]_ may take a while, exposing network functionality through baymodel configuration parameters should be considered as an interim solution. Alternatives ------------ 1. Observe all networking configuration parameters, including labels within a configuration file instead of exposing the labels attribute to the user. 2. Only support a single networking implementation such as Flannel. Flannel is currently supported for the Kubernetes COE type. It can be ported to support the swarm COE type. 3. Add support for managing container networks. This will require adding abstractions for each supported network driver or creating an abstraction layer that covers all possible network drivers. 4. 
Use the Kuryr project [10]_ to provide networking to Magnum containers. Kuryr currently contains no documentation or code, so this alternative is highly unlikely if the Magnum community requires a pluggable container networking implementation in the near future. However, Kuryr could become the long-term solution for container networking within OpenStack. A decision should be made by the Magnum community whether to move forward with Magnum's own container networking model or to wait for Kuryr to mature. In the meantime, this document suggests the Magnum community become involved in the Kuryr project. Data Model Impact ----------------- This document adds the labels and network-driver attribute to the baymodel database table. A migration script will be provided to support the attribute being added. :: +-------------------+-----------------+---------------------------------------------+ | Attribute | Type | Description | +===================+=================+=============================================+ | labels | JSONEncodedDict | One or more arbitrary key/value pairs | +-------------------+-----------------+---------------------------------------------+ | network-driver | string | Container networking backend implementation | +-------------------+-----------------+---------------------------------------------+ REST API Impact --------------- This document adds the labels and network-driver attribute to the BayModel API class. 
:: +-------------------+-----------------+---------------------------------------------+ | Attribute | Type | Description | +===================+=================+=============================================+ | labels | JSONEncodedDict | One or more arbitrary key/value pairs | +-------------------+-----------------+---------------------------------------------+ | network-driver | string | Container networking backend implementation | +-------------------+-----------------+---------------------------------------------+ Security Impact --------------- Supporting more than one network driver increases the attack footprint of Magnum. Notifications Impact -------------------- None Other End User Impact --------------------- Most end users will never use the labels configuration flag and simply use the default network driver and associated configuration options. For those that wish to customize their container networking environment, it will be important to understand what network-driver and labels are supported, along with their associated configuration options, capabilities, etc.. Performance Impact ------------------ Performance will depend upon the chosen network driver and its associated configuration. For example, when creating a baymodel with "--network-driver flannel" flag, Flannel's default configuration will be used. If the default for Flannel is an overlay networking technique (i.e. VXLAN), then networking performance will be less than if Flannel used the host-gw configuration that does not perform additional packet encapsulation to/from containers. If additional performance is required when using this driver, Flannel's host-gw configuration option could be exposed by the associated Heat template and instantiated through the labels attribute. Other Deployer Impact --------------------- Currently, container networking and OpenStack networking are different entities. 
Since no integration exists between the two, deployers/operators will be required to manage each networking environment individually. However, Magnum users will continue to deploy baymodels, bays, containers, etc. without having to specify any networking parameters. This will be accomplished by setting reasonable default parameters within the Heat templates. Developer impact ---------------- None Implementation ============== Assignee(s) ----------- Primary assignee: Daneyon Hansen (danehans) Other contributors: Ton Ngo (Tango) Hongbin Lu (hongbin) Work Items ---------- 1. Extend the Magnum API to support new baymodel attributes. 2. Extend the Client API to support new baymodel attributes. 3. Extend baymodel objects to support new baymodel attributes. Provide a database migration script for adding attributes. 4. Refactor Heat templates to support the Magnum Container Networking Model. 5. Update Conductor template definitions and definition entry points to support Heat template refactoring. 6. Extend unit and functional tests to support new baymodel attributes. Dependencies ============ Although adding support for these new attributes does not depend on the following blueprints, it's highly recommended that the Magnum Container Networking Model be developed in concert with the blueprints to maintain development continuity within the project. 1. Common Plugin Framework Blueprint [1]_. 2. Expand the Scope of Labels Blueprint [9]_. 3. Refactor Heat Templates, Definitions and Entry Points Blueprint [3]_. Testing ======= Each commit will be accompanied with unit tests. There will also be functional tests which will be used as part of a cross-functional gate test for Magnum. Documentation Impact ==================== The Magnum Developer Quickstart document will be updated to support the configuration flags introduced by this document. Additionally, background information on how to use these flags will be included. References ========== .. 
[1] https://blueprints.launchpad.net/magnum/+spec/common-plugin-framework .. [2] http://docs.openstack.org/developer/magnum/ .. [3] https://blueprints.launchpad.net/magnum/+spec/refactor-heat-templates .. [4] https://github.com/docker/libnetwork/blob/master/docs/design.md .. [5] https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/design/networking.md .. [6] https://github.com/coreos/flannel .. [7] https://github.com/coreos/rkt/blob/master/Documentation/networking.md .. [8] https://github.com/openstack/magnum/blob/master/magnum/templates/kubernetes/kubecluster.yaml .. [9] https://blueprints.launchpad.net/magnum/+spec/expand-labels-scope .. [10] https://github.com/openstack/kuryr magnum-6.1.0/specs/async-container-operation.rst0000666000175100017510000004323513244017334022002 0ustar zuulzuul00000000000000================================= Asynchronous Container Operations ================================= Launchpad blueprint: https://blueprints.launchpad.net/magnum/+spec/async-container-operations At present, container operations are done in a synchronous way, end-to-end. This model does not scale well, and incurs a penalty on the client to be stuck till the end of completion of the operation. Problem Description ------------------- At present Magnum-Conductor executes the container operation as part of processing the request forwarded from Magnum-API. For container-create, if the image needs to be pulled down, it may take a while depending on the responsiveness of the registry, which can be a substantial delay. At the same time, experiments suggest that even for pre-pulled image, the time taken by each operations, namely create/start/delete, are in the same order, as it involves complete turn around between the magnum-client and the COE-API, via Magnum-API and Magnum-Conductor[1]. Use Cases --------- For wider enterprise adoption of Magnum, we need it to scale better. 
For that we need to replace some of these synchronous behaviors with a suitable alternative asynchronous implementation.
Following is the summary of the design - 1. Configurable mode of operation - async ----------------------------------------- For ease of adoption, the async_mode of communication between API-conductor, conductor-COE in magnum, can be controlled using a configuration option. So the code-path for sync mode and async mode would co-exist for now. To achieve this with minimal/no code duplication and cleaner interface, we are using openstack/futurist[4]. Futurist interface hides the details of type of executor being used. In case of async configuration, a greenthreadpool of configured poolsize gets created. Here is a sample of how the config would look like: :: [DEFAULT] async_enable = False [conductor] async_threadpool_max_workers = 64 Futurist library is used in oslo.messaging. Thus, it is used by almost all OpenStack projects, in effect. Futurist is very useful to run same code under different execution model and hence saving potential duplication of code. 2. Type of operations --------------------- There are two classes of container operations - one that can be made async, namely create/delete/start/stop/pause/unpause/reboot, which do not need data about the container in return. The other type requires data, namely container-logs. For async-type container-operations, magnum-API will be using 'cast' instead of 'call' from oslo_messaging[5]. 'cast' from oslo.messaging.rpcclient is used to invoke a method and return immediately, whereas 'call' invokes a method and waits for a reply. While operating in asynchronous mode, it is intuitive to use cast method, as the result of the response may not be available immediately. Magnum-api first fetches the details of a container, by doing 'get_rpc_resource'. This function uses magnum objects. Hence, this function uses a 'call' method underneath. Once, magnum-api gets back the details, it issues the container operation next, using another 'call' method. The above proposal is to replace the second 'call' with 'cast'. 
If user issues a container operation, when there is no listening conductor (because of process failure), there will be a RPC timeout at the first 'call' method. In this case, user will observe the request to get blocked at client and finally fail with HTTP 500 ERROR, after the RPC timeout, which is 60 seconds by default. This behavior is independent of the usage of 'cast' or 'call' for the second message, mentioned above. This behavior does not influence our design, but it is documented here for clarity of understanding. 3. Ensuring the order of execution - Phase-0 -------------------------------------------- Magnum-conductor needs to ensure that for a given bay and given container, the operations are executed in sequence. In phase-0, we want to demonstrate how asynchronous behavior helps scaling. Asynchronous mode of container operations would be supported for single magnum-conductor scenario, in phase-0. If magnum-conductor crashes, there will be no recovery for the operations accepted earlier - which means no persistence in phase-0, for operations accepted by magnum-conductor. Multiple conductor scenario and persistence will be addressed in phase-1 [please refer to the next section for further details]. If COE crashes or does not respond, the error will be detected, as it happens in sync mode, and reflected on the container-status. Magnum-conductor will maintain a job-queue. Job-queue is indexed by bay-id and container-id. A job-queue entry would contain the sequence of operations requested for a given bay-id and container-id, in temporal order. A greenthread will execute the tasks/operations in order for a given job-queue entry, till the queue empties. Using a greenthread in this fashion saves us from the cost and complexity of locking, along with functional correctness. When request for new operation comes in, it gets appended to the corresponding queue entry. 
For a sequence of container operations, if an intermediate operation fails, we will stop continuing the sequence. The community feels more confident to start with this strictly defensive policy[17]. The failure will be logged and saved into the container-object, which will help an operator be informed better about the result of the sequence of container operations. We may revisit this policy later, if we think it is too restrictive. 4. Ensuring the order of execution - phase-1 -------------------------------------------- The goal is to execute requests for a given bay and a given container in sequence. In phase-1, we want to address persistence and capability of supporting multiple magnum-conductor processes. To achieve this, we will reuse the concepts laid out in phase-0 and use a standard library. We propose to use taskflow[7] for this implementation. Magnum-conductors will consume the AMQP message and post a task[8] on a taskflow jobboard[9]. Greenthreads from magnum-conductors would subscribe to the taskflow jobboard as taskflow-conductors[10]. Taskflow jobboard is maintained with a choice of persistent backend[11]. This will help address the concern of persistence for accepted operations, when a conductor crashes. Taskflow will ensure that tasks, namely container operations, in a job, namely a sequence of operations for a given bay and container, would execute in sequence. We can easily notice that some of the concepts used in phase-0 are reused as it is. For example, job-queue maps to jobboard here, use of greenthread maps to the conductor concept of taskflow. Hence, we expect easier migration from phase-0 to phase-1, with the choice of taskflow. For taskflow jobboard[11], the available choices of backend are Zookeeper and Redis. But, we plan to use MySQL as default choice of backend, for magnum conductor jobboard use-case. This support will be added to taskflow. 
Later, we may choose to support the flexibility of other backends like ZK/Redis via configuration. But, phase-1 will keep the implementation simple with MySQL backend and revisit this, if required. Let's consider the scenarios of Conductor crashing - - If a task is added to jobboard, and conductor crashes after that, taskflow can assign a particular job to any available greenthread agents from other conductor instances. If the system was running with single magnum-conductor, it will wait for the conductor to come back and join. - A task is picked up and magnum-conductor crashes. In this case, the task is not complete from jobboard point-of-view. As taskflow detects the conductor going away, it assigns another available conductor. - When conductor picks up a message from AMQP, it will acknowledge AMQP, only after persisting it to jobboard. This will prevent losing the message, if conductor crashes after picking up the message from AMQP. Explicit acknowledgement from application may use NotificationResult.HANDLED[12] to AMQP. We may use the at-least-one-guarantee[13] feature in oslo.messaging[14], as it becomes available. To summarize some of the important outcomes of this proposal - - A taskflow job represents the sequence of container operations on a given bay and given container. At a given point of time, the sequence may contain a single or multiple operations. - There will be a single jobboard for all conductors. - Task-flow conductors are multiple greenthreads from a given magnum-conductor. - Taskflow-conductor will run in 'blocking' mode[15], as those greenthreads have no other job than claiming and executing the jobs from jobboard. - Individual jobs are supposed to maintain a temporal sequence. So the taskflow-engine would be 'serial'[16]. - The proposed model for a 'job' is to consist of a temporal sequence of 'tasks' - operations on a given bay and a given container. 
Henceforth, it is expected that when a given operation, namely container-create is in progress, a request for container-start may come in. Adding the task to the existing job is intuitive to maintain the sequence of operations. To fit taskflow exactly into our use-case, we may need to do two enhancements in taskflow - - Supporting mysql plugin as a DB backend for jobboard. Support for redis exists, so it will be similar. We do not see any technical roadblock for adding mysql support for taskflow jobboard. If the proposal does not get approved by taskflow team, we may have to use redis, as an alternative option. - Support for dynamically adding tasks to a job on jobboard. This also looks feasible, as discussed over the #openstack-state-management [Unfortunately, this channel is not logged, but if we agree in this direction, we can initiate discussion over ML, too] If taskflow team does not allow adding this feature, even though they have agreed now, we will use the dependency feature in taskflow. We will explore and elaborate this further, if it requires. 5. Status of progress --------------------- The progress of execution of a container operation is reflected on the status of a container as - 'create-in-progress', 'delete-in-progress' etc. Alternatives ------------ Without an asynchronous implementation, Magnum will suffer from complaints about poor scalability and slowness. In this design, stack-lock[3] has been considered as an alternative to taskflow. Following are the reasons for preferring taskflow over stack-lock, as of now, - Stack-lock used in Heat is not a library, so it will require making a copy for Magnum, which is not desirable. - Taskflow is relatively mature, well supported, feature-rich library. - Taskflow has in-built capacity to scale out[in] as multiple conductors can join in[out] the cluster. - Taskflow has a failure detection and recovery mechanism. If a process crashes, then worker threads from other conductor may continue the execution. 
In this design, we describe futurist[4] as a choice of implementation. The choice was to prevent duplication of code for async and sync mode. For this purpose, we could not find any other solution to compare. Data model impact ----------------- Phase-0 has no data model impact. But phase-1 may introduce an additional table into the Magnum database. As per the present proposal for using taskflow in phase-1, we have to introduce a new table for jobboard under magnum db. This table will be exposed to taskflow library as a persistent db plugin. Alternatively, an implementation with stack-lock will also require an introduction of a new table for stack-lock objects. REST API impact --------------- None. Security impact --------------- None. Notifications impact -------------------- None Other end user impact --------------------- None Performance impact ------------------ Asynchronous mode of operation helps in scalability. Hence, it improves responsiveness and reduces the turn around time in a significant proportion. A small test on devstack, comparing both the modes, demonstrate this with numbers.[1] Other deployer impact --------------------- None. Developer impact ---------------- None Implementation -------------- Assignee(s) ----------- Primary assignee suro-patz(Surojit Pathak) Work Items ---------- For phase-0 * Introduce config knob for asynchronous mode of container operations. * Changes for Magnum-API to use CAST instead of CALL for operations eligible for asynchronous mode. * Implement the in-memory job-queue in Magnum conductor, and integrate futurist library. * Unit tests and functional tests for async mode. * Documentation changes. For phase-1 * Get the dependencies on taskflow being resolved. * Introduce jobboard table into Magnum DB. * Integrate taskflow in Magnum conductor to replace the in-memory job-queue with taskflow jobboard. Also, we need conductor greenthreads to subscribe as workers to the taskflow jobboard. 
* Add unit tests and functional tests for persistence and multiple conductor scenario. * Documentation changes. For phase-2 * We will promote asynchronous mode of operation as the default mode of operation. * We may decide to drop the code for synchronous mode and corresponding config. * Documentation changes. Dependencies ------------ For phase-1, if we choose to implement using taskflow, we need to get following two features added to taskflow first - * Ability to add new task to an existing job on jobboard. * mysql plugin support as persistent DB. Testing ------- All the existing test cases are run to ensure async mode does not break them. Additionally more functional tests and unit tests will be added specific to async mode. Documentation Impact -------------------- Magnum documentation will include a description of the option for asynchronous mode of container operations and its benefits. We will also add to developer documentation on guideline for implementing a container operation in both the modes - sync and async. We will add a section on 'how to debug container operations in async mode'. The phase-0 and phase-1 implementation and their support for single or multiple conductors will be clearly documented for the operators. 
References ---------- [1] - Execution time comparison between sync and async modes: https://gist.github.com/surojit-pathak/2cbdad5b8bf5b569e755 [2] - Proposed change under review: https://review.openstack.org/#/c/267134/ [3] - Heat's use of stacklock http://docs.openstack.org/developer/heat/_modules/heat/engine/stack_lock.html [4] - openstack/futurist http://docs.openstack.org/developer/futurist/ [5] - openstack/oslo.messaging http://docs.openstack.org/developer/oslo.messaging/rpcclient.html [6] - ML discussion on the design http://lists.openstack.org/pipermail/openstack-dev/2015-December/082524.html [7] - Taskflow library http://docs.openstack.org/developer/taskflow/ [8] - task in taskflow http://docs.openstack.org/developer/taskflow/atoms.html#task [9] - job and jobboard in taskflow http://docs.openstack.org/developer/taskflow/jobs.html [10] - conductor in taskflow http://docs.openstack.org/developer/taskflow/conductors.html [11] - persistent backend support in taskflow http://docs.openstack.org/developer/taskflow/persistence.html [12] - oslo.messaging notification handler http://docs.openstack.org/developer/oslo.messaging/notification_listener.html [13] - Blueprint for at-least-once-guarantee, oslo.messaging https://blueprints.launchpad.net/oslo.messaging/+spec/at-least-once-guarantee [14] - Patchset under review for at-least-once-guarantee, oslo.messaging https://review.openstack.org/#/c/229186/ [15] - Taskflow blocking mode for conductor http://docs.openstack.org/developer/taskflow/conductors.html#taskflow.conductors.backends.impl_executor.ExecutorConductor [16] - Taskflow serial engine http://docs.openstack.org/developer/taskflow/engines.html [17] - Community feedback on policy to handle failure within a sequence http://eavesdrop.openstack.org/irclogs/%23openstack-containers/%23openstack-containers.2016-03-08.log.html#t2016-03-08T20:41:17 magnum-6.1.0/specs/open-dcos.rst0000666000175100017510000001460213244017334016572 0ustar zuulzuul00000000000000.. 
This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode ================================= Magnum and Open DC/OS Integration ================================= Launchpad Blueprint: https://blueprints.launchpad.net/magnum/+spec/mesos-dcos Open DC/OS [1]_ is a distributed operating system based on the Apache Mesos distributed systems kernel. It enables the management of multiple machines as if they were a single computer. It automates resource management, schedules process placement, facilitates inter-process communication, and simplifies the installation and management of distributed services. Its included web interface and available command-line interface (CLI) facilitate remote management and monitoring of the cluster and its services. Open DC/OS now supports both docker containerizer and mesos containerizer. The mesos containerizer support both docker and AppC image spec, the mesos containerizer can manage docker containers well even if docker daemon is not running. End user can install Open DC/OS with different ways, such as vagrant, cloud, local etc. For cloud, the Open DC/OS only supports AWS now, end user can deploy a DC/OS cluster quickly with a template. For local install, there are many steps to install a Open DC/OS cluster. Problem Description =================== COEs (Container Orchestration Engines) are the first class citizen in Magnum, there are different COEs in Magnum now including Kubernetes, Swarm and Mesos. All of those COEs are focusing docker container management, the problem is that the concept of container is not only limited in docker container, but also others, such as AppC, linux container etc, Open DC/OS is planning to support different containers by leveraging Mesos unified container feature and the Open DC/OS has a better management console for container orchestration. 
Currently, Magnum provides limited support for Mesos Bay as there is only one framework named as Marathon running on top of Mesos. Compared with Open DC/OS, the current Mesos Bay lack the following features: 1. App Store for application management. The Open DC/OS has a universe to provide app store functions. 2. Different container technology support. The Open DC/OS support different container technologies, such as docker, AppC etc, and may introduce OCI support in future. Introducing Open DC/OS Bay can enable Magnum to support more container technologies. 3. Better external storage integration. The Open DC/OS is planning to introduce docker volume isolator support in next release, the docker volume isolator is leveraging docker volume driver API to integrate with 3rd party distributed storage platforms, such as OpenStack Cinder, GlusterFS, Ceph etc. 4. Better network management. The Open DC/OS is planning to introduce CNI network isolator in next release, the CNI network isolator is leveraging CNI technologies to manage network for containers. 5. Loosely coupled with docker daemon. The Open DC/OS can work well for docker container even if docker daemon is not running. The docker daemon now have some issues in large scale cluster, so this approach avoids the limitation of the docker daemon but still can enable end user get some docker features in large scale cluster. Proposed Changes ================ We propose extending Magnum as follows. 1. Leverage bay driver work and structure this new COE as a bay driver. 2. Leverage mesos-slave-flags [3]_ to customize Open DC/OS. 
Here is an example of creating an Open DC/OS baymodel that uses docker/volume as isolator, linux as launcher and docker as image provider: :: magnum baymodel-create --name dcosbaymodel \ --image-id dcos-centos-7.2 \ --keypair-id testkey \ --external-network-id 1hsdhs88sddds889 \ --dns-nameserver 8.8.8.8 \ --flavor-id m1.small \ --docker-volume-size 5 \ --coe dcos \ --labels isolation=docker/volume,\ launcher=linux, \ image_providers=docker Magnum will validate the labels together with the driver specified before creating the bay and will return an error if the validation fails. Magnum will continue to CRUD bays in the same way: magnum bay-create --name dcosbay --baymodel dcosbaymodel --node-count 1 3. Keep the old Mesos Bay and add a new Open DC/OS Bay. Once the Open DC/OS Bay is stable, deprecate the Mesos Bay. 4. Update unit and functional tests to support Open DC/OS Bay, it is also an option to verify the Open DC/OS Bay in gate. 5. Preserve the user experience by ensuring that any operation on Open DC/OS Bay will be identical between a COE deployed by Magnum and a COE deployed by other methods. REST API Impact --------------- There will be no REST API exposed from Magnum for end user to operate Open DC/OS, end user can logon to Open DC/OS dashboard or call Open DC/OS REST API directly to manage the containers or the applications. Implementation ============== Assignee(s) ----------- Primary assignee: - Guang Ya Liu (jay-lau-513) Other contributors: - Qun Wang (wangqun) - Gao Jin Cao Work Items ---------- 1. Build VM image for Open DC/OS Bay. 2. Add Open DC/OS Bay driver. 3. Add Heat template for Open DC/OS Bay. 4. Add Open DC/OS Bay monitor. 5. Document how to use the Open DC/OS Bay. Dependencies ============ 1. This blueprint will focus on running on Open DC/OS in CentOS 7.2. 2. Depend on blueprint https://blueprints.launchpad.net/magnum/+spec/mesos-slave-flags Testing ======= Each commit will be accompanied with unit tests. 
There will also be functional tests which will be used as part of a cross-functional gate test for Magnum. Documentation Impact ==================== The Magnum Developer Quickstart document will be updated to support the Open DC/OS Bay introduced by including a short example and a full documentation with all the explanation for the labels in the user guide. Additionally, background information on how to use the Open DC/OS Bay will be included. References ========== .. [1] https://dcos.io/docs/1.7/overview/what-is-dcos/ .. [2] https://dcos.io/install/ .. [3] https://blueprints.launchpad.net/magnum/+spec/mesos-slave-flags magnum-6.1.0/specs/containers-service.rst0000666000175100017510000004637213244017334020517 0ustar zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode ================== Containers Service ================== Launchpad blueprint: https://blueprints.launchpad.net/nova/+spec/containers-service Containers share many features in common with Nova instances. For the common features, virt drivers for Nova can be used to surface basic instance functionality. For features that go beyond what can be naturally fit within a virt driver, we propose a new API service that allows for advanced features to be added without conflating the worlds of instances and containers. Some examples of containers specific features are setting of shell environment variables, and accepting a shell command to execute at runtime. Capturing the STDIO of the process(es) within a container, and tracking the return status of processes are all beyond the scope of what was contemplated for Nova. All of these features will be implemented in the Containers Service. Problem description =================== Container technology is rapidly gaining popularity as a way to bundle and deploy applications. 
Recognizing and adapting to this trend will position OpenStack to be useful not only to clouds that employ bare metal and virtual machine instances, but can remain competitive in offering container services as well. Nova's concepts of an instance, and the actions that may be taken on it do not match completely with containers. Use cases --------- 1. App Consolidation. End-user wants to run multiple small applications in separate operating system environments, but wants to optimize for efficiency to control hosting costs. Each application belongs to the same tenant, so security isolation between applications is nice-to-have but not critical. Isolation is desired primarily for simplified management of the execution environment for each application. 2. App Portability. End-user wants to create a single container image, and deploy the same image to multiple hosting environments, including OpenStack. Other environments may include local servers, dedicated servers, private clouds, and public clouds. Switching environments requires passing database connection strings by environment variables at the time a container starts to allow the application to use the services available in each environment without changing the container image. 3. Docker Compatibility. End-user has a Dockerfile used to build an application and its runtime environment and dependencies in a Docker container image. They want an easy way to run the Docker resulting image on an OpenStack cloud. 4. LXC Compatibility. End-user wants an easy way to remotely create multiple LXC containers within a single Nova instance. 5. OpenVZ Compatibility. End-user wants an easy way to remotely create multiple OpenVZ containers within a single Nova instance. 6. Containers-Centric World View. 
End-user wants to communicate with a single OpenStack API, and request the addition of containers, without the need to be concerned with keeping track of how many containers are already running on a given Nova instance, and when more need to be created. They want to simply create and remove containers, and allow the appropriate resource scheduling to happen automatically. 7. Platform Integration. Cloud operator already has an OpenStack cloud, and wants to add a service/application centric management system on top. Examples of such systems are Cloud Foundry, Kubernetes, Apache Mesos, etc. The selected system is already Docker compatible. Allow this cloud operator easy integration with OpenStack to run applications in containers. The Cloud Operator now harnesses the power of both the management system, and OpenStack, and does not need to manage a second infrastructure for his/her application hosting needs. All details involving the integration of containers with Nova instances is managed by OpenStack. 8. Container network. End-user wants to define a custom overlay network for containers, and wants to have admin privilege to manage the network topology. Building a container network can decouple application deployment and management from the underlying network infrastructure, and enable additional usage scenario, such as (i) software-defined networking, and (ii) extending the container network (i.e. connecting various resources from multiple hosting environments). End-users want a single service that could help them build the container network, and dynamically modify the network topology by adding or removing containers to or from the network. 9. Permit secure use of native REST APIs. Provide two models of operation with Magnum. The first model allows Magnum to manage the lifecycle of Pods, ReplicationControllers, and Services. 
The second model allows end-users to manage the lifecycle of Pods, ReplicationControllers, and Services by providing direct secure access to the native ReST APIs in Kubernetes and possibly Docker. Long Term Use Cases ------------------- These use cases have been identified by the community as important, but unlikely to be tackled in short term (especially prior to incubation). We wish to adapt to these use cases in long term, but this is not a firm project commitment. 1. Multi-region/multi-cloud support. End-user wants to deploy applications to multiple regions/clouds, and dynamically relocate deployed applications across different regions/clouds. In particular, they want a single service that could help them (i) provision nodes from multiple regions/clouds, thus running containers on top of them, and (ii) dynamically relocate containers (e.g. through container migration) between nodes regardless of the underlying infrastructure. Proposed change =============== Add a new API service for CRUD and advanced management of containers. If cloud operators only want to offer basic instance features for their containers, they may use nova with an alternate virt-driver, such as libvirt/lxc or nova-docker. For those wanting a full-featured container experience, they may offer the Containers Service API as well, in combination with Nova instances that contain an OpenStack agent that connects to the containers service through a security controlled agent (daemon) that allows the OpenStack control plane to provision and control containers running on Compute Hosts. The Containers Service will call the Nova API to create one or more Nova instances inside which containers will be created. The Nova instances may be of any type, depending on the virt driver(s) chosen by the cloud operator. This includes bare-metal, virtual machines, containers, and potentially other instance types. This allows the following configurations of containers in OpenStack. 
* Containers in Virtual Machine Instances * Containers in Bare Metal Instances * Containers in Container Instances (nested) The concept of nesting containers is currently possible if the parent container runs in privileged mode. Patches to the linux kernel are being developed to allow nesting of non-privileged containers as well, which provides a higher level of security. The spirit of this plan aims to duplicate as little as possible between Nova and the Containers Service. Common components like the scheduler are expected to be abstracted into modules, such as Gantt that can be shared by multiple projects. Until Gantt is ready for use by the Containers Service, we will implement only two provisioning schemes for containers: 1. Create a container on a specified instance by using a nova instance guid. 2. Auto-create instances (applies only until the Gantt scheduler is used) 2.1. Fill them sequentially until full. 2.2. Remove them automatically when they become empty. The above orchestration will be implemented using Heat. This requires some kind of hypervisor painting (such as host aggregates) for security reasons. The diagram below offers an overview of the system architecture. The OSC box indicates an OpenStack client, which will communicate with the Containers Service through a REST API. The containers service may silently create Nova instances if one with enough capacity to host the requested container is not already known to the Containers service. The containers service will maintain a database "Map" of containers, and what Nova instance each belongs to. Nova creates instances. Instances are created in Nova, and containers belong only to the Containers Service, and run within a Nova instance. If the instance includes the agent software "A", then it may be included in the inventory of the Containers service. Instances that do not contain an agent may not interact with the Containers Service, and can be controlled only by a Nova virt driver. 
::                            +---------+                            |   OSC   |                            +----+----+                                 |                            +----+----+ +-------- Nova -------+  +-+  REST   +-- Containers -+ |                     |  | +---------+    Service    | |                     |  |                           | |           +-------+ +--+ +-----+                   | |           | Gantt | |  | | Map |                   | |           +-------+ |  | +-----+                   | |                     |  |                           | +-----------+---------+  +---------------+-----------+             |                            | +-----------+----+ Compute Host ---------|-----------+ |                                    +---+---+       | |                               +----+ Relay +---+   | |                               |    +-------+   |   | |                               |                |   | | +-- Instance --+ +-- Instance |-+ +-- Instance |-+ | | |              | |            | | |            | | | | |              | |        +---+ | |        +---+ | | | |              | |        |   | | |        |   | | | | |              | |        | A | | |        | A | | | | |              | |        |   | | |        |   | | | | |              | |        +---+ | |        +---+ | | | |              | |              | |              | | | |              | | +---+  +---+ | | +---+  +---+ | | | |              | | |   |  |   | | | |   |  |   | | | | |              | | | C |  | C | | | | C |  | C | | | | |              | | |   |  |   | | | |   |  |   | | | | |              | | +---+  +---+ | | +---+  +---+ | | | |              | |              | |              | | | +--------------+ +--------------+ +--------------+ | |                                                    | +----------------------------------------------------+ +---+ | | | A | = Agent | | +---+ +---+ | | | C | = Container | | +---+ Design Principles ----------------- 1. 
Leverage existing OpenStack projects for what they are good at. Do not duplicate functionality, or copy code that can be otherwise accessed through API calls. 2. Keep modifications to Nova to a minimum. 3. Make the user experience for end users simple and familiar. 4. Allow for implementation of all features containers are intended to offer. Alternatives ------------ 1. Extending Nova's existing feature set to offer container features 1.1. Container features don't fit into Nova's idea of compute (VM/Server) 2. A completely separate containers service forked from Nova. 2.1. Would result in large overlap and duplication in features and code Data model impact ----------------- For Nova, None. All new data planned will be in the Containers Service. REST API impact --------------- For Nova, none. All new API calls will be implemented in the Containers Service. The OpenStack Containers Service API will be a superset of functionality offered by the `Docker Remote API: `_ with additions to make it suitable for general use regardless of the backend container technology used, and to be compatible with OpenStack multi-tenancy and Keystone authentication. Specific Additions: 1. Support for the X-Auth-Project-Id HTTP request header to allow for multi-tenant use. 2. Support for the X-Auth-Token HTTP request header to allow for authentication with keystone. If either of the above headers is missing, a 401 Unauthorized response will be generated. Docker CLI clients may communicate with a Swarmd instance that is configured to use the OpenStack Containers API as the backend for libswarm. This will allow for tool compatibility with the Docker ecosystem using the officially supported means for integration of a distributed system. 
The scope of the full API will cause this spec to be too long to review, so the intent is to deal with the specific API design as a series of Gerrit reviews that submit API code as Not Implemented stubs with docstrings that clearly document the design, to allow for approval, and further implementation. Security impact --------------- Because Nova will not be changed, there should be no security impacts to Nova. The Containers Service implementation will have the following security related issues: * Need to authenticate against keystone using python-keystoneclient. * A trust token from Nova will be needed in order for the Containers Service to call the Nova API on behalf of a user. * Limits must be implemented to control resource consumption in accordance with quotas. * Providing STDIO access may generate a considerable amount of network chatter between containers and clients through the relay. This could lead to bandwidth congestion at the relays, or API nodes. An approach similar to how we handle serial console access today will need to be considered to mitigate this concern. Using containers implies a range of security considerations for cloud operators. These include: * Containers in the same instance share an operating system. If the kernel is exploited using a security vulnerability, processes in one container may escape the constraints of the container and potentially access other resources on the host, including contents of other containers. * Output of processes may be persisted by the containers service in order to allow asynchronous collection of exit status, and terminal output. Such content may include sensitive information. Features may be added to mitigate the risk of this data being replicated in log messages, including errors. * Creating containers usually requires root access. This means that the Agent may need to be run with special privileges, or be given a method to escalate privileges using techniques such as sudo. 
* User provided data is passed through the API. This will require sensible data input validation. Notifications impact -------------------- Contemplated features (in subsequent release cycles): * Notify the end user each time a Nova instance is created or deleted by the Containers service, if (s)he has registered for such notifications. * Notify the user each on CRUD of containers containing start and end notifications. (compute.container.create/delete/etc) * Notify user periodically of existence of container service managed containers (ex compute.container.exists) Other end user impact --------------------- The user interface will be a REST API. On top of that API will be an implementation of the libswarm API to allow for tools designed to use Docker to treat OpenStack as an upstream system. Performance Impact ------------------ The Nova API will be used to create instances as needed. If the Container to Instance ratio is 10, then the Nova API will be called at least once for every 10 calls to the Containers Service. Instances that are left empty will be automatically deleted, so in the example of a 10:1 ratio, the Nova API will be called to perform a delete for every 10 deletes in the Container Service. Depending on the configuration, the ratio may be as low as 1:1. The Containers Service will only access Nova through its API, not by accessing its database. Other deployer impact --------------------- Deployers may want to adjust the default flavor used for Nova Instances created by the Containers Service. There should be no impact on users of prior releases, as this introduces a new API. Developer impact ---------------- Minimal. There will be minimal changes required in Nova, if any. Implementation ============== Assignee(s) ----------- Primary assignee: aotto Other contributors: andrew-melton ewindisch Work Items ---------- 1. Agent 2. Relay 3. API Service 4. IO Relays Dependencies ============ 1. 2. 
Early implementations may use libswarm, or a python port of libswarm to implement Docker API compatibility. Testing ======= Each commit will be accompanied with unit tests, and Tempest functional tests. Documentation Impact ==================== A set of documentation for this new service will be required. References ========== * Link to high level draft proposal from the Nova Midcycle Meetup for Juno: `PDF `_ * `Libswarm Source `_ magnum-6.1.0/specs/stats-api-spec.rst0000666000175100017510000001522213244017334017537 0ustar zuulzuul00000000000000======================== Magnum Cluster Stats API ======================== Launchpad blueprint: https://blueprints.launchpad.net/magnum/+spec/magnum-stats-api This proposal is to add a new Magnum statistics API to provide useful metrics to OpenStack administrators/service providers as well as users. Problem Description ------------------- Currently there is no magnum API to get usage metrics. This specification document proposes to add a new stats endpoint to Magnum API. The proposed stats endpoint will provide useful metrics such as overall current usage info to OpenStack service providers and also non-admin tenants will be able to fetch tenant scoped statistics. Use Cases --------- Below given are some of the use cases that can be addressed by implementing stats API for Magnum: 1. A Magnum tenant with admin role would like to get the total number of active clusters, nodes, floating IPs and Cinder volumes for all active tenants. 2. A Magnum tenant with admin role would like to get the total number of active clusters, nodes, floating IPs and Cinder volumes for a specific tenant. 3. A Magnum tenant without admin role can get the total number of active clusters, nodes, floating IPs and Cinder volumes scoped to that tenant. 4. A Magnum tenant would like to discover the sum of allocated server capacity for a given cluster (in terms of aggregate vcpu, memory, local storage, and cinder volume storage). 5. 
A Magnum tenant with admin role would like to discover the aggregate server capacity (in terms of aggregate vcpu, memory, local storage, and cinder volume storage) allocated by all clusters belonging to a specific tenant or all the tenants. Please note that this is not an exhaustive list of use cases and additional specs will be proposed based on the community needs. Proposed Changes ---------------- The proposed change is to add a new '/stats' REST API endpoint to Magnum service that will provide total number of clusters, nodes, floating IPs, Cinder volumes and also a summary view of server capacity (in terms of aggregate vcpu, memory, local storage, and cinder volume storage) allocated to a cluster, or to all the clusters owned by the given tenant or all the tenants. 1. Add an API that returns total number of clusters, nodes, floating IPs, and Cinder volumes of all tenants. 2. Add an API that returns total number of clusters, nodes, floating IPs, and Cinder volumes of a specific tenant. 3. Add an API that returns aggregate vcpu, memory, local storage, and cinder volume storage for the given cluster. 4. Add an API that returns aggregate vcpu, memory, local storage, and cinder volume storage allocated by all clusters belonging to a specific tenant. 5. Update policy.json file to enable access to '/stats' endpoint to owner and admin (using a policy rule admin_or_owner). In the initial implementation stats data will be aggregated from Magnum DB and/or from other OpenStack services on demand. There will be some interaction between the conductor and the drivers through an interface. If needed, this on-demand stats aggregation implementation can be updated in future without affecting the REST API behavior. For example, if the proposed on-demand data aggregation is not responsive, Magnum conductor may need to collect the stats periodically and save in the Magnum DB. Initial work in progress review [2]. 
Alternatives ------------ Without proposed stats endpoint, an administrator could use OpenStack clients to get some basic statistics such as server count, volume count etc. by relying on the Magnum naming convention. For example, to get nova instance count: nova list | grep -e "kube-" -e "swarm-" -e "mesos-" | wc For the number of cinder volumes: cinder list | grep "docker_volume" | wc -l For float IPs count: openstack ip floating list -f value|wc -l For clusters count: magnum cluster-list | grep "CREATE_COMPLETE" | wc -l Data model impact ----------------- None, because data will be aggregated and summarized at the time of each stats API request, so no stats need to be persisted in the data store. REST API impact --------------- Add a new REST endpoint '/stats' as shown below: A GET request with admin role to '/stats?type=cluster' will return the total clusters, nodes, floating IPs and Cinder volumes for all active tenants. A GET request without admin role to '/stats?type=cluster' will return the total clusters, nodes, floating IPs and Cinder volumes for the current tenant. A GET request with admin role to '/stats?type=cluster&tenant=' will return the total clusters, nodes, floating IPs and Cinder volumes for the given tenant. A GET request to '/stats?type=cluster&tenant=' without admin role will result in HTTP status code 403 (Permission denied) if the requester tenant-id does not match the tenant-id provided in the URI. If it matches, stats will be scoped to the requested tenant. Other Implementation Option --------------------------- Existing /cluster API can be updated to include stats info as shown below: A 'GET' request with admin role to '/cluster/stats' will return total active clusters and nodes across all the tenants. A 'GET' request to '/cluster/stats/' will return total clusters and nodes for the given tenant. A 'GET' request without admin role to '/cluster/stats/' will result in HTTP status code 403 (Permission denied). 
This option was discussed and rejected due to the fact that /cluster/stats collide with /cluster/. Security impact --------------- There will be changes to policy.json file that enable access to '/stats' endpoint to owner and admin (using a policy rule admin_or_owner). Notifications impact -------------------- None Other end user impact --------------------- New /stats endpoint will be available to users. Performance impact ------------------ None Other deployer impact --------------------- None. Developer impact ---------------- None Implementation -------------- Assignee(s) ----------- Primary assignee vijendar-komalla Work Items ---------- 1. Implement /stats API in Magnum service. 2. Document new API. 3. Update Magnum CLI to expose stats functionality. Dependencies ------------ None Testing ------- 1. Since a new stats endpoint will be introduced with this proposal, need to update some unit tests. 2. Add unit tests and functional tests for new functionality introduced. Documentation Impact -------------------- Update API documentation to include stats API information. References ---------- [1] - Magnum cluster statistics API blueprint: https://blueprints.launchpad.net/magnum/+spec/magnum-stats-api [2] - Proposed change under review: https://review.openstack.org/391301 magnum-6.1.0/specs/flatten_attributes.rst0000666000175100017510000001550013244017334020604 0ustar zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode ============================================== Flatten Cluster and ClusterTemplate Attributes ============================================== Launchpad blueprint: https://blueprints.launchpad.net/magnum/+spec/flatten-attributes Including all user-specified attributes in Clusters and ClusterTemplates will increase flexibility for users during ClusterTemplate definition and Cluster creation. 
Note that this spec only deals with changes to magnum's data model, not API changes. Please see the NodeGroup spec for these details: https://blueprints.launchpad.net/magnum/+spec/nodegroups Problem Description =================== Clusters rely on attributes from both the magnum Cluster and ClusterTemplate resources, but the line between attributes that belong in one or the other is not well-defined. Most attributes make sense where they are, but there will be times that users will want to capture different attributes in a ClusterTemplate or specify them during cluster creation. The current system has little flexibility, with only keypairs able to exist in either. Use Cases ========= 1. Users that want to specify attributes in ClusterTemplates that they can't right now, such as node count. 2. Users that want to specify/override attributes when creating a Cluster that they can't right now, since attributes that come from ClusterTemplates are currently unchangeable. Proposed Change =============== Give both Cluster and ClusterTemplate a copy of all user-specifed attributes. The python object for ClusterTemplate will work much the same, just with more attributes available. The python object for Cluster will no longer (and should not) need to use attributes from its ClusterTemplate, since it will have all the attributes it needs and it is possible that some attributes will have been overridden in the cluster-create request. For example, `cluster.cluster_template.fixed_network` will become `cluster.fixed_network`. Alternatives ============ The shared fields can be added to the existing Cluster and ClusterTemplate tables. This achieves the same effect, but brings with it the burden of maintaining two sets of the same fields in different tables. Data Model Impact ================= A new database table, ClusterAttributes, will be added. The shared fields will be moved to this table. A foreign key to ClusterAttributes will be added to the Cluster and ClusterTemplate tables. 
The relationship between Cluster and ClusterAttributes is one-to-one. The same is true between ClusterTemplate and ClusterAttributes. That is, Clusters and ClusterTemplates have their own separate copy of cluster attributes. Database tables before, with fields that will be shared marked: cluster: =================== ======= Attribute Shared? ------------------- ------- id uuid project_id user_id name stack_id status status_reason api_address trust_id trustee_username trustee_user_id trustee_password coe_version container_version ca_cert_ref magnum_cert_ref cluster_template_id node_addresses master_addresses create_timeout Yes discovery_url Yes node_count Yes master_count Yes keypair Yes =================== ======= cluster_template: ===================== ======= Attribute Shared? --------------------- ------- id uuid project_id user_id name public apiserver_port Yes keypair_id Yes labels Yes external_network_id Yes fixed_network Yes fixed_subnet Yes network_driver Yes volume_driver Yes dns_nameserver Yes coe Yes http_proxy Yes https_proxy Yes no_proxy Yes registry_enabled Yes tls_disabled Yes insecure_registry Yes master_lb_enabled Yes floating_ip_enabled Yes image_id Yes flavor_id Yes docker_volume_size Yes docker_storage_driver Yes cluster_distro Yes server_type Yes master_flavor_id Yes ===================== ======= Database tables after: cluster: - id - uuid - project_id - user_id - name - stack_id - status - status_reason - api_address - trust_id - trustee_username - trustee_user_id - trustee_password - coe_version - container_version - ca_cert_ref - magnum_cert_ref - cluster_template_id - node_addresses - master_addresses - FK to cluster_attributes (new) cluster_template: - id - uuid - project_id - user_id - name - public - FK to cluster_attributes (new) cluster_attributes: - id (new) - apiserver_port - create_timeout - discovery_url - node_count - master_count - keypair_id - labels - external_network_id - fixed_network - fixed_subnet - network_driver - 
volume_driver - dns_nameserver - coe - http_proxy - https_proxy - no_proxy - registry_enabled - tls_disabled - insecure_registry - master_lb_enabled - floating_ip_enabled - image_id - flavor_id - docker_volume_size - docker_storage_driver - cluster_distro - server_type - master_flavor_id REST API Impact =============== None Security Impact =============== None identified Notifications Impact ==================== None Other End-user Impact ===================== None Performance Impact ================== Negligible. Two-table joins should have minimal performance impact. There may be cases where only the Cluster/ClusterTemplate or ClusterAttributes table needs to be queried/written that will further offset the small performance impact or even improve performance since these operations will be dealing with narrower tables. Other Deployer Impact ===================== This change will require a database migration. Developer Impact ================ Developers will not have to remember which attributes come from ClusterTemplate because they will all be available in Cluster. Implementation ============== Assignee(s) ----------- Spyros Trigazis (strigazi) Work Items ---------- 1. Database migration to add ClusterAttributes table. 2. Updates to python code. Dependencies ============ None Testing ======= Unit tests will need to be updated, but functional tests will still pass as this is an internal change. Documentation Impact ==================== None for this spec, as the changes are internal. References ========== None magnum-6.1.0/specs/create-trustee-user-for-each-bay.rst0000666000175100017510000001356513244017334023055 0ustar zuulzuul00000000000000================================== Create a trustee user for each bay ================================== https://blueprints.launchpad.net/magnum/+spec/create-trustee-user-for-each-bay Some services which are running in a bay need to access OpenStack services. For example, Kubernetes load balancer [1]_ needs to access Neutron. 
Docker registry [2]_ needs to access Swift. In order to access OpenStack services, we can create a trustee for each bay and delegate a limited set of rights to the trustee. [3]_ and [4]_ give a brief introduction to Keystone's trusts mechanism. Problem description =================== Some services which are running in a bay need to access OpenStack services, so we need to pass user credentials into the vms. Use Cases --------- 1. Kubernetes load balancer needs to access Neutron [1]_. 2. For persistent storage, Cloud Provider needs to access Cinder to mount/unmount block storage to the node as volume [5]_. 3. TLS cert is generated in the vms and need to be uploaded to Magnum [6]_ and [7]_. 4. Docker registry needs to access Swift [2]_. Project Priority ---------------- High Proposed change =============== When a user (the "trustor") wants to create a bay, steps for trust are as follows. 1. Create a new service account (the "trustee") without any role in a domain which is dedicated for trust. Without any role, the service account can do nothing in Openstack. 2. Define a trust relationship between the trustor and the trustee. The trustor can delegate a limited set of roles to the trustee. We can add an option named trust_roles in baymodel. Users can add roles which they want to delegate into trust_roles. If trust_roles is not provided, we delegate all the roles to the trustee. 3. Services in the bay can access OpenStack services with the trustee credentials and the trust. The roles which are delegated to the trustee should be limited. If the services in the bay only need access to Neutron, we should not allow the services to access to other OpenStack services. But there is a limitation that a trustor must have the role which is delegated to a trustee [4]_. Magnum now only allows the user who create the bay to get the certificate to avoid the security risk introduced by Docker [8]_. 
For example, if other users in the same tenant can get the certificate, then they can use Docker API to access the host file system of a bay node and get anything they want:: docker run --rm -v /:/hostroot ubuntu /bin/bash \ -c "cat /hostroot/etc/passwd" If Keystone doesn't allow to create new service accounts when LDAP is used as the backend for Keystone, we can use a pre-create service account for all bays. In this situation, all the bays use the same service account and different trust. We should add an config option to choose this method. Alternatives ------------ Magnum can create a user for each bay with roles to access OpenStack Services in a dedicated domain. The method has one disadvantage. The user which is created by magnum may get the access to OpenStack services which this user can not access before. For example, a user can not access Swift service and create a bay. Then Magnum create a service account for this bay with roles to access Swift. If the user logins into the vms and get the credentials, the user can use these credentials to access Swift. Or Magnum doesn't prepare credentials and the user who create a bay needs to login into the nodes to manully add credentials in config files for services. Data model impact ----------------- Trustee id, trustee password and trust id are added to Bay table in Magnum database. REST API impact --------------- Only the user who create a bay can get the certificate of this bay. Other users in the same tenant can not get the certificate now. Security impact --------------- Trustee id and trustee password are encrypted in magnum database. When Magnum passes these parameters to heat to create a stack, the transmission is encrypted by tls, so we don't need to encrypt these credentials. These credentials are hidden in heat, users can not query them in stack parameters. Trustee id, trustee password and trust id can be obtained in the vms. 
Anyone who can login into the vms can get them and use these credentials to access OpenStack services. In a production environment, these vms must be secured properly to prevent unauthorized access. Only the user who create the bay can get the certificate to access the COE api, so it is not a security risk even if the COE api is not safe. Notifications impact -------------------- None Other end user impact --------------------- None Performance impact ------------------ None Other deployer impact --------------------- None Developer impact ---------------- None Implementation ============== Assignee(s) ----------- Primary assignee: humble00 (wanghua.humble@gmail.com) Other contributors: None Work Items ---------- 1. Create an trustee for each bay. 2. Change the policy so that only the user who create a bay can get the certificate of the bay. Dependencies ============ None Testing ======= Unit test and functional test for service accounts and the policy change. Documentation Impact ==================== The user guide and troubleshooting guide will be updated with details regarding the service accounts. References ========== .. [1] http://docs.openstack.org/developer/magnum/dev/kubernetes-load-balancer.html .. [2] https://blueprints.launchpad.net/magnum/+spec/registryv2-in-master .. [3] http://blogs.rdoproject.org/5858/role-delegation-in-keystone-trusts .. [4] https://wiki.openstack.org/wiki/Keystone/Trusts .. [5] https://github.com/kubernetes/kubernetes/blob/release-1.1/examples/mysql-cinder-pd/README.md .. [6] https://bugs.launchpad.net/magnum/+bug/1503863 .. [7] https://review.openstack.org/#/c/232152/ .. [8] https://docs.docker.com/engine/articles/security/#docker-daemon-attack-surface History ======= None magnum-6.1.0/specs/bay-drivers.rst0000666000175100017510000002745513244017334017144 0ustar zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. 
http://creativecommons.org/licenses/by/3.0/legalcode ====================================== Container Orchestration Engine drivers ====================================== Launchpad blueprint: https://blueprints.launchpad.net/magnum/+spec/bay-drivers Container Orchestration Engines (COEs) are different systems for managing containerized applications in a clustered environment, each having their own conventions and ecosystems. Three of the most common, which also happen to be supported in Magnum, are: Docker Swarm, Kubernetes, and Mesos. In order to successfully serve developers, Magnum needs to be able to provision and manage access to the latest COEs through its API in an effective and scalable way. Problem description =================== Magnum currently supports the three most popular COEs, but as more emerge and existing ones change, it needs an effective and scalable way of managing them over time. One of the problems with the current implementation is that COE-specific logic, such as Kubernetes replication controllers and services, is situated in the core Magnum library and made available to users through the main API. Placing COE-specific logic in a core API introduces tight coupling and forces operators to work with an inflexible design. By formalising a more modular and extensible architecture, Magnum will be in a much better position to help operators and consumers satisfy custom use-cases. Use cases --------- 1. Extensibility. Contributors and maintainers need a suitable architecture to house current and future COE implementations. Moving to a more extensible architecture, where core classes delegate to drivers, provides a more effective and elegant model for handling COE differences without the need for tightly coupled and monkey-patched logic. One of the key use cases is allowing operators to customise their orchestration logic, such as modifying Heat templates or even using their own tooling like Ansible. 
Moreover, operators will often expect to use a custom distro image with lots of software pre-installed and many special security requirements that is extremely difficult or impossible to do with the current upstream templates. COE drivers solves these problems. 2. Maintainability. Moving to a modular architecture will be easier to manage in the long-run because the responsibility of maintaining non-standard implementations is shifted into the operator's domain. Maintaining the default drivers which are packaged with Magnum will also be easier and cleaner since logic is now demarcated from core codebase directories. 3. COE & Distro choice. In the community there has been a lot of discussion about which distro and COE combination to support with the templates. Having COE drivers allows for people or organizations to maintain distro-specific implementations (e.g CentOS+Kubernetes). 4. Addresses dependency concerns. One of the direct results of introducing a driver model is the ability to give operators more freedom about choosing how Magnum integrates with the rest of their OpenStack platform. For example, drivers would remove the necessity for users to adopt Barbican for secret management. 5. Driver versioning. The new driver model allows operators to modify existing drivers or creating custom ones, release new bay types based on the newer version, and subsequently launch news bays running the updated functionality. Existing bays which are based on older driver versions would be unaffected in this process and would still be able to have lifecycle operations performed on them. If one were to list their details from the API, it would reference the old driver version. An operator can see which driver version a bay type is based on through its ``driver`` value, which is exposed through the API. Proposed change =============== 1. The creation of new directory at the project root: ``./magnum/drivers``. Each driver will house its own logic inside its own directory. 
Each distro will house its own logic inside that driver directory. For example, the Fedora Atomic distro using Swarm will have the following directory structure: :: drivers/ swarm_atomic_v1/ image/ ... templates/ ... api.py driver.py monitor.py scale.py template_def.py version.py The directory name should be a string which uniquely identifies the driver and provides a descriptive reference. The driver version number and name are provided in the manifest file and will be included in the bay metadata at cluster build time. There are two workflows for rolling out driver updates: - if the change is relatively minor, they modify the files in the existing driver directory and update the version number in the manifest file. - if the change is significant, they create a new directory (either from scratch or by forking). Further explanation of the three top-level files: - an ``image`` directory is *optional* and should contain documentation which tells users how to build the image and register it to glance. This directory can also hold artifacts for building the image, for instance diskimagebuilder elements, scripts, etc. - a ``templates`` directory is *required* and will (for the foreseeable future) store Heat template YAML files. In the future drivers will allow operators to use their own orchestration tools like Ansible. - ``api.py`` is *optional*, and should contain the API controller which handles custom API operations like Kubernetes RCs or Pods. It will be this class which accepts HTTP requests and delegates to the Conductor. It should contain a uniquely named class, such as ``SwarmAtomicXYZ``, which extends from the core controller class. The COE class would have the opportunity of overriding base methods if necessary. - ``driver.py`` is *required*, and should contain the logic which maps controller actions to COE interfaces. These include: ``bay_create``, ``bay_update``, ``bay_delete``, ``bay_rebuild``, ``bay_soft_reboot`` and ``bay_hard_reboot``. 
- ``version.py`` is *required*, and should contain the version number of the bay driver. This is defined by a ``version`` attribute and is represented in the ``1.0.0`` format. It should also include a ``Driver`` attribute and should be a descriptive name such as ``swarm_atomic``. Due to the varying nature of COEs, it is up to the bay maintainer to implement this in their own way. Since a bay is a combination of a COE and an image, ``driver.py`` will also contain information about the ``os_distro`` property which is expected to be attributed to Glance image. - ``monitor.py`` is *optional*, and should contain the logic which monitors the resource utilization of bays. - ``template_def.py`` is *required* and should contain the COE's implementation of how orchestration templates are loaded and matched to Magnum objects. It would probably contain multiple classes, such as ``class SwarmAtomicXYZTemplateDef(BaseTemplateDefinition)``. - ``scale.py`` is *optional* per bay specification and should contain the logic for scaling operations. 2. Renaming the ``coe`` attribute of BayModel to ``driver``. Because this value would determine which driver classes and orchestration templates to load, it would need to correspond to the name of the driver as it is registered with stevedore_ and setuptools entry points. During the lifecycle of an API operation, top-level Magnum classes (such as a Bay conductor) would then delegate to the driver classes which have been dynamically loaded. Validation will need to ensure that whichever value is provided by the user is correct. By default, drivers are located under the main project directory and their namespaces are accessible via ``magnum.drivers.foo``. But a use case that needs to be looked at and, if possible, provided for is drivers which are situated outside the project directory, for example in ``/usr/share/magnum``. This will suit operators who want greater separation between customised code and Python libraries. 3. 
The driver implementations for the 4 current COE and image combinations:
Code will be contributed for COE-specific functionality in a new way, and will need to abide by the new architecture. Documentation and a good first implementation will play an important role in helping developers contribute new functionality. Implementation ============== Assignee(s) ----------- Primary assignee: murali-allada Other contributors: jamiehannaford strigazi Work Items ---------- 1. New ``drivers`` directory 2. Change ``coe`` attribute to ``driver`` 3. COE drivers implementation (swarm-fedora, k8s-fedora, k8s-coreos, mesos-ubuntu). Templates should remain in directory tree until their accompanying driver has been implemented. 4. Delete old conductor files 5. Update client 6. Add documentation 7. Improve user experience for operators of forking/creating new drivers. One way we could do this is by creating new client commands or scripts. This is orthogonal to this spec, and will be considered after its core implementation. Dependencies ============ None Testing ======= Each commit will be accompanied with unit tests, and Tempest functional tests. Documentation Impact ==================== A set of documentation for this architecture will be required. We should also provide a developer guide for creating a new bay driver and updating existing ones. References ========== `Using Stevedore in your Application `_. .. _stevedore: http://docs.openstack.org/developer/stevedore/ magnum-6.1.0/specs/container-volume-integration-model.rst0000666000175100017510000004602513244017334023615 0ustar zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode ========================================= Magnum Container Volume Integration Model ========================================= Launchpad Blueprint: https://blueprints.launchpad.net/magnum/+spec/magnum-integrate-with-cinder Storage is a key part of any computing system. 
Containers in particular have the interesting characteristic that local storage by default is ephemeral: any changes to the file system disappear when the container is deleted. This introduces the need for persistent storage to retain and share data between containers, and this is currently an active area of development in all container orchestration engines (COE). As the component in OpenStack for managing COE's, Magnum must fully enable the features for persistent storage in the COE's. To achieve this goal, we propose in this specification to generalize the process for utilizing persistent storage with containers so that it is applicable for different bay types. Despite the complexity, we aim to maintain a good user experience by a simple abstraction for working with various volume capabilities. For the rest of this specification, we will use the term Volume to refer to persistent storage, and Volume Driver as the plugin in a COE to support the particular persistent storage. Problem Description =================== Containers requires full life cycle management such as create, run, stop, delete,... and a key operation is to manage the data - making the data persistent, reusing the data, sharing data between containers, etc. In this area, the support for container volume is undergoing rapid change to bring more integration with open source software and third party storage solutions. A clear evidence of this growth is the many plugin volume drivers [1]_ [4]_ such as NFS, GlusterFS, EBS, etc. They provide different functionality, use different storage backend and have different requirements. The COE's are naturally motivated to be flexible and allow as many choices as possible for the users with respect to the storage backend. Since Magnum's role is to support the COE's within OpenStack, the goal is to be transparent and enable these same storage backends for the COE's through the COE's lifecycle operation. 
Currently, Magnum provides limited support for managing container volumes.
Therefore, we expect the use cases for the users will be fulfilled by the COE's themselves; Magnum will simply ensure that the necessary supports are in place. 1. I need to easily create volume for containers to use as persistent data store. 2. I need the ability to create and mount a data volume container for cross container sharing. 3. I need to mount a host directory as a data volume. 4. I need to easily attach a known volume to container to use the existing data. 5. I need the ability to delete the volume. 6. I need to list and view the details of the volume 7. I need to modify the volume. As a CSP: 1. I need to easily deploy a bay for consumption by users. The bay must support the following: A. One or more hosts to run containers. B. The ability to choose between virtual or physical hosts to run containers. C. The ability to automatically enable volume plugins to containers. 2. I need to provide clustering options that support different volume plugins per COE. 3. After deploying my initial cluster, I need the ability to provide lifecycle management, including: A. The ability to add/remove volumes that containers used. B. The ability to add/remove nodes within the cluster with the necessary adjustment to the volumes As a CP: 1. I need to easily and reliably add the Magnum service to my existing OpenStack cloud environment. 2. I need to make the Magnum services highly-available. 3. I need to make Magnum services highly performant. 4. I need to easily scale-out Magnum services as needed. Proposed Changes ================ We propose extending Magnum as follows. 1. The new attribute volume-driver for a baymodel specifies the volume backend driver to use when deploying a bay. Volume drivers may include: rexray, flocker, nfs, glusterfs, etc.. 
Here is an example of creating a Docker Swarm baymodel that uses rexray [5]_ [6]_ as the volume driver: :: magnum baymodel-create --name swarmbaymodel \ --image-id fedora-21-atomic-5 \ --keypair-id testkey \ --external-network-id 1hsdhs88sddds889 \ --dns-nameserver 8.8.8.8 \ --flavor-id m1.small \ --docker-volume-size 5 \ --coe swarm\ --network-driver flannel \ --volume-driver rexray When a Swarm bay is created with this bay model, the REX-Ray storage subsystem will be installed, configured and started on the Swarm nodes, then the REX-Ray volume plugin will be registered in Docker. When a container is created with rexray as the volume driver, the container will have full access to the REX-Ray capabilities such as creating, mounting, deleting volumes [6]_. REX-Ray in turn will interface with Cinder to manage the volumes in OpenStack. Here is an example of creating a Kubernetes baymodel that uses Cinder [2]_ [3]_ as the volume driver: :: magnum baymodel-create --name k8sbaymodel \ --image-id fedora-21-atomic-5 \ --keypair-id testkey \ --external-network-id 1hsdhs88sddds889 \ --dns-nameserver 8.8.8.8 \ --flavor-id m1.small \ --docker-volume-size 5 \ --coe kubernetes\ --network-driver flannel \ --volume-driver cinder When the Kubernetes bay is created using this bay model, the kubelet will be configured so that an existing Cinder volume can be mounted in a pod by specifying the volume ID in the pod manifest as follows: :: volumes: - name: mysql-persistent-storage cinder: volumeID: bd82f7e2-wece-4c01-a505-4acf60b07f4a fsType: ext4 Here is an example of creating a mesos baymodel that uses rexray as the volume driver: :: magnum baymodel-create --name mesosbaymodel \ --image-id ubuntu-mesos\ --keypair-id testkey \ --external-network-id 1hsdhs88sddds889 \ --dns-nameserver 8.8.8.8 \ --flavor-id m1.small \ --coe mesos\ --network-driver docker \ --volume-driver rexray When the mesos bay is created using this bay model, the mesos bay will be configured so that an existing 
Cinder volume can be mounted in a container by configuring the parameters to mount the cinder volume in the json file. :: "parameters": [ { "key": "volume-driver", "value": "rexray" }, { "key": "volume", "value": "redisdata:/data" } ] If no volume-driver parameter is supplied by the user, the baymodel is created using the default volume driver of the particular COE. Magnum will provide a default volume driver for each COE as well as the reasonable default configuration for each driver so that users can instantiate a COE without supplying a volume driver and associated labels. Generally the defaults should be consistent with upstream volume driver projects. 2. Each volume driver supports a range of configuration parameters that are handled by the "labels" attribute. Labels consist of one or more arbitrary key/value pairs. Here is an example of using labels to choose ¡°storage-provider¡± for rexray driver. Volume driver: :: magnum baymodel-create --name k8sbaymodel \ --image-id fedora-21-atomic-5 \ --keypair-id testkey \ --external-network-id ${NIC_ID} \ --dns-nameserver 8.8.8.8 \ --flavor-id m1.small \ --docker-volume-size 5 \ --coe kubernetes \ --volume-driver rexray \ --labels storage-provider=openstack \ [, key2=value2...] If the --volume-driver flag is specified without any labels, default configuration values of the driver will be used by the baymodel. Magnum will validate the labels together with the driver specified before creating the bay and will return an error if the validation fails. Magnum will continue to CRUD bays in the same way: magnum bay-create --name k8sbay --baymodel k8sbaymodel --node-count 1 3. Update python-magnumclient to handle the new container volume- driver attributes. 4. Update the conductor template definitions to support the new container volume-driver model attributes. 5. Refactor Heat templates to support the Magnum volume driver plugin. 
Configurations specific to volume drivers should be implemented in one or more template fragments. Top-level templates should only expose the labels and generalized parameters such as volume-driver. Heat templates, template definitions and definition entry points should be designed for composition, allowing for a range of supported labels. 6. Update unit and functional tests to support the new attributes of the Magnum container volume driver. 7. Preserve the user experience by ensuring that any operation on volume will be identical between a COE deployed by Magnum and a COE deployed by other methods. Alternatives ------------ 1. Without the support proposed, the user will need to manually enable and configure the volume plugin. This will require the user to log into the nodes in the cluster and understand the low level infrastructure of the cluster as deployed by the heat templates. 2. We can add full support for managing container volume in Magnum user interface itself. This will require adding abstractions for each supported COE volume plugins driver or creating an abstraction layer that covers all possible COE volume drivers. Data Model Impact ----------------- This document adds the volume-driver attribute to the baymodel database table. A migration script will be provided to support the attribute being added. :: +-------------------+-----------------+---------------------------------------------+ | Attribute | Type | Description | +===================+=================+=============================================+ +-------------------+-----------------+---------------------------------------------+ | volume-driver | string | Container volume backend implementation | +-------------------+-----------------+---------------------------------------------+ REST API Impact --------------- This document adds volume-driver attribute to the BayModel API class. 
:: +-------------------+-----------------+---------------------------------------------+ | Attribute | Type | Description | +===================+=================+=============================================+ +-------------------+-----------------+---------------------------------------------+ | volume-driver | string | Container volume backend implementation | +-------------------+-----------------+---------------------------------------------+ Security Impact --------------- Supporting volume drivers can potentially increase the attack surface on containers. Notifications Impact -------------------- None Other End User Impact --------------------- There is no impact if the user does not use a volume driver. We anticipate that most users would not use the labels for volume and would simply use the default volume driver and associated configuration options. For those who wish to customize their container volume driver environment, it will be important to understand what volume-driver and labels are supported, along with their associated configuration options, capabilities, etc.. Performance Impact ------------------ There is no impact if the user does not use a volume driver. When a volume driver is used, the performance will depend upon the specific volume driver and its associated storage backends. For example, Kubernetes supports Cinder and awsEBS; the two types of volumes can have different performance. An example of the second case is a docker swarm bay with "--volume-driver rexray" where the rexray driver's storage provider is OpenStack cinder. The resulting performance for container may vary depending on the storage backends. As listed in [8]_ , Cinder supports many storage drivers. Besides this, different container volume driver can also cause performance variance. 
High-Availability Impact ------------------------------ +-----------------+--------------------+--------------------------+ | COE | Master HA | Pod/Container/App HA | +=================+====================+==========================+ | Kubernetes | No | Yes | +-----------------+--------------------+--------------------------+ | Docker Swarm | No | Yes | +-----------------+--------------------+--------------------------+ | Mesos | No | No | +-----------------+--------------------+--------------------------+ "No" means that the volume doesn't affect the high-availability. "Yes" means that the volume affect the high-availability. Kubernetes does support pod high-availability through the replication controller, however, this doesn't work when a pod with volume attached fails. Refer the link [11]_ for details. Docker swarm doesn't support the containers rescheduling when a node fails, so volume can not be automatically detached by volume driver. Refer the link [12]_ for details. Mesos supports the application high-availability when a node fails, which means application would be started on new node, and volumes can be automatically attached to the new node by the volume driver. Other Deployer Impact --------------------- Currently, both Kubernetes and Docker community have supported some volume plugins. The changes proposed will enable these volume plugins in Magnum. However, Magnum users will be able to continue to deploy baymodels, bays, containers, etc. without having to specify any parameters for volume. This will be accomplished by setting reasonable default parameters within the Heat templates. Developer impact ---------------- None Implementation ============== Assignee(s) ----------- Primary assignee: - Kai Qiang Wu (Kennan) Other contributors: - Qun Wang (wangqun) - Ton Ngo (Tango) Work Items ---------- 1. Extend the Magnum API to support new baymodel attributes. 2. Extend the Client API to support new baymodel attributes. 3. 
Extend baymodel objects to support new baymodel attributes. Provide a database migration script for adding attributes. 4. Refactor Heat templates to support the Magnum container volume driver. 5. Update Conductor template definitions and definition entry points to support Heat template refactoring. 6. Extend unit and functional tests to support new baymodel attributes. 7. Document how to use the volume drivers with examples. Dependencies ============ Although adding support for these new attributes does not depend on the following blueprints, it's highly recommended that the Magnum Container Networking Model be developed in concert with the blueprints to maintain development continuity within the project. https://blueprints.launchpad.net/magnum/+spec/ubuntu-image-build Kubernetes with cinder support need Kubernetes version >= 1.1.1 Swarm need version >= 1.8.3, as Kubernetes 1.1.1 upgraded to that version Testing ======= Each commit will be accompanied with unit tests. There will also be functional tests which will be used as part of a cross-functional gate test for Magnum. Documentation Impact ==================== The Magnum Developer Quickstart document will be updated to support the configuration flags introduced by this document. Additionally, background information on how to use these flags will be included. References ========== .. [1] http://kubernetes.io/v1.1/docs/user-guide/volumes.html .. [2] http://kubernetes.io/v1.1/examples/mysql-cinder-pd/ .. [3] https://github.com/kubernetes/kubernetes/tree/master/pkg/volume/cinder .. [4] http://docs.docker.com/engine/extend/plugins/ .. [5] https://github.com/emccode/rexray .. [6] http://rexray.readthedocs.org/en/stable/user-guide/storage-providers/openstack .. [7] http://docs.openstack.org/developer/magnum/ .. [8] http://docs.openstack.org/liberty/config-reference/content/section_volume-drivers.html .. [9] http://docs.openstack.org/admin-guide-cloud/blockstorage_multi_backend.html# .. 
[10] http://docs.openstack.org/user-guide-admin/dashboard_manage_volumes.html .. [11] https://github.com/kubernetes/kubernetes/issues/14642 .. [12] https://github.com/docker/swarm/issues/1488 magnum-6.1.0/specs/resource-quotas.rst0000666000175100017510000002424213244017334020045 0ustar zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode ========================== Quota for Magnum Resources ========================== Launchpad blueprint: https://blueprints.launchpad.net/magnum/+spec/resource-quota There are multiple ways to slice an OpenStack cloud. Imposing quota on these various slices puts a limitation on the amount of resources that can be consumed which helps to guarantee "fairness" or fair distribution of resource at the creation time. If a particular project needs more resources, the concept of quota, gives the ability to increase the resource count on-demand, given that the system constraints are not exceeded. Problem description =================== At present in Magnum we don't have the concept of Quota on Magnum resources as a result of which, as long as the underlying Infrastructure as a Service(IaaS) layer has resources, any user can consume as many resources as they want, with the hardlimit associated with the tenant/project being the upper bound for the resources to be consumed. Quotas are tied closely to physical resources and are billable entity and hence from Magnum's perspective it makes sense to limit the creation and consumption of a particular kind of resource to a certain value. Use cases --------- Alice is the admin. She would like to have the feature which will give her details of Magnum resource consumption so that she can manage her resource appropriately. a. Ability to know current resource consumption. b. Ability to prohibit overuse by a project. c. 
Prevent situations where users in one project get starved because users in other projects consume all the resources.
Later, if the user wants to change the quota of the resource, an option will be provided to do so using magnum quota-update. In situations where all of the quota for a specific Magnum resource (Bay) has been consumed and is in use, the admin will be allowed to set the quota to any value lower than the usage or hard limit to prohibit users in the project from creating new Bays.
For Bays, available = hard_limit - [in_progress + used] In general, Resource quota available = Resource hard_limit - [ (Resource creation in progress + Resources already created for project)] Alternatives ------------ At present there is not quota infrastructure in Magnum. Adding Quota Management layer at the Orchestration layer, Heat, could be an alternative. Doing so will give a finer view of resource consumption at the IaaS layer which can be used while provisioning Magnum resources which depend on the IaaS layer [1]_. Data model impact ----------------- New Quota and Quota usages table will be introduced to Magnum database to store quota consumption for each resource in a project. Quota Table : +------------+--------------+------+-----+---------+----------------+ | Field | Type | Null | Key | Default | Extra | +------------+--------------+------+-----+---------+----------------+ | id | int(11) | NO | PRI | NULL | auto_increment | | created_at | datetime | YES | | NULL | | | updated_at | datetime | YES | | NULL | | | project_id | varchar(255) | YES | MUL | NULL | | | resource | varchar(255) | NO | | NULL | | | hard_limit | int(11) | YES | | NULL | | +------------+--------------+------+-----+---------+----------------+ Quota usages table : +---------------+--------------+------+-----+---------+----------------+ | Field | Type | Null | Key | Default | Extra | +---------------+--------------+------+-----+---------+----------------+ | created_at | datetime | YES | | NULL | | | updated_at | datetime | YES | | NULL | | | id | int(11) | NO | PRI | NULL | auto_increment | | project_id | varchar(255) | YES | MUL | NULL | | | resource | varchar(255) | NO | | NULL | | | in_progress | int(11) | NO | | NULL | | | used | int(11) | NO | | NULL | | +---------------+--------------+------+-----+---------+----------------+ REST API impact --------------- REST API will be added for : 1. quota-defaults List all default quotas for all tenants. 2. 
quota-show List the currently set quota values for a tenant. 3. quota-update Updates quotas for a tenant. 4. quota-usage Lists quota usage for a tenant. 5. quota-list List quota for all the tenants. A user with "admin" role will be able to do all the above operations but a user with "non-admin" role will be restricted to only get/list quota associated to his/her tenant. User with "non-admin" role can be a Member of the tenant less "admin" role. REST API for resources which will have quota imposed will be enhanced : 1. Bay create Will check if there is quota available for Bay creation, if so proceed ahead with the request otherwise throw exception that not enough quota is available. Security impact --------------- None Notifications impact -------------------- None Other end user impact --------------------- End user will have the option to look at the quota set on the resources, quota usage by a particular project. Performance Impact ------------------ None Other deployer impact --------------------- None Developer impact ---------------- None Implementation ============== Assignee(s) ----------- Primary assignee: vilobhmm Other contributors: None Work Items ---------- 1. Introduce Quota and Quota usages table in Magnum database. 2. Introduce API to set/update Quota for a resource, specifically bay, for Mitaka release. 3. Introduce API to create Quota entry, by default, for a resource. 4. Provide config options that will allow users/admins to set Quota. 5. Make sure that if the resource is deleted the used count from the quota_usages table will be decremented by the number of resources deleted. For example, if resource, bay, is deleted then the entries for it in the Quota usages table should be decremented by the number of Bays deleted. 6. Provide CLI options to view the quota details : a. magnum quota-show b. magnum quota-update c. magnum quota-defaults d. magnum quota-usage e. magnum quota-list 7. 
Add conf setting for bays default quota since we will focus on Bays for Mitaka. Dependencies ============ None Testing ======= 1. Each commit will be accompanied with unit tests. 2. Gate functional tests will also be covered. Documentation Impact ==================== None References ========== .. [1] http://lists.openstack.org/pipermail/openstack-dev/2015-December/082266.html .. [2] https://github.com/openstack/nova/blob/master/nova/quota.py .. [3] https://github.com/openstack/nova/blob/master/cinder/quota.py magnum-6.1.0/specs/magnum-horizon-plugin.rst0000666000175100017510000001236613244017334021156 0ustar zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode =================================== Web Interface for Magnum in Horizon =================================== Launchpad blueprint: https://blueprints.launchpad.net/magnum/+spec/magnum-horizon-plugin Currently there is no way for a user to interact with Magnum through a web based user interface, as they are used to doing with other OpenStack components. This implementation aims to introduce this interface as an extension of Horizon (the OpenStack Dashboard) and expose all the features of Magnum in a way familiar to users. Problem description =================== In order to increase adoption and usability of Magnum we need to introduce a UI component for users and administrators to interact with Magnum without the need to use the command line. The UI proposed to be built will model all of the features currently available in the Magnum REST API and built using the Horizon plugin architecture to remain in line with other OpenStack UI projects and minimise the amount of new code that needs to be added. Use Cases ---------- 1. An end user wanting to use Magnum with OpenStack who is not comfortable in issuing commands with the python client will use the web user interface to interact with Magnum. 2. 
An administrator may use the user interface to provide a quick overview of what Magnum has deployed in their OpenStack environment. Proposed change =============== The first step will be to extend the Horizon API to include CRUD operations that are needed to interact with Magnum. Assuming that there are no issues here and API changes/additions are not required to Magnum, we can begin to design/implement the interface. We will aim to minimize the amount of Magnum specific UI code that will need to be maintained by reusing components from Horizon. This will also speed up the development significantly. It is suggested the initial implementation of Magnum UI will include basic CRUD operations on BayModel and Bay resources. This will be the starting point for development and upon completion this will represent version 1. Future direction includes adding CRUD operations for other Magnum features (Pod, Container, Service, ReplicationController) and will be tracked by new blueprints as they represent significant additional effort. The ultimate goal, a user should be able to perform all normal interactions with Magnum through the UI with no need for interaction with the python client. Suggestions for further improvement include visualising Magnum resources to provide a quick overview of how resources are deployed. Bugs/Blueprints relating specifically to the Magnum UI will be tracked here: https://launchpad.net/magnum-ui Mockups/Designs will be shared using the OpenStack Invision account located here: https://openstack.invisionapp.com Alternatives ------------ One alternative to this approach is to develop an entirely separate UI specifically for Magnum. We will not use this approach as it does not fall in line with how other projects are managing their user interfaces and this approach would ultimately result in a significantly larger effort with much duplication with Horizon. Data model impact ----------------- None REST API impact --------------- For Magnum, none. 
The Horizon API will need to be extended to include Create, Read, Update, Delete operations for all features available in the Magnum REST API. However, this extension to the Horizon API will live in the Magnum UI tree not the upstream Horizon tree. Security impact --------------- None Notifications impact -------------------- None Other end user impact --------------------- None Performance Impact ------------------ The Magnum API will be called from the user interface to return information to the user about the current state of Magnum objects and perform new interactions with Magnum. For every action a user performs from the user interface at least one API call to Magnum will need to be made. Other deployer impact --------------------- As the Magnum user interface will be managed and stored outside of the Horizon project deployers will need to pull down the Magnum UI code and add this to their Horizon install. In order to add the Magnum UI to Horizon the deployer will have to copy an enable file to openstack_dashboard/local/enabled/ in their Horizon directory and then run Horizon as they would normally. Developer impact ---------------- None Implementation ============== Assignee(s) ----------- Primary assignee: bradjones Work Items ---------- 1. Extend Horizon API in include Magnum calls 2. CRUD operations on BayModel and Bay resources 3. CRUD operations on other Magnum features (Pod, Container, Service, etc.) 4. Refine the user experience Dependencies ============ None Testing ======= Each commit will be accompanied with unit tests. There will also be functional tests which will be used as part of a cross-functional gate test for Magnum. This additional gate test will be non-voting as failures will not indicate issues with Magnum but instead serves as advanced warning of any changes that could potentially break the UI. Documentation Impact ==================== An installation guide will be required. 
References ========== None magnum-6.1.0/specs/tls-support-magnum.rst0000666000175100017510000001420613244017334020501 0ustar zuulzuul00000000000000===================== TLS support in Magnum ===================== Launchpad blueprint: https://blueprints.launchpad.net/magnum/+spec/secure-kubernetes Currently there is no authentication in Magnum to provide access control to limit communication between the Magnum service and the Kubernetes service so that Kubernetes can not be controlled by a third party. This implementation closes this security loophole by using TLS as an access control mechanism. Only the Magnum server will have the key to communicate with any given Kubernetes API service under its control. An additional benefit of this approach is that communication over the network will be encrypted, reducing the chance of eavesdropping on the communication stream. Problem Description ------------------- Magnum currently controls Kubernetes API services using unauthenticated HTTP. If an attacker knows the api_address of a Kubernetes Bay, (s)he can control the cluster without any access control. Use Cases --------- 1. Operators expect system level control to be protected by access control that is consistent with industry best practices. Lack of this feature may result in rejection of Magnum as an option for hosting containerized workloads. Proposed Changes ---------------- The complete implementation of TLS support in Magnum can be further decomposed into below smaller implementations. 1. TLS support in Kubernetes Client Code. ----------------------------------------- The current implementation of Kubernetes Client code doesn't have any authentication. So this implementation will change the client code to provide authentication using TLS. Launchpad blueprint: https://blueprints.launchpad.net/magnum/+spec/tls-pythonk8sclient 2. 
Generating certificates ---------------------------- This task is mainly on how certificates for both client(magnum-conductor) and server(kube-apiserver) will be generated and who will be the certificate authority(CA). These files can be generated in two ways: 2.1. Magnum script ------------------- This implementation will use standard tool to generate certificates and keys. This script will be registered on Kubernetes master node while creating bay. This script will generate certificates, start the secure kube-apiserver and then register the client certificates at Magnum. 2.2. Using Barbican ------------------- Barbican can also be used as a CA using Dogtag. This implementation will use Barbican to generate certificates. 3. TLS Support in Magnum code ------------------------------ This work mainly involves deploying a secure bay and supporting the use of certificates in Magnum to call Kubernetes API. This implementation can be decomposed into smaller tasks. 3.1. Create secure bay ---------------------- This implementation will deploy a secure kube-apiserver running on Kubernetes master node. To do so following things needs to be done: * Generate certificates * Copy certificates * Start a secure kube-apiserver 3.1.1. Generate certificates ---------------------------- The certificates will be generated using any of the above implementation in section 2. 3.1.2. Copy certificates ------------------------ This depends on how cert and key is generated, the implementation will differ with each case. 3.1.2.1. Using Magnum script ---------------------------- This script will generate both server and client certificates on Kubernetes master node. Hence only client certificates needs to be copied to magnum host node. To copy these files, the script will make a call to magnum-api to store files. 3.1.2.2. Using Barbican ----------------------- When using Barbican, the cert and key will be generated and stored in Barbican itself. 
Either magnum-conductor can fetch the certificates from Barbican and copy on Kubernetes master node or it can be fetched from Kubernetes master node also. 3.1.3. Start a secure kube-apiserver ------------------------------------ Above generated certificates will be used to start a secure kube-apiserver running on Kubernetes master node. Now that we have a secure Kubernetes cluster running, any API call to Kubernetes will be secure. 3.2. Support https ------------------ While running any Kubernetes resource related APIs, magnum-conductor will fetch certificate from magnum database or Barbican and use it to make secure API call. 4. Barbican support to store certificates securely ---------------------------------------------------- Barbican is a REST API designed for the secure storage, provisioning and management of secrets. The client cert and key must be stored securely. This implementation will support Barbican in Magnum to store the sensitive data. Data model impact ----------------- New table 'cert' will be introduced to store the certificates. REST API impact --------------- New API /certs will be introduced to store the certificates. Security impact --------------- After this support, Magnum will be secure to be used in actual production environment. Now all the communication to Kubernetes master node will be secure. The certificates will be generated by Barbican or standard tool signed by trusted CAs. The certificates will be stored safely in Barbican when the Barbican cert storage option is selected by the administrator. Notifications impact -------------------- None Other end user impact --------------------- None Performance impact ------------------ None Other deployer impact --------------------- Deployer will need to install Barbican to store certificates. Developer impact ---------------- None Implementation -------------- Assignee(s) ----------- Primary assignee madhuri(Madhuri Kumari) yuanying(Motohiro Otsuka) Work Items ---------- 1. 
TLS Support in Kubernetes Client code 2. Support for generating keys in Magnum 3. Support creating secure Kubernetes cluster 4. Support Barbican in Magnum to store certificates Dependencies ------------ Barbican(optional) Testing ------- Each commit will be accompanied with unit tests. There will also be functional test to test both good and bad certificates. Documentation Impact -------------------- Add a document explaining how TLS cert and keys can be generated and guide updated with how to use the secure model of bays. References ---------- None magnum-6.1.0/.zuul.yaml0000666000175100017510000001537513244017334015005 0ustar zuulzuul00000000000000- job: name: magnum-functional-base parent: legacy-dsvm-base timeout: 7800 nodeset: legacy-ubuntu-xenial pre-run: playbooks/pre/prepare-workspace.yaml run: playbooks/magnum-functional-base.yaml post-run: playbooks/post/upload-logs.yaml required-projects: - openstack-infra/devstack-gate - openstack/diskimage-builder - openstack/ironic - openstack/ironic-lib - openstack/ironic-python-agent - openstack/magnum - openstack/magnum-tempest-plugin - openstack/pyghmi - openstack/python-ironicclient - openstack/python-magnumclient - openstack/virtualbmc irrelevant-files: - ^.*\.rst$ - ^api-ref/.*$ - ^doc/.*$ - ^specs/.*$ - ^install-guide/.*$ - ^releasenotes/.*$ vars: ironic: 0 ceilometer: 0 swift: 0 horizon: 0 multinode: 0 neutron: 1 tempest: 0 branch_override: default - job: name: magnum-functional-multinode-base parent: legacy-dsvm-base-multinode timeout: 7800 nodeset: legacy-ubuntu-xenial-2-node pre-run: playbooks/pre/prepare-workspace.yaml run: playbooks/magnum-functional-base.yaml post-run: playbooks/post/upload-logs.yaml required-projects: - openstack-infra/devstack-gate - openstack/diskimage-builder - openstack/ironic - openstack/ironic-lib - openstack/ironic-python-agent - openstack/magnum - openstack/pyghmi - openstack/python-ironicclient - openstack/python-magnumclient - openstack/virtualbmc irrelevant-files: - 
^.*\.rst$ - ^api-ref/.*$ - ^doc/.*$ - ^specs/.*$ - ^install-guide/.*$ - ^releasenotes/.*$ vars: ironic: 0 ceilometer: 0 swift: 0 horizon: 0 multinode: 1 neutron: 1 tempest: 0 branch_override: default - job: name: magnum-functional-api parent: magnum-functional-base vars: coe: api - job: name: magnum-functional-k8s parent: magnum-functional-base voting: false vars: coe: k8s irrelevant-files: - ^.*\.rst$ - ^api-ref/.*$ - ^doc/.*$ - ^specs/.*$ - ^install-guide/.*$ - ^releasenotes/.*$ - ^magnum/drivers/swarm.*$ - ^magnum/drivers/mesos.*$ - job: name: magnum-functional-swarm-mode parent: magnum-functional-base voting: false branches: ^(?!stable/(newton|ocata)).*$ vars: coe: swarm-mode irrelevant-files: - ^.*\.rst$ - ^api-ref/.*$ - ^doc/.*$ - ^specs/.*$ - ^install-guide/.*$ - ^releasenotes/.*$ - ^magnum/drivers/swarm_fedora_atomic_v1/.*$ - ^magnum/drivers/k8s.*$ - ^magnum/drivers/mesos.*$ - job: name: magnum-functional-dcos parent: magnum-functional-base voting: false branches: ^(?!stable/(newton|ocata)).*$ vars: coe: dcos irrelevant-files: - ^.*\.rst$ - ^api-ref/.*$ - ^doc/.*$ - ^specs/.*$ - ^install-guide/.*$ - ^releasenotes/.*$ - ^magnum/drivers/k8s.*$ - ^magnum/drivers/mesos.*$ - ^magnum/drivers/swarm.*$ - job: name: magnum-functional-mesos parent: magnum-functional-base voting: false vars: coe: mesos irrelevant-files: - ^.*\.rst$ - ^api-ref/.*$ - ^doc/.*$ - ^specs/.*$ - ^install-guide/.*$ - ^releasenotes/.*$ - ^magnum/drivers/k8s.*$ - ^magnum/drivers/swarm.*$ - job: name: magnum-functional-swarm parent: magnum-functional-base voting: false vars: coe: swarm irrelevant-files: - ^.*\.rst$ - ^api-ref/.*$ - ^doc/.*$ - ^specs/.*$ - ^install-guide/.*$ - ^releasenotes/.*$ - ^magnum/drivers/swarm_fedora_atomic_v2/.*$ - ^magnum/drivers/k8s.*$ - ^magnum/drivers/mesos.*$ - job: name: magnum-functional-k8s-ironic parent: magnum-functional-base voting: false vars: coe: k8s ironic: 1 irrelevant-files: - ^.*\.rst$ - ^api-ref/.*$ - ^doc/.*$ - ^specs/.*$ - ^install-guide/.*$ - 
^releasenotes/.*$ - ^magnum/drivers/swarm.*$ - ^magnum/drivers/mesos.*$ - job: name: magnum-functional-swarm-ironic parent: magnum-functional-base voting: false vars: coe: swarm ironic: 1 irrelevant-files: - ^.*\.rst$ - ^api-ref/.*$ - ^doc/.*$ - ^specs/.*$ - ^install-guide/.*$ - ^releasenotes/.*$ - ^magnum/drivers/k8s.*$ - ^magnum/drivers/mesos.*$ - job: name: magnum-functional-k8s-multinode parent: magnum-functional-multinode-base voting: false vars: coe: k8s irrelevant-files: - ^.*\.rst$ - ^api-ref/.*$ - ^doc/.*$ - ^specs/.*$ - ^install-guide/.*$ - ^releasenotes/.*$ - ^magnum/drivers/swarm.*$ - ^magnum/drivers/mesos.*$ - job: name: magnum-functional-swarm-mode-multinode parent: magnum-functional-multinode-base voting: false branches: ^(?!stable/(newton|ocata)).*$ vars: coe: swarm-mode irrelevant-files: - ^.*\.rst$ - ^api-ref/.*$ - ^doc/.*$ - ^specs/.*$ - ^install-guide/.*$ - ^releasenotes/.*$ - ^magnum/drivers/k8s.*$ - ^magnum/drivers/mesos.*$ - ^magnum/drivers/swarm_fedora_atomic_v1/.*$ - job: name: magnum-buildimages-base parent: legacy-publish-openstack-artifacts timeout: 3600 nodeset: legacy-ubuntu-xenial pre-run: playbooks/pre/prepare-workspace-images.yaml run: playbooks/magnum-buildimages-base.yaml post-run: playbooks/post/upload-images.yaml required-projects: - openstack/dib-utils - openstack/diskimage-builder - openstack/magnum - job: name: magnum-dib-buildimage-fedora-atomic-25 parent: magnum-buildimages-base vars: image_name: fedora-atomic-25 - job: name: magnum-dib-buildimage-ubuntu-mesos parent: magnum-buildimages-base vars: image_name: ubuntu-mesos - job: name: magnum-dib-buildimage-centos-dcos parent: magnum-buildimages-base vars: image_name: centos-dcos - project: check: jobs: - magnum-functional-api - magnum-functional-k8s - magnum-functional-swarm-mode gate: jobs: - magnum-functional-api experimental: jobs: - magnum-functional-dcos - magnum-functional-mesos - magnum-functional-swarm - magnum-functional-k8s-ironic - magnum-functional-swarm-ironic 
- magnum-functional-k8s-multinode - magnum-functional-swarm-mode-multinode periodic: jobs: - magnum-dib-buildimage-fedora-atomic-25 - magnum-dib-buildimage-ubuntu-mesos - magnum-dib-buildimage-centos-dcos magnum-6.1.0/PKG-INFO0000664000175100017510000000354113244017675014137 0ustar zuulzuul00000000000000Metadata-Version: 1.1 Name: magnum Version: 6.1.0 Summary: Container Management project for OpenStack Home-page: http://docs.openstack.org/magnum/latest/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description-Content-Type: UNKNOWN Description: ======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/badges/magnum.svg :target: https://governance.openstack.org/reference/tags/index.html .. Change things from this point on ====== Magnum ====== Magnum is an OpenStack project which offers container orchestration engines for deploying and managing containers as first class resources in OpenStack. 
For more information, please refer to the following resources: * **Free software:** under the `Apache license `_ * **Documentation:** https://docs.openstack.org/magnum/latest/ * **Source:** http://git.openstack.org/cgit/openstack/magnum * **Blueprints:** https://blueprints.launchpad.net/magnum * **Bugs:** http://bugs.launchpad.net/magnum * **REST Client:** http://git.openstack.org/cgit/openstack/python-magnumclient Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 magnum-6.1.0/test-requirements.txt0000666000175100017510000000157513244017343017302 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. # Despite above warning added by global sync process, please use # ascii betical order. 
bandit>=1.1.0 # Apache-2.0 bashate>=0.5.1 # Apache-2.0 coverage!=4.4,>=4.0 # Apache-2.0 doc8>=0.6.0 # Apache-2.0 fixtures>=3.0.0 # Apache-2.0/BSD hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 mock>=2.0.0 # BSD openstackdocstheme>=1.18.1 # Apache-2.0 oslotest>=3.2.0 # Apache-2.0 osprofiler>=1.4.0 # Apache-2.0 os-api-ref>=1.4.0 # Apache-2.0 os-testr>=1.0.0 # Apache-2.0 python-subunit>=1.0.0 # Apache-2.0/BSD pytz>=2013.6 # MIT sphinx!=1.6.6,>=1.6.2 # BSD testrepository>=0.0.18 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD testtools>=2.2.0 # MIT # releasenotes reno>=2.5.0 # Apache-2.0 magnum-6.1.0/tools/0000775000175100017510000000000013244017675014177 5ustar zuulzuul00000000000000magnum-6.1.0/tools/pretty_tox.sh0000777000175100017510000000065213244017334016754 0ustar zuulzuul00000000000000#!/usr/bin/env bash set -o pipefail TESTRARGS=$1 # --until-failure is not compatible with --subunit see: # # https://bugs.launchpad.net/testrepository/+bug/1411804 # # this work around exists until that is addressed if [[ "$TESTARGS" =~ "until-failure" ]]; then python setup.py testr --slowest --testr-args="$TESTRARGS" else python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit-trace -f fi magnum-6.1.0/tools/cover.sh0000777000175100017510000000470613244017334015655 0ustar zuulzuul00000000000000#!/bin/bash # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ALLOWED_EXTRA_MISSING=0 show_diff () { head -1 $1 diff -U 0 $1 $2 | sed 1,2d } if ! git diff --exit-code || ! 
git diff --cached --exit-code then echo "There are uncommitted changes!" echo "Please clean git working directory and try again" exit 1 fi # Checkout master and save coverage report git checkout HEAD^ base_op_count=`grep "op\." -R magnum/db/sqlalchemy/alembic/versions/ | wc -l` baseline_report=$(mktemp -t magnum_coverageXXXXXXX) find . -type f -name "*.pyc" -delete && python setup.py testr --coverage --testr-args="$*" coverage report > $baseline_report mv cover cover-master cat $baseline_report baseline_missing=$(awk 'END { print $3 }' $baseline_report) # Checkout back and save coverage report git checkout - current_op_count=`grep "op\." -R magnum/db/sqlalchemy/alembic/versions/ | wc -l` current_report=$(mktemp -t magnum_coverageXXXXXXX) find . -type f -name "*.pyc" -delete && python setup.py testr --coverage --testr-args="$*" coverage report > $current_report current_missing=$(awk 'END { print $3 }' $current_report) # Show coverage details allowed_missing=$((baseline_missing+ALLOWED_EXTRA_MISSING+current_op_count-base_op_count)) echo "Allowed to introduce missing lines : ${ALLOWED_EXTRA_MISSING}" echo "Missing lines in master : ${baseline_missing}" echo "Missing lines in proposed change : ${current_missing}" if [ $allowed_missing -ge $current_missing ]; then if [ $baseline_missing -lt $current_missing ]; then show_diff $baseline_report $current_report echo "We believe you can test your code with 100% coverage!" else echo "Thank you! You are awesome! Keep writing unit tests! :)" fi exit_code=0 else show_diff $baseline_report $current_report echo "Please write more unit tests, we must maintain our test coverage :( " exit_code=1 fi rm $baseline_report $current_report exit $exit_code magnum-6.1.0/tools/flake8wrap.sh0000777000175100017510000000100313244017334016566 0ustar zuulzuul00000000000000#!/bin/sh # # A simple wrapper around flake8 which makes it possible # to ask it to only verify files changed in the current # git HEAD patch. 
# # Intended to be invoked via tox: # # tox -epep8 -- -HEAD # if test "x$1" = "x-HEAD" ; then shift files=$(git diff --name-only HEAD~1 | tr '\n' ' ') echo "Running flake8 on ${files}" diff -u --from-file /dev/null ${files} | flake8 --max-complexity 10 --diff "$@" else echo "Running flake8 on all files" exec flake8 --max-complexity 10 "$@" fi magnum-6.1.0/setup.cfg0000666000175100017510000000465113244017675014670 0ustar zuulzuul00000000000000[metadata] name = magnum summary = Container Management project for OpenStack description-file = README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org home-page = http://docs.openstack.org/magnum/latest/ classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 Programming Language :: Python :: 3 Programming Language :: Python :: 3.5 [files] data_files = etc/magnum = etc/magnum/api-paste.ini packages = magnum [build_sphinx] source-dir = doc/source build-dir = doc/build all_files = 1 warning-is-error = 1 [upload_sphinx] upload-dir = doc/build/html [compile_catalog] directory = magnum/locale domain = magnum [update_catalog] domain = magnum output_dir = magnum/locale input_file = magnum/locale/magnum.pot [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = magnum/locale/magnum.pot [entry_points] console_scripts = magnum-api = magnum.cmd.api:main magnum-conductor = magnum.cmd.conductor:main magnum-db-manage = magnum.cmd.db_manage:main magnum-driver-manage = magnum.cmd.driver_manage:main oslo.config.opts = magnum = magnum.opts:list_opts magnum.conf = magnum.conf.opts:list_opts oslo.config.opts.defaults = magnum = magnum.common.config:set_cors_middleware_defaults oslo.policy.policies = magnum = 
magnum.common.policies:list_rules magnum.drivers = k8s_fedora_atomic_v1 = magnum.drivers.k8s_fedora_atomic_v1.driver:Driver k8s_coreos_v1 = magnum.drivers.k8s_coreos_v1.driver:Driver swarm_fedora_atomic_v1 = magnum.drivers.swarm_fedora_atomic_v1.driver:Driver swarm_fedora_atomic_v2 = magnum.drivers.swarm_fedora_atomic_v2.driver:Driver mesos_ubuntu_v1 = magnum.drivers.mesos_ubuntu_v1.driver:Driver k8s_fedora_ironic_v1 = magnum.drivers.k8s_fedora_ironic_v1.driver:Driver magnum.database.migration_backend = sqlalchemy = magnum.db.sqlalchemy.migration magnum.cert_manager.backend = barbican = magnum.common.cert_manager.barbican_cert_manager local = magnum.common.cert_manager.local_cert_manager x509keypair = magnum.common.cert_manager.x509keypair_cert_manager [wheel] universal = 1 [extras] osprofiler = osprofiler>=1.4.0 # Apache-2.0 [egg_info] tag_build = tag_date = 0 magnum-6.1.0/functional_creds.conf.sample0000666000175100017510000000076413244017334020511 0ustar zuulzuul00000000000000# Credentials for functional testing [auth] auth_url = http://127.0.0.1:5000/v3 magnum_url = http://127.0.0.1:9511/v1 username = demo project_name = demo project_domain_id = default user_domain_id = default password = password auth_version = v3 insecure=False [admin] user = admin project_name = admin pass = password project_domain_id = default user_domain_id = default [magnum] image_id = fedora-atomic-latest nic_id = public keypair_id = default flavor_id = s1.magnum master_flavor_id = m1.magnum magnum-6.1.0/HACKING.rst0000666000175100017510000000176313244017334014636 0ustar zuulzuul00000000000000Magnum Style Commandments ========================= - Step 1: Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/ - Step 2: Read on Magnum Specific Commandments ---------------------------- - [M302] Change assertEqual(A is not None) by optimal assert like assertIsNotNone(A). 
- [M310] timeutils.utcnow() wrapper must be used instead of direct calls to datetime.datetime.utcnow() to make it easy to override its return value. - [M316] Change assertTrue(isinstance(A, B)) by optimal assert like assertIsInstance(A, B). - [M322] Method's default argument shouldn't be mutable. - [M336] Must use a dict comprehension instead of a dict constructor with a sequence of key-value pairs. - [M338] Use assertIn/NotIn(A, B) rather than assertEqual(A in B, True/False). - [M339] Don't use xrange() - [M340] Check for explicit import of the _ function. - [M352] LOG.warn is deprecated. Enforce use of LOG.warning. - [M353] String interpolation should be delayed at logging calls. magnum-6.1.0/.mailmap0000666000175100017510000000013113244017334014445 0ustar zuulzuul00000000000000# Format is: # # magnum-6.1.0/doc/0000775000175100017510000000000013244017675013604 5ustar zuulzuul00000000000000magnum-6.1.0/doc/examples/0000775000175100017510000000000013244017675015422 5ustar zuulzuul00000000000000magnum-6.1.0/doc/examples/etc/0000775000175100017510000000000013244017675016175 5ustar zuulzuul00000000000000magnum-6.1.0/doc/examples/etc/logrotate.d/0000775000175100017510000000000013244017675020417 5ustar zuulzuul00000000000000magnum-6.1.0/doc/examples/etc/logrotate.d/magnum.logrotate0000666000175100017510000000014113244017334023613 0ustar zuulzuul00000000000000/var/log/magnum/*.log { rotate 14 size 10M missingok compress copytruncate } magnum-6.1.0/doc/examples/etc/systemd/0000775000175100017510000000000013244017675017665 5ustar zuulzuul00000000000000magnum-6.1.0/doc/examples/etc/systemd/system/0000775000175100017510000000000013244017675021211 5ustar zuulzuul00000000000000magnum-6.1.0/doc/examples/etc/systemd/system/magnum-api.service0000666000175100017510000000041613244017334024621 0ustar zuulzuul00000000000000[Unit] Description=OpenStack Magnum API Service After=syslog.target network.target [Service] Type=simple User=magnum ExecStart=/var/lib/magnum/env/bin/magnum-api 
PrivateTmp=true NotifyAccess=all KillMode=process Restart=on-failure [Install] WantedBy=multi-user.target magnum-6.1.0/doc/examples/etc/systemd/system/magnum-conductor.service0000666000175100017510000000050413244017334026046 0ustar zuulzuul00000000000000[Unit] Description=Openstack Magnum Conductor Service After=syslog.target network.target qpidd.service mysqld.service tgtd.service [Service] Type=simple User=magnum ExecStart=/var/lib/magnum/env/bin/magnum-conductor PrivateTmp=true NotifyAccess=all KillMode=process Restart=on-failure [Install] WantedBy=multi-user.target magnum-6.1.0/doc/examples/etc/init/0000775000175100017510000000000013244017675017140 5ustar zuulzuul00000000000000magnum-6.1.0/doc/examples/etc/init/magnum-api.conf0000666000175100017510000000047413244017334022041 0ustar zuulzuul00000000000000description "Magnum API server" start on runlevel [2345] stop on runlevel [!2345] respawn exec start-stop-daemon --start --chuid magnum \ --chdir /var/lib/magnum \ --name magnum-api \ --exec /var/lib/magnum/env/bin/magnum-api -- \ --config-file=/etc/magnum/magnum.conf \ --log-file=/var/log/magnum/magnum-api.log magnum-6.1.0/doc/examples/etc/init/magnum-conductor.conf0000666000175100017510000000051513244017334023264 0ustar zuulzuul00000000000000description "Magnum conductor" start on runlevel [2345] stop on runlevel [!2345] respawn exec start-stop-daemon --start --chuid magnum \ --chdir /var/lib/magnum \ --name magnum-conductor \ --exec /var/lib/magnum/env/bin/magnum-conductor -- \ --config-file=/etc/magnum/magnum.conf \ --log-file=/var/log/magnum/magnum-conductor.log magnum-6.1.0/doc/source/0000775000175100017510000000000013244017675015104 5ustar zuulzuul00000000000000magnum-6.1.0/doc/source/install/0000775000175100017510000000000013244017675016552 5ustar zuulzuul00000000000000magnum-6.1.0/doc/source/install/install-rdo.rst0000666000175100017510000000235213244017334021530 0ustar zuulzuul00000000000000.. 
_install-rdo: Install and configure for Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Container Infrastructure Management service for Red Hat Enterprise Linux 7 and CentOS 7. .. include:: common/prerequisites.rst Install and configure components -------------------------------- #. Install the packages: .. code-block:: console # yum install openstack-magnum-api openstack-magnum-conductor python-magnumclient .. include:: common/configure_2_edit_magnum_conf.rst * Additionally, edit the ``/etc/magnum/magnum.conf`` file: * In the ``[oslo_concurrency]`` section, configure the ``lock_path``: .. code-block:: ini [oslo_concurrency] ... lock_path = /var/lib/magnum/tmp .. include:: common/configure_3_populate_database.rst Finalize installation --------------------- * Start the Container Infrastructure Management services and configure them to start when the system boots: .. code-block:: console # systemctl enable openstack-magnum-api.service \ openstack-magnum-conductor.service # systemctl start openstack-magnum-api.service \ openstack-magnum-conductor.service magnum-6.1.0/doc/source/install/install-debian-manual.rst0000666000175100017510000000142313244017334023437 0ustar zuulzuul00000000000000.. _install-debian-manual: Install and configure for Debian ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Container Infrastructure Management service for Debian. .. include:: common/prerequisites.rst Install and configure components -------------------------------- #. Install the common and library packages: .. code-block:: console # DEBIAN_FRONTEND=noninteractive apt-get install magnum-api magnum-conductor .. include:: common/configure_2_edit_magnum_conf.rst .. include:: common/configure_3_populate_database.rst Finalize installation --------------------- * Restart the Container Infrastructure Management services: .. 
code-block:: console # service magnum-api restart # service magnum-conductor restart magnum-6.1.0/doc/source/install/next-steps.rst0000666000175100017510000000034513244017334021412 0ustar zuulzuul00000000000000.. _next-steps: Next steps ~~~~~~~~~~ Your OpenStack environment now includes the magnum service. To add more services, see the `additional documentation on installing OpenStack `_ . magnum-6.1.0/doc/source/install/install.rst0000666000175100017510000000375713244017334020760 0ustar zuulzuul00000000000000.. _install: Install and configure ~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Container Infrastructure Management service, code-named magnum, on the controller node. This section assumes that you already have a working OpenStack environment with at least the following components installed: Identity service, Image service, Compute service, Networking service, Block Storage service and Orchestration service. See `OpenStack Install Guides `__. To provide access to Docker Swarm or Kubernetes using the native clients (docker or kubectl, respectively) magnum uses TLS certificates. To store the certificates, it is recommended to use the `Key Manager service, code-named barbican `__, or you can save them in magnum's database. Optionally, you can install the following components: - `Load Balancer as a Service (LBaaS v2) `__ to create clusters with multiple masters - `Bare Metal service `__ to create baremetal clusters - `Object Storage service `__ to make private Docker registries available to users - `Telemetry Data Collection service `__ to periodically send magnum-related metrics .. note:: Installation and configuration vary by distribution. .. important:: Magnum creates clusters of compute instances on the Compute service (nova). These instances must have basic Internet connectivity and must be able to reach magnum's API server. Make sure that the Compute and Network services are configured accordingly. .. 
toctree:: :maxdepth: 2 install-debian-manual.rst install-obs.rst install-rdo.rst install-ubuntu.rst install-guide-from-source.rst magnum-6.1.0/doc/source/install/common/0000775000175100017510000000000013244017675020042 5ustar zuulzuul00000000000000magnum-6.1.0/doc/source/install/common/configure_2_edit_magnum_conf.rst0000666000175100017510000000605213244017334026351 0ustar zuulzuul000000000000002. Edit the ``/etc/magnum/magnum.conf`` file: * In the ``[api]`` section, configure the host: .. code-block:: ini [api] ... host = CONTROLLER_IP Replace ``CONTROLLER_IP`` with the IP address on which you wish magnum api should listen. * In the ``[certificates]`` section, select ``barbican`` (or ``x509keypair`` if you don't have barbican installed): * Use barbican to store certificates: .. code-block:: ini [certificates] ... cert_manager_type = barbican .. important:: Barbican is recommended for production environments. * To store x509 certificates in magnum's database: .. code-block:: ini [certificates] ... cert_manager_type = x509keypair * In the ``[cinder_client]`` section, configure the region name: .. code-block:: ini [cinder_client] ... region_name = RegionOne * In the ``[database]`` section, configure database access: .. code-block:: ini [database] ... connection = mysql+pymysql://magnum:MAGNUM_DBPASS@controller/magnum Replace ``MAGNUM_DBPASS`` with the password you chose for the magnum database. * In the ``[keystone_authtoken]`` and ``[trust]`` sections, configure Identity service access: .. code-block:: ini [keystone_authtoken] ... memcached_servers = controller:11211 auth_version = v3 auth_uri = http://controller:5000/v3 project_domain_id = default project_name = service user_domain_id = default password = MAGNUM_PASS username = magnum auth_url = http://controller:35357 auth_type = password admin_user = magnum admin_password = MAGNUM_PASS admin_tenant_name = service [trust] ... 
trustee_domain_name = magnum trustee_domain_admin_name = magnum_domain_admin trustee_domain_admin_password = DOMAIN_ADMIN_PASS trustee_keystone_interface = KEYSTONE_INTERFACE Replace MAGNUM_PASS with the password you chose for the magnum user in the Identity service and DOMAIN_ADMIN_PASS with the password you chose for the ``magnum_domain_admin`` user. Replace KEYSTONE_INTERFACE with either ``public`` or ``internal`` depending on your network configuration. If your instances cannot reach internal keystone endpoint which is often the case in production environments it should be set to ``public``. Default to ``public`` * In the ``[oslo_messaging_notifications]`` section, configure the ``driver``: .. code-block:: ini [oslo_messaging_notifications] ... driver = messaging * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: .. code-block:: ini [DEFAULT] ... transport_url = rabbit://openstack:RABBIT_PASS@controller Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. magnum-6.1.0/doc/source/install/common/prerequisites.rst0000666000175100017510000001721013244017334023473 0ustar zuulzuul00000000000000Prerequisites ------------- Before you install and configure the Container Infrastructure Management service, you must create a database, service credentials, and API endpoints. #. To create the database, complete these steps: * Use the database access client to connect to the database server as the ``root`` user: .. code-block:: console $ mysql -u root -p * Create the ``magnum`` database: .. code-block:: console CREATE DATABASE magnum; * Grant proper access to the ``magnum`` database: .. code-block:: console GRANT ALL PRIVILEGES ON magnum.* TO 'magnum'@'localhost' \ IDENTIFIED BY 'MAGNUM_DBPASS'; GRANT ALL PRIVILEGES ON magnum.* TO 'magnum'@'%' \ IDENTIFIED BY 'MAGNUM_DBPASS'; Replace ``MAGNUM_DBPASS`` with a suitable password. * Exit the database access client. #. 
Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc #. To create the service credentials, complete these steps: * Create the ``magnum`` user: .. code-block:: console $ openstack user create --domain default \ --password-prompt magnum User Password: Repeat User Password: +-----------+----------------------------------+ | Field | Value | +-----------+----------------------------------+ | domain_id | default | | enabled | True | | id | a8ebafc275c54d389dfc1bff8b4fe286 | | name | magnum | +-----------+----------------------------------+ * Add the ``admin`` role to the ``magnum`` user: .. code-block:: console $ openstack role add --project service --user magnum admin .. note:: This command provides no output. * Create the ``magnum`` service entity: .. code-block:: console $ openstack service create --name magnum \ --description "OpenStack Container Infrastructure Management Service" \ container-infra +-------------+-------------------------------------------------------+ | Field | Value | +-------------+-------------------------------------------------------+ | description | OpenStack Container Infrastructure Management Service | | enabled | True | | id | 194faf83e8fd4e028e5ff75d3d8d0df2 | | name | magnum | | type | container-infra | +-------------+-------------------------------------------------------+ #. Create the Container Infrastructure Management service API endpoints: .. 
code-block:: console $ openstack endpoint create --region RegionOne \ container-infra public http://CONTROLLER_IP:9511/v1 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | cb137e6366ad495bb521cfe92d8b8858 | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | 0f7f62a1f1a247d2a4cb237642814d0e | | service_name | magnum | | service_type | container-infra | | url | http://CONTROLLER_IP:9511/v1 | +--------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ container-infra internal http://CONTROLLER_IP:9511/v1 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 17cbc3b6f51449a0a818118d6d62868d | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | 0f7f62a1f1a247d2a4cb237642814d0e | | service_name | magnum | | service_type | container-infra | | url | http://CONTROLLER_IP:9511/v1 | +--------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ container-infra admin http://CONTROLLER_IP:9511/v1 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 30f8888e6b6646d7b5cd14354c95a684 | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | 0f7f62a1f1a247d2a4cb237642814d0e | | service_name | magnum | | service_type | container-infra | | url | http://CONTROLLER_IP:9511/v1 | +--------------+----------------------------------+ Replace ``CONTROLLER_IP`` with the IP magnum listens to. Alternatively, you can use a hostname which is reachable by the Compute instances. #. Magnum requires additional information in the Identity service to manage COE clusters. 
To add this information, complete these steps: * Create the ``magnum`` domain that contains projects and users: .. code-block:: console $ openstack domain create --description "Owns users and projects \ created by magnum" magnum +-------------+-------------------------------------------+ | Field | Value | +-------------+-------------------------------------------+ | description | Owns users and projects created by magnum | | enabled | True | | id | 66e0469de9c04eda9bc368e001676d20 | | name | magnum | +-------------+-------------------------------------------+ * Create the ``magnum_domain_admin`` user to manage projects and users in the ``magnum`` domain: .. code-block:: console $ openstack user create --domain magnum --password-prompt \ magnum_domain_admin User Password: Repeat User Password: +-----------+----------------------------------+ | Field | Value | +-----------+----------------------------------+ | domain_id | 66e0469de9c04eda9bc368e001676d20 | | enabled | True | | id | 529b81cf35094beb9784c6d06c090c2b | | name | magnum_domain_admin | +-----------+----------------------------------+ * Add the ``admin`` role to the ``magnum_domain_admin`` user in the ``magnum`` domain to enable administrative management privileges by the ``magnum_domain_admin`` user: .. code-block:: console $ openstack role add --domain magnum --user-domain magnum --user \ magnum_domain_admin admin .. note:: This command provides no output. magnum-6.1.0/doc/source/install/common/configure_3_populate_database.rst0000666000175100017510000000016513244017334026530 0ustar zuulzuul000000000000003. Populate Magnum database: .. code-block:: console # su -s /bin/sh -c "magnum-db-manage upgrade" magnum magnum-6.1.0/doc/source/install/verify.rst0000666000175100017510000000153613244017334020607 0ustar zuulzuul00000000000000.. _verify: Verify operation ~~~~~~~~~~~~~~~~ Verify operation of the Container Infrastructure Management service. .. note:: Perform these commands on the controller node. #. 
Source the ``admin`` tenant credentials: .. code-block:: console $ . admin-openrc #. To list out the health of the internal services, namely conductor, of magnum, use: .. code-block:: console $ openstack coe service list +----+-----------------------+------------------+-------+ | id | host | binary | state | +----+-----------------------+------------------+-------+ | 1 | controller | magnum-conductor | up | +----+-----------------------+------------------+-------+ .. note:: This output should indicate a ``magnum-conductor`` component on the controller node. magnum-6.1.0/doc/source/install/get_started.rst0000666000175100017510000000137213244017334021606 0ustar zuulzuul00000000000000==================================================== Container Infrastructure Management service overview ==================================================== The Container Infrastructure Management service consists of the following components: ``magnum`` command-line client A CLI that communicates with the ``magnum-api`` to create and manage container clusters. End developers can directly use the magnum REST API. ``magnum-api`` service An OpenStack-native REST API that processes API requests by sending them to the ``magnum-conductor`` via AMQP. ``magnum-conductor`` service Runs on a controller machine and connects to heat to orchestrate a cluster. Additionally, it connects to a Docker Swarm, Kubernetes or Mesos REST API endpoint. magnum-6.1.0/doc/source/install/install-ubuntu.rst0000666000175100017510000000145413244017334022270 0ustar zuulzuul00000000000000.. _install-ubuntu: Install and configure for Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Container Infrastructure Management service for Ubuntu 14.04 (LTS). .. include:: common/prerequisites.rst Install and configure components -------------------------------- #. Install the common and library packages: .. 
code-block:: console # DEBIAN_FRONTEND=noninteractive apt-get install magnum-api magnum-conductor python-magnumclient .. include:: common/configure_2_edit_magnum_conf.rst .. include:: common/configure_3_populate_database.rst Finalize installation --------------------- * Restart the Container Infrastructure Management services: .. code-block:: console # service magnum-api restart # service magnum-conductor restart magnum-6.1.0/doc/source/install/install-guide-from-source.rst0000666000175100017510000001652613244017334024310 0ustar zuulzuul00000000000000.. _install-guide-from-source: Install from source code and configure ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Container Infrastructure Management service for from source code. .. include:: common/prerequisites.rst Install and configure components -------------------------------- 1. Install Magnum from source: a. Install OS-specific prerequisites: * Ubuntu 16.04 (xenial) or higher: .. code-block:: console # apt update # apt install python-dev libssl-dev libxml2-dev \ libmysqlclient-dev libxslt-dev libpq-dev git \ libffi-dev gettext build-essential * Fedora 21 / Centos 7 / RHEL 7 .. code-block:: console # yum install python-devel openssl-devel mysql-devel \ libxml2-devel libxslt-devel postgresql-devel git \ libffi-devel gettext gcc * Fedora 22 or higher .. code-block:: console # dnf install python-devel openssl-devel mysql-devel \ libxml2-devel libxslt-devel postgresql-devel git \ libffi-devel gettext gcc * openSUSE Leap 42.1 .. code-block:: console # zypper install git libffi-devel libmysqlclient-devel \ libopenssl-devel libxml2-devel libxslt-devel \ postgresql-devel python-devel gettext-runtime gcc b. Create magnum user and necessary directories: * Create user: .. code-block:: console # groupadd --system magnum # useradd --home-dir "/var/lib/magnum" \ --create-home \ --system \ --shell /bin/false \ -g magnum \ magnum * Create directories: .. 
code-block:: console # mkdir -p /var/log/magnum # mkdir -p /etc/magnum * Set ownership to directories: .. code-block:: console # chown magnum:magnum /var/log/magnum # chown magnum:magnum /var/lib/magnum # chown magnum:magnum /etc/magnum c. Install virtualenv and python prerequisites: * Install virtualenv and create one for magnum's installation: .. code-block:: console # easy_install -U virtualenv # su -s /bin/sh -c "virtualenv /var/lib/magnum/env" magnum * Install python prerequisites: .. code-block:: console # su -s /bin/sh -c "/var/lib/magnum/env/bin/pip install tox pymysql \ python-memcached" magnum d. Clone and install magnum: .. code-block:: console # cd /var/lib/magnum # git clone https://git.openstack.org/openstack/magnum.git # chown -R magnum:magnum magnum # cd magnum # su -s /bin/sh -c "/var/lib/magnum/env/bin/pip install -r requirements.txt" magnum # su -s /bin/sh -c "/var/lib/magnum/env/bin/python setup.py install" magnum e. Copy api-paste.ini: .. code-block:: console # su -s /bin/sh -c "cp etc/magnum/api-paste.ini /etc/magnum" magnum f. Generate a sample configuration file: .. code-block:: console # su -s /bin/sh -c "/var/lib/magnum/env/bin/tox -e genconfig" magnum # su -s /bin/sh -c "cp etc/magnum/magnum.conf.sample /etc/magnum/magnum.conf" magnum e. Optionally, if you want to customize the policies for Magnum API accesses, you can generate a sample policy file, put it into ``/etc/magnum`` folder for further modifications: .. code-block:: console # su -s /bin/sh -c "/var/lib/magnum/env/bin/tox -e genpolicy" magnum # su -s /bin/sh -c "cp etc/magnum/policy.yaml.sample /etc/magnum/policy.yaml" magnum .. include:: common/configure_2_edit_magnum_conf.rst * Additionally, edit the ``/etc/magnum/magnum.conf`` file: * In the ``[oslo_concurrency]`` section, configure the ``lock_path``: .. code-block:: ini [oslo_concurrency] ... 
lock_path = /var/lib/magnum/tmp * If you decide to customize Magnum policies in ``1.e``, then in the ``[oslo_policy]`` section, configure the ``policy_file``: .. code-block:: ini [oslo_policy] ... policy_file = /etc/magnum/policy.yaml .. note:: Make sure that ``/etc/magnum/magnum.conf`` still have the correct permissions. You can set the permissions again with: # chown magnum:magnum /etc/magnum/magnum.conf 3. Populate Magnum database: .. code-block:: console # su -s /bin/sh -c "/var/lib/magnum/env/bin/magnum-db-manage upgrade" magnum 4. Set magnum for log rotation: .. code-block:: console # cd /var/lib/magnum/magnum # cp doc/examples/etc/logrotate.d/magnum.logrotate /etc/logrotate.d/magnum Finalize installation --------------------- #. Create init scripts and services: * Ubuntu 16.04 or higher, Fedora 21 or higher/RHEL 7/CentOS 7 or openSUSE Leap 42.1: .. code-block:: console # cd /var/lib/magnum/magnum # cp doc/examples/etc/systemd/system/magnum-api.service \ /etc/systemd/system/magnum-api.service # cp doc/examples/etc/systemd/system/magnum-conductor.service \ /etc/systemd/system/magnum-conductor.service #. Start magnum-api and magnum-conductor: * Ubuntu 16.04 or higher, Fedora 21 or higher/RHEL 7/CentOS 7 or openSUSE Leap 42.1: .. code-block:: console # systemctl enable magnum-api # systemctl enable magnum-conductor .. code-block:: console # systemctl start magnum-api # systemctl start magnum-conductor #. Verify that magnum-api and magnum-conductor services are running: * Ubuntu 16.04 or higher, Fedora 21 or higher/RHEL 7/CentOS 7 or openSUSE Leap 42.1: .. code-block:: console # systemctl status magnum-api # systemctl status magnum-conductor Install the command-line client ------------------------------- #. Install OS-specific prerequisites: * Fedora 21/RHEL 7/CentOS 7 .. code-block:: console # yum install python-devel openssl-devel python-virtualenv \ libffi-devel git gcc * Fedora 22 or higher .. 
code-block:: console # dnf install python-devel openssl-devel python-virtualenv \ libffi-devel git gcc * Ubuntu .. code-block:: console # apt update # apt install python-dev libssl-dev python-virtualenv \ libffi-dev git gcc * openSUSE Leap 42.1 .. code-block:: console # zypper install python-devel libopenssl-devel python-virtualenv \ libffi-devel git gcc #. Install the client in a virtual environment: .. code-block:: console $ cd ~ $ git clone https://git.openstack.org/openstack/python-magnumclient.git $ cd python-magnumclient $ virtualenv .magnumclient-env $ .magnumclient-env/bin/pip install -r requirements.txt $ .magnumclient-env/bin/python setup.py install #. Now, you can export the client in your PATH: .. code-block:: console $ export PATH=$PATH:${PWD}/.magnumclient-env/bin/magnum .. note:: The command-line client can be installed on the controller node or on a different host than the service. It is good practice to install it as a non-root user. magnum-6.1.0/doc/source/install/index.rst0000666000175100017510000000142313244017334020405 0ustar zuulzuul00000000000000========================= Magnum Installation Guide ========================= .. toctree:: :maxdepth: 2 get_started.rst install.rst verify.rst launch-instance.rst next-steps.rst The Container Infrastructure Management service codenamed (magnum) is an OpenStack API service developed by the OpenStack Containers Team making container orchestration engines (COE) such as Docker Swarm, Kubernetes and Mesos available as first class resources in OpenStack. Magnum uses Heat to orchestrate an OS image which contains Docker and Kubernetes and runs that image in either virtual machines or bare metal in a cluster configuration. This chapter assumes a working setup of OpenStack following `OpenStack Installation Tutorial `_. magnum-6.1.0/doc/source/install/launch-instance.rst0000666000175100017510000006227613244017334022367 0ustar zuulzuul00000000000000.. 
_launch-instance: Launch an instance ~~~~~~~~~~~~~~~~~~ In environments that include the Container Infrastructure Management service, you can provision container clusters made up of virtual machines or baremetal servers. The Container Infrastructure Management service uses `Cluster Templates `__ to describe how a `Cluster `__ is constructed. In each of the following examples you will create a Cluster Template for a specific COE and then you will provision a Cluster using the corresponding Cluster Template. Then, you can use the appropriate COE client or endpoint to create containers. Create an external network (Optional) ------------------------------------- To create a magnum cluster, you need an external network. If there are no external networks, create one. #. Create an external network with an appropriate provider based on your cloud provider support for your case: .. code-block:: console $ openstack network create public --provider-network-type vxlan \ --external \ --project service +---------------------------+--------------------------------------+ | Field | Value | +---------------------------+--------------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | | | created_at | 2017-03-27T10:09:04Z | | description | | | dns_domain | None | | id | 372170ca-7d2e-48a2-8449-670e4ab66c23 | | ipv4_address_scope | None | | ipv6_address_scope | None | | is_default | False | | mtu | 1450 | | name | public | | port_security_enabled | True | | project_id | 224c32c0dd2e49cbaadfd1cda069f149 | | provider:network_type | vxlan | | provider:physical_network | None | | provider:segmentation_id | 3 | | qos_policy_id | None | | revision_number | 4 | | router:external | External | | segments | None | | shared | False | | status | ACTIVE | | subnets | | | updated_at | 2017-03-27T10:09:04Z | +---------------------------+--------------------------------------+ $ openstack subnet create public-subnet --network public \ --subnet-range 
192.168.1.0/24 \ --gateway 192.168.1.1 \ --ip-version 4 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | allocation_pools | 192.168.1.2-192.168.1.254 | | cidr | 192.168.1.0/24 | | created_at | 2017-03-27T10:46:15Z | | description | | | dns_nameservers | | | enable_dhcp | True | | gateway_ip | 192.168.1.1 | | host_routes | | | id | 04185f6c-ea31-4109-b20b-fd7f935b3828 | | ip_version | 4 | | ipv6_address_mode | None | | ipv6_ra_mode | None | | name | public-subnet | | network_id | 372170ca-7d2e-48a2-8449-670e4ab66c23 | | project_id | d9e40a0aff30441083d9f279a0ff50de | | revision_number | 2 | | segment_id | None | | service_types | | | subnetpool_id | None | | updated_at | 2017-03-27T10:46:15Z | +-------------------+--------------------------------------+ Create a keypair (Optional) --------------------------- To create a magnum cluster, you need a keypair which will be passed in all compute instances of the cluster. If you don't have a keypair in your project, create one. #. Create a keypair on the Compute service: .. code-block:: console $ openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey +-------------+-------------------------------------------------+ | Field | Value | +-------------+-------------------------------------------------+ | fingerprint | 05:be:32:07:58:a7:e8:0b:05:9b:81:6d:80:9a:4e:b1 | | name | mykey | | user_id | 2d4398dbd5274707bf100a9dbbe85819 | +-------------+-------------------------------------------------+ Upload the images required for your clusters to the Image service ----------------------------------------------------------------- The VM versions of Kubernetes and Docker Swarm drivers require a Fedora Atomic image. The following is stock Fedora Atomic image, built by the Atomic team and tested by the Magnum team. #. Download the image: .. 
code-block:: console $ wget https://download.fedoraproject.org/pub/alt/atomic/stable/Fedora-Atomic-27-20180212.2/CloudImages/x86_64/images/Fedora-Atomic-27-20180212.2.x86_64.qcow2 #. Register the image to the Image service setting the ``os_distro`` property to ``fedora-atomic``: .. code-block:: console $ openstack image create \ --disk-format=qcow2 \ --container-format=bare \ --file=Fedora-Atomic-27-20180212.2.x86_64.qcow2\ --property os_distro='fedora-atomic' \ fedora-atomic-latest +------------------+------------------------------------------------------+ | Field | Value | +------------------+------------------------------------------------------+ | checksum | a987b691e23dce54c03d7a57c104b195 | | container_format | bare | | created_at | 2016-09-14T12:58:01Z | | disk_format | qcow2 | | file | /v2/images/81b25935-3400-441a-9f2e-f984a46c89dd/file | | id | 81b25935-3400-441a-9f2e-f984a46c89dd | | min_disk | 0 | | min_ram | 0 | | name | fedora-atomic-latest | | owner | c4b42942156741dfbc4775dbcb032841 | | properties | os_distro='fedora-atomic' | | protected | False | | schema | /v2/schemas/image | | size | 507928064 | | status | active | | tags | | | updated_at | 2016-09-14T12:58:03Z | | virtual_size | None | | visibility | private | +------------------+------------------------------------------------------+ Provision a Docker Swarm cluster and create a container ------------------------------------------------------- Following this example, you will provision a Docker Swarm cluster with one master and one node. Then, using docker's native API you will create a container. #. Create a cluster template for a Docker Swarm cluster using the ``fedora-atomic-latest`` image, ``m1.small`` as the flavor for the master and the node, ``public`` as the external network and ``8.8.8.8`` for the DNS nameserver, using the following command: .. 
code-block:: console $ openstack coe cluster template create swarm-cluster-template \ --image fedora-atomic-latest \ --external-network public \ --dns-nameserver 8.8.8.8 \ --master-flavor m1.small \ --flavor m1.small \ --coe swarm +-----------------------+--------------------------------------+ | Property | Value | +-----------------------+--------------------------------------+ | insecure_registry | - | | labels | {} | | updated_at | - | | floating_ip_enabled | True | | fixed_subnet | - | | master_flavor_id | m1.small | | uuid | 47c6ce77-50ae-43bd-8e2a-06980392693d | | no_proxy | - | | https_proxy | - | | tls_disabled | False | | keypair_id | mykey | | public | False | | http_proxy | - | | docker_volume_size | - | | server_type | vm | | external_network_id | public | | cluster_distro | fedora-atomic | | image_id | fedora-atomic-latest | | volume_driver | - | | registry_enabled | False | | docker_storage_driver | devicemapper | | apiserver_port | - | | name | swarm-cluster-template | | created_at | 2016-09-14T13:05:11+00:00 | | network_driver | docker | | fixed_network | - | | coe | swarm | | flavor_id | m1.small | | master_lb_enabled | False | | dns_nameserver | 8.8.8.8 | +-----------------------+--------------------------------------+ #. Create a cluster with one node and one master using ``mykey`` as the keypair, using the following command: .. code-block:: console $ openstack coe cluster create swarm-cluster \ --cluster-template swarm-cluster-template \ --master-count 1 \ --node-count 1 \ --keypair mykey Request to create cluster 2582f192-480e-4329-ac05-32a8e5b1166b has been accepted. Your cluster is now being created. Creation time depends on your infrastructure's performance. You can check the status of your cluster using the commands: ``openstack coe cluster list`` or ``openstack coe cluster show swarm-cluster``. .. 
code-block:: console $ openstack coe cluster list +--------------------------------------+---------------+---------+------------+--------------+-----------------+ | uuid | name | keypair | node_count | master_count | status | +--------------------------------------+---------------+---------+------------+--------------+-----------------+ | 2582f192-480e-4329-ac05-32a8e5b1166b | swarm-cluster | mykey | 1 | 1 | CREATE_COMPLETE | +--------------------------------------+---------------+---------+------------+--------------+-----------------+ .. code-block:: console $ openstack coe cluster show swarm-cluster +---------------------+------------------------------------------------------------+ | Property | Value | +---------------------+------------------------------------------------------------+ | status | CREATE_COMPLETE | | cluster_template_id | 47c6ce77-50ae-43bd-8e2a-06980392693d | | uuid | 2582f192-480e-4329-ac05-32a8e5b1166b | | stack_id | 3d7bbf1c-49bd-4930-84e0-ab71ba200687 | | status_reason | Stack CREATE completed successfully | | created_at | 2016-09-14T13:36:54+00:00 | | name | swarm-cluster | | updated_at | 2016-09-14T13:38:08+00:00 | | discovery_url | https://discovery.etcd.io/a5ece414689287eca62e35555512bfd5 | | api_address | tcp://172.24.4.10:2376 | | coe_version | 1.2.5 | | master_addresses | ['172.24.4.10'] | | create_timeout | 60 | | node_addresses | ['172.24.4.8'] | | master_count | 1 | | container_version | 1.12.6 | | node_count | 1 | +---------------------+------------------------------------------------------------+ #. Add the credentials of the above cluster to your environment: .. code-block:: console $ mkdir myclusterconfig $ $(openstack coe cluster config swarm-cluster --dir myclusterconfig) The above command will save the authentication artifacts in the `myclusterconfig` directory and it will export the environment variables: DOCKER_HOST, DOCKER_CERT_PATH and DOCKER_TLS_VERIFY. Sample output: .. 
code-block:: console export DOCKER_HOST=tcp://172.24.4.10:2376 export DOCKER_CERT_PATH=myclusterconfig export DOCKER_TLS_VERIFY=True #. Create a container: .. code-block:: console $ docker run busybox echo "Hello from Docker!" Hello from Docker! #. Delete the cluster: .. code-block:: console $ openstack coe cluster delete swarm-cluster Request to delete cluster swarm-cluster has been accepted. Provision a Kubernetes cluster and create a deployment ------------------------------------------------------ Following this example, you will provision a Kubernetes cluster with one master and one node. Then, using Kubernetes's native client ``kubectl``, you will create a deployment. #. Create a cluster template for a Kubernetes cluster using the ``fedora-atomic-latest`` image, ``m1.small`` as the flavor for the master and the node, ``public`` as the external network and ``8.8.8.8`` for the DNS nameserver, using the following command: .. code-block:: console $ openstack coe cluster template create kubernetes-cluster-template \ --image fedora-atomic-latest \ --external-network public \ --dns-nameserver 8.8.8.8 \ --master-flavor m1.small \ --flavor m1.small \ --coe kubernetes +-----------------------+--------------------------------------+ | Property | Value | +-----------------------+--------------------------------------+ | insecure_registry | - | | labels | {} | | updated_at | - | | floating_ip_enabled | True | | fixed_subnet | - | | master_flavor_id | m1.small | | uuid | 0a601cc4-8fef-41aa-8036-d113e719ed7a | | no_proxy | - | | https_proxy | - | | tls_disabled | False | | keypair_id | - | | public | False | | http_proxy | - | | docker_volume_size | - | | server_type | vm | | external_network_id | public | | cluster_distro | fedora-atomic | | image_id | fedora-atomic-latest | | volume_driver | - | | registry_enabled | False | | docker_storage_driver | devicemapper | | apiserver_port | - | | name | kubernetes-cluster-template | | created_at | 2017-05-16T09:53:00+00:00 | | 
network_driver | flannel | | fixed_network | - | | coe | kubernetes | | flavor_id | m1.small | | master_lb_enabled | False | | dns_nameserver | 8.8.8.8 | +-----------------------+--------------------------------------+ #. Create a cluster with one node and one master using ``mykey`` as the keypair, using the following command: .. code-block:: console $ openstack coe cluster create kubernetes-cluster \ --cluster-template kubernetes-cluster-template \ --master-count 1 \ --node-count 1 \ --keypair mykey Request to create cluster b1ef3528-ac03-4459-bbf7-22649bfbc84f has been accepted. Your cluster is now being created. Creation time depends on your infrastructure's performance. You can check the status of your cluster using the commands: ``openstack coe cluster list`` or ``openstack coe cluster show kubernetes-cluster``. .. code-block:: console $ openstack coe cluster list +--------------------------------------+--------------------+---------+------------+--------------+-----------------+ | uuid | name | keypair | node_count | master_count | status | +--------------------------------------+--------------------+---------+------------+--------------+-----------------+ | b1ef3528-ac03-4459-bbf7-22649bfbc84f | kubernetes-cluster | mykey | 1 | 1 | CREATE_COMPLETE | +--------------------------------------+--------------------+---------+------------+--------------+-----------------+ .. 
code-block:: console $ openstack coe cluster show kubernetes-cluster +---------------------+------------------------------------------------------------+ | Property | Value | +---------------------+------------------------------------------------------------+ | status | CREATE_COMPLETE | | cluster_template_id | 0a601cc4-8fef-41aa-8036-d113e719ed7a | | node_addresses | ['172.24.4.5'] | | uuid | b1ef3528-ac03-4459-bbf7-22649bfbc84f | | stack_id | 8296624c-3c0e-45e1-967e-b6ff05105a3b | | status_reason | Stack CREATE completed successfully | | created_at | 2017-05-16T09:58:02+00:00 | | updated_at | 2017-05-16T10:00:02+00:00 | | coe_version | v1.6.7 | | keypair | default | | api_address | https://172.24.4.13:6443 | | master_addresses | ['172.24.4.13'] | | create_timeout | 60 | | node_count | 1 | | discovery_url | https://discovery.etcd.io/69c7cd3b3b06c98b4771410bd166a7c6 | | master_count | 1 | | container_version | 1.12.6 | | name | kubernetes-cluster | +---------------------+------------------------------------------------------------+ #. Add the credentials of the above cluster to your environment: .. code-block:: console $ mkdir -p ~/clusters/kubernetes-cluster $ $(openstack coe cluster config kubernetes-cluster --dir ~/clusters/kubernetes-cluster) The above command will save the authentication artifacts in the directory ``~/clusters/kubernetes-cluster`` and it will export the ``KUBECONFIG`` environment variable: .. code-block:: console export KUBECONFIG=/home/user/clusters/kubernetes-cluster/config #. You can list the controller components of your Kubernetes cluster and check if they are ``Running``: .. 
code-block:: console $ kubectl -n kube-system get po NAME READY STATUS RESTARTS AGE kube-controller-manager-ku-hesuip7l3i-0-5mqijvszepxw-kube-master-rqwmwne7rjh2 1/1 Running 0 1h kube-proxy-ku-hesuip7l3i-0-5mqijvszepxw-kube-master-rqwmwne7rjh2 1/1 Running 0 1h kube-proxy-ku-wmmticfvdr-0-k53p22xmlxvx-kube-minion-x4ly6zfhrrui 1/1 Running 0 1h kube-scheduler-ku-hesuip7l3i-0-5mqijvszepxw-kube-master-rqwmwne7rjh2 1/1 Running 0 1h kubernetes-dashboard-3203831700-zvj2d 1/1 Running 0 1h #. Now, you can create a nginx deployment and verify it is running: .. code-block:: console $ kubectl run nginx --image=nginx --replicas=5 deployment "nginx" created $ kubectl get po NAME READY STATUS RESTARTS AGE nginx-701339712-2ngt8 1/1 Running 0 15s nginx-701339712-j8r3d 1/1 Running 0 15s nginx-701339712-mb6jb 1/1 Running 0 15s nginx-701339712-q115k 1/1 Running 0 15s nginx-701339712-tb5lp 1/1 Running 0 15s #. Delete the cluster: .. code-block:: console $ openstack coe cluster delete kubernetes-cluster Request to delete cluster kubernetes-cluster has been accepted. magnum-6.1.0/doc/source/install/install-obs.rst0000666000175100017510000000202313244017334021522 0ustar zuulzuul00000000000000.. _install-obs: Install and configure for openSUSE and SUSE Linux Enterprise ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Container Infrastructure Management service for openSUSE Leap 42.2 and SUSE Linux Enterprise Server 12 SP2. .. include:: common/prerequisites.rst Install and configure components -------------------------------- #. Install the packages: .. code-block:: console # zypper install openstack-magnum-api openstack-magnum-conductor python-magnumclient .. include:: common/configure_2_edit_magnum_conf.rst .. include:: common/configure_3_populate_database.rst Finalize installation --------------------- * Start the Container Infrastructure Management services and configure them to start when the system boots: .. 
code-block:: console # systemctl enable openstack-magnum-api.service \ openstack-magnum-conductor.service # systemctl start openstack-magnum-api.service \ openstack-magnum-conductor.service magnum-6.1.0/doc/source/admin/0000775000175100017510000000000013244017675016174 5ustar zuulzuul00000000000000magnum-6.1.0/doc/source/admin/magnum-proxy.rst0000666000175100017510000000603513244017334021367 0ustar zuulzuul00000000000000================================================= Using Proxies in magnum if running under firewall ================================================= If you are running magnum behind a firewall then you may need a proxy for using services like docker, kubernetes and mesos. Use these steps when your firewall will not allow you to use those services without a proxy. **NOTE:** This feature has only been tested with the supported cluster type and associated image: Kubernetes and Swarm use the Fedora Atomic image, and Mesos uses the Ubuntu image. Proxy Parameters to define before use ===================================== 1. http-proxy Address of a proxy that will receive all HTTP requests and relay them. The format is a URL including a port number. For example: http://10.11.12.13:8000 or http://abcproxy.com:8000 2. https-proxy Address of a proxy that will receive all HTTPS requests and relay them. The format is a URL including a port number. For example: https://10.11.12.13:8000 or https://abcproxy.com:8000 3. no-proxy A comma separated list of IP addresses or hostnames that should bypass your proxy, and make connections directly. **NOTE:** You may not express networks/subnets. It only accepts names and ip addresses. Bad example: 192.168.0.0/28. Steps to configure proxies. ============================== You can specify all three proxy parameters while creating ClusterTemplate of any coe type. All of proxy parameters are optional. 
openstack coe cluster template create k8s-cluster-template \ --image fedora-atomic-latest \ --keypair testkey \ --external-network public \ --dns-nameserver 8.8.8.8 \ --flavor m1.small \ --coe kubernetes \ --http-proxy <http://abcproxy.com:8000> \ --https-proxy <https://abcproxy.com:8000> \ --no-proxy <172.24.4.4,172.24.4.9,172.24.4.8> openstack coe cluster template create swarm-cluster-template \ --image fedora-atomic-latest \ --keypair testkey \ --external-network public \ --dns-nameserver 8.8.8.8 \ --flavor m1.small \ --coe swarm \ --http-proxy <http://abcproxy.com:8000> \ --https-proxy <https://abcproxy.com:8000> \ --no-proxy <172.24.4.4,172.24.4.9,172.24.4.8> openstack coe cluster template create mesos-cluster-template \ --image ubuntu-mesos \ --keypair testkey \ --external-network public \ --dns-nameserver 8.8.8.8 \ --flavor m1.small \ --coe mesos \ --http-proxy <http://abcproxy.com:8000> \ --https-proxy <https://abcproxy.com:8000> \ --no-proxy <172.24.4.4,172.24.4.9,172.24.4.8> magnum-6.1.0/doc/source/admin/configuring.rst0000666000175100017510000000377013244017334021241 0ustar zuulzuul00000000000000.. Copyright 2016 Hewlett Packard Enterprise Development Company LP All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Configuration ============= Magnum has a number of configuration options which will be detailed here. Magnum Config ------------- The magnum configuration file is called ``magnum.conf``. Magnum Pipeline --------------- The pipeline details are contained in ``api-paste.ini``. 
Healthcheck Middleware ~~~~~~~~~~~~~~~~~~~~~~ This piece of middleware creates an endpoint that allows a load balancer to probe if the API endpoint should be available at the node or not. The healthcheck middleware should be placed early in the pipeline; it is configured in your ``api-paste.ini`` under a section called ``[filter:healthcheck]``. It should look like this:: [filter:healthcheck] paste.filter_factory = oslo_middleware:Healthcheck.factory backends = disable_by_file disable_by_file_path = /etc/magnum/healthcheck_disable The main pipeline using this filter should look something like this, also defined in the ``api-paste.ini``:: [pipeline:main] pipeline = cors healthcheck request_id authtoken api_v1 If you wish to disable a middleware without taking it out of the pipeline, you can create a file under the file path defined by ``disable_by_file_path``, i.e. ``/etc/magnum/healthcheck_disable``. For more information see `oslo.middleware <https://docs.openstack.org/oslo.middleware/latest/>`_. magnum-6.1.0/doc/source/admin/gmr.rst0000666000175100017510000000557613244017334017516 0ustar zuulzuul00000000000000.. Copyright (c) 2014 OpenStack Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Guru Meditation Reports ======================= Magnum contains a mechanism whereby developers and system administrators can generate a report about the state of a running Magnum executable. This report is called a *Guru Meditation Report* (*GMR* for short). 
Generating a GMR ---------------- A *GMR* can be generated by sending the *USR2* signal to any Magnum process with support (see below). The *GMR* will then be outputted as standard error for that particular process. For example, suppose that ``magnum-api`` has process id ``8675``, and was run with ``2>/var/log/magnum/magnum-api-err.log``. Then, ``kill -USR2 8675`` will trigger the Guru Meditation report to be printed to ``/var/log/magnum/magnum-api-err.log``. Structure of a GMR ------------------ The *GMR* is designed to be extensible; any particular executable may add its own sections. However, the base *GMR* consists of several sections: Package Shows information about the package to which this process belongs, including version information. Threads Shows stack traces and thread ids for each of the threads within this process. Green Threads Shows stack traces for each of the green threads within this process (green threads don't have thread ids). Configuration Lists all the configuration options currently accessible via the CONF object for the current process. Adding Support for GMRs to New Executables ------------------------------------------ Adding support for a *GMR* to a given executable is fairly easy. First import the module: .. code-block:: python from oslo_reports import guru_meditation_report as gmr from magnum import version Then, register any additional sections (optional): .. code-block:: python TextGuruMeditation.register_section('Some Special Section', some_section_generator) Finally (under main), before running the "main loop" of the executable (usually ``service.server(server)`` or something similar), register the *GMR* hook: .. code-block:: python TextGuruMeditation.setup_autorun(version) Extending the GMR ----------------- As mentioned above, additional sections can be added to the GMR for a particular executable. 
For more information, see the inline documentation under :mod:`oslo.reports` magnum-6.1.0/doc/source/admin/index.rst0000666000175100017510000000107613244017343020033 0ustar zuulzuul00000000000000Administrator's Guide ===================== Installation & Operations ------------------------- If you are a system administrator running Magnum, this section contains information that should help you understand how to deploy, operate, and upgrade the services. .. toctree:: :maxdepth: 1 Magnum Proxy gmr Troubleshooting FAQ Configuration ------------- The following pages will be helpful in configuring specific aspects of Magnum that may or may not be suitable to every situation. .. toctree:: :maxdepth: 1 configuring
My cluster-create fails with error: "Failed to create trustee XXX in domain XXX" Check the `trustee for cluster`_ Kubernetes cluster-create fails Check the `heat stacks`_, log into the master nodes and check the `Kubernetes services`_ and `etcd service`_. Swarm cluster-create fails Check the `heat stacks`_, log into the master nodes and check the `Swarm services`_ and `etcd service`_. Mesos cluster-create fails Check the `heat stacks`_, log into the master nodes and check the `Mesos services`_. I get the error "Timed out waiting for a reply" when deploying a pod Verify the `Kubernetes services`_ and `etcd service`_ are running on the master nodes. I deploy pods on Kubernetes cluster but the status stays "Pending" The pod status is "Pending" while the Docker image is being downloaded, so if the status does not change for a long time, log into the minion node and check for `Cluster internet access`_. I deploy pods and services on Kubernetes cluster but the app is not working The pods and services are running and the status looks correct, but if the app is performing communication between pods through services, verify `Kubernetes networking`_. Swarm cluster is created successfully but I cannot deploy containers Check the `Swarm services`_ and `etcd service`_ on the master nodes. Mesos cluster is created successfully but I cannot deploy containers on Marathon Check the `Mesos services`_ on the master node. I get a "Protocol violation" error when deploying a container For Kubernetes, check the `Kubernetes services`_ to verify that kube-apiserver is running to accept the request. Check `TLS`_ and `Barbican service`_. My cluster-create fails with a resource error on docker_volume Check for available volume space on Cinder and the `request volume size`_ in the heat template. Run "nova volume-list" to check the volume status. 
Troubleshooting details ======================= Heat stacks ----------- *To be filled in* A cluster is deployed by a set of heat stacks: one top level stack and several nested stack. The stack names are prefixed with the cluster name and the nested stack names contain descriptive internal names like *kube_masters*, *kube_minions*. To list the status of all the stacks for a cluster: heat stack-list -n | grep *cluster-name* If the cluster has failed, then one or more of the heat stacks would have failed. From the stack list above, look for the stacks that failed, then look for the particular resource(s) that failed in the failed stack by: heat resource-list *failed-stack-name* | grep "FAILED" The resource_type of the failed resource should point to the OpenStack service, e.g. OS::Cinder::Volume. Check for more details on the failure by: heat resource-show *failed-stack-name* *failed-resource-name* The resource_status_reason may give an indication on the failure, although in some cases it may only say "Unknown". If the failed resource is OS::Heat::WaitConditionHandle, this indicates that one of the services that are being started on the node is hung. Log into the node where the failure occurred and check the respective `Kubernetes services`_, `Swarm services`_ or `Mesos services`_. If the failure is in other scripts, look for them as `Heat software resource scripts`_. Trustee for cluster ------------------- When a user creates a cluster, Magnum will dynamically create a service account for the cluster. The service account will be used by the cluster to access the OpenStack services (i.e. Neutron, Swift, etc.). A trust relationship will be created between the user who created the cluster (the "trustor") and the service account created for the cluster (the "trustee"). For details, please refer `_. If Magnum fails to create the trustee, check the magnum config file (usually in /etc/magnum/magnum.conf). 
Make sure 'trustee_*' and 'auth_uri' are set and their values are correct: [keystone_authtoken] auth_uri = http://controller:5000/v3 ... [trust] trustee_domain_admin_password = XXX trustee_domain_admin_id = XXX trustee_domain_id = XXX If the 'trust' group is missing, you might need to create the trustee domain and the domain admin: .. code-block:: bash . /opt/stack/devstack/accrc/admin/admin export OS_IDENTITY_API_VERSION=3 unset OS_AUTH_TYPE openstack domain create magnum openstack user create trustee_domain_admin --password secret \ --domain magnum openstack role add --user=trustee_domain_admin --user-domain magnum \ --domain magnum admin . /opt/stack/devstack/functions export MAGNUM_CONF=/etc/magnum/magnum.conf iniset $MAGNUM_CONF trust trustee_domain_id \ $(openstack domain show magnum | awk '/ id /{print $4}') iniset $MAGNUM_CONF trust trustee_domain_admin_id \ $(openstack user show trustee_domain_admin | awk '/ id /{print $4}') iniset $MAGNUM_CONF trust trustee_domain_admin_password secret Then, restart magnum-api and magnum-cond to pick up the new configuration. If the problem still exists, you might want to manually verify your domain admin credential to ensure it has the right privilege. To do that, run the script below with the credentials replaced (you must use the IDs where specified). If it fails, that means the credential you provided is invalid. .. 
code-block:: python from keystoneauth1.identity import v3 as ka_v3 from keystoneauth1 import session as ka_session from keystoneclient.v3 import client as kc_v3 auth = ka_v3.Password( auth_url=YOUR_AUTH_URI, user_id=YOUR_TRUSTEE_DOMAIN_ADMIN_ID, domain_id=YOUR_TRUSTEE_DOMAIN_ID, password=YOUR_TRUSTEE_DOMAIN_ADMIN_PASSWORD) session = ka_session.Session(auth=auth) domain_admin_client = kc_v3.Client(session=session) user = domain_admin_client.users.create( name='anyname', password='anypass') TLS --- In production deployments, operators run the OpenStack APIs using ssl certificates and in private clouds it is common to use self-signed or certificates signed from CAs that they are usually not included in the systems' default CA-bundles. Magnum clusters with TLS enabled have their own CA but they need to make requests to the OpenStack APIs for several reasons. Eg Get the cluster CA and sign node certificates (Keystone, Magnum), signal the Heat API for stack completion, create resources (volumes, load balancers) or get information for each node (Cinder, Neutron, Nova). In these cases, the cluster nodes need the CA used for to run the APIs. To pass the OpenStack CA bundle to the nodes you can set the CA using the `openstack_ca_file` option in the `drivers` section of Magnum's configuration file (usually `/etc/magnum/magnum.conf`). The default drivers in magnum install this CA in the system and set it in all the places it might be needed (eg when configuring the kubernetes cloud provider or for the heat-agents.) The cluster nodes will validate the Certificate Authority by default when making requests to the OpenStack APIs (Keystone, Magnum, Heat). If you need to disable CA validation, the configuration parameter verify_ca can be set to False. More information on `CA Validation `_. 
Barbican service ---------------- *To be filled in* Cluster internet access ----------------------- The nodes for Kubernetes, Swarm and Mesos are connected to a private Neutron network, so to provide access to the external internet, a router connects the private network to a public network. With devstack, the default public network is "public", but this can be replaced by the parameter "external-network" in the ClusterTemplate. The "public" network with devstack is actually not a real external network, so it is in turn routed to the network interface of the host for devstack. This is configured in the file local.conf with the variable PUBLIC_INTERFACE, for example:: PUBLIC_INTERFACE=eth1 If the route to the external internet is not set up properly, the etcd discovery would fail (if using public discovery) and container images cannot be downloaded, among other failures. First, check for connectivity to the external internet by pinging an external IP (the IP shown here is an example; use an IP that works in your case):: ping 8.8.8.8 If the ping fails, there is no route to the external internet. Check the following: - Is PUBLIC_INTERFACE in devstack/local.conf the correct network interface? Does this interface have a route to the external internet? - If "external-network" is specified in the ClusterTemplate, does this network have a route to the external internet? - Is your devstack environment behind a firewall? This can be the case for some enterprises or countries. In this case, consider using a `proxy server `_. - Is the traffic blocked by the security group? Check the `rules of security group `_. - Is your host NAT'ing your internal network correctly? Check your host `iptables `_. - Use *tcpdump* for `networking troubleshooting `_. You can run *tcpdump* on the interface *docker0, flannel0* and *eth0* on the node and then run *ping* to see the path of the message from the container. 
If ping is successful, check that DNS is working:: wget google.com If DNS works, you should get back a few lines of HTML text. If the name lookup fails, check the following: - Is the DNS entry correct in the subnet? Try "neutron subnet-show " for the private subnet and check dns_nameservers. The IP should be either the default public DNS 8.8.8.8 or the value specified by "dns-nameserver" in the ClusterTemplate. - If you are using your own DNS server by specifying "dns-nameserver" in the ClusterTemplate, is it reachable and working? - More help on `DNS troubleshooting `_. Kubernetes networking --------------------- The networking between pods is different and separate from the neutron network set up for the cluster. Kubernetes presents a flat network space for the pods and services and uses different network drivers to provide this network model. It is possible for the pods to come up correctly and be able to connect to the external internet, but they cannot reach each other. In this case, the app in the pods may not be working as expected. For example, if you are trying the `redis example `_, the key:value may not be replicated correctly. In this case, use the following steps to verify the inter-pods networking and pinpoint problems. Since the steps are specific to the network drivers, refer to the particular driver being used for the cluster. Using Flannel as network driver ............................... Flannel is the default network driver for Kubernetes clusters. Flannel is an overlay network that runs on top of the neutron network. It works by encapsulating the messages between pods and forwarding them to the correct node that hosts the target pod. First check the connectivity at the node level. Log into two different minion nodes, e.g. node A and node B, run a docker container on each node, attach to the container and find the IP. 
For example, on node A:: sudo docker run -it alpine # ip -f inet -o a | grep eth0 | awk '{print $4}' 10.100.54.2/24 Similarly, on node B:: sudo docker run -it alpine # ip -f inet -o a | grep eth0 | awk '{print $4}' 10.100.49.3/24 Check that the containers can see each other by pinging from one to another. On node A:: # ping 10.100.49.3 PING 10.100.49.3 (10.100.49.3): 56 data bytes 64 bytes from 10.100.49.3: seq=0 ttl=60 time=1.868 ms 64 bytes from 10.100.49.3: seq=1 ttl=60 time=1.108 ms Similarly, on node B:: # ping 10.100.54.2 PING 10.100.54.2 (10.100.54.2): 56 data bytes 64 bytes from 10.100.54.2: seq=0 ttl=60 time=2.678 ms 64 bytes from 10.100.54.2: seq=1 ttl=60 time=1.240 ms If the ping is not successful, check the following: - Is neutron working properly? Try pinging between the VMs. - Are the docker0 and flannel0 interfaces configured correctly on the nodes? Log into each node and find the Flannel CIDR by:: cat /run/flannel/subnet.env | grep FLANNEL_SUBNET FLANNEL_SUBNET=10.100.54.1/24 Then check the interfaces by:: ifconfig flannel0 ifconfig docker0 The correct configuration should assign flannel0 with the "0" address in the subnet, like *10.100.54.0*, and docker0 with the "1" address, like *10.100.54.1*. - Verify the IP's assigned to the nodes as found above are in the correct Flannel subnet. If this is not correct, the docker daemon is not configured correctly with the parameter *--bip*. Check the systemd service for docker. - Is Flannel running properly? check the `Running Flannel`_. - Ping and try `tcpdump `_ on each network interface along the path between two nodes to see how far the message is able to travel. The message path should be as follows: 1. Source node: docker0 2. Source node: flannel0 3. Source node: eth0 4. Target node: eth0 5. Target node: flannel0 6. Target node: docker0 If ping works, this means the flannel overlay network is functioning correctly. 
The containers created by Kubernetes for pods will be on the same IP subnet as the containers created directly in Docker as above, so they will have the same connectivity. However, the pods still may not be able to reach each other because normally they connect through some Kubernetes services rather than directly. The services are supported by the kube-proxy and rules inserted into the iptables, therefore their networking paths have some extra hops and there may be problems here. To check the connectivity at the Kubernetes pod level, log into the master node and create two pods and a service for one of the pods. You can use the examples provided in the directory */etc/kubernetes/examples/* for the first pod and service. This will start up an nginx container and a Kubernetes service to expose the endpoint. Create another manifest for a second pod to test the endpoint:: cat > alpine.yaml << END apiVersion: v1 kind: Pod metadata: name: alpine spec: containers: - name: alpine image: alpine args: - sleep - "1000000" END kubectl create -f /etc/kubernetes/examples/pod-nginx-with-label.yaml kubectl create -f /etc/kubernetes/examples/service.yaml kubectl create -f alpine.yaml Get the endpoint for the nginx-service, which should route message to the pod nginx:: kubectl describe service nginx-service | grep -e IP: -e Port: IP: 10.254.21.158 Port: 8000/TCP Note the IP and port to use for checking below. Log into the node where the *alpine* pod is running. 
You can find the hosting node by running this command on the master node:: kubectl get pods -o wide | grep alpine | awk '{print $6}' k8-gzvjwcooto-0-gsrxhmyjupbi-kube-minion-br73i6ans2b4 To get the IP of the node, query Nova on devstack:: nova list On this hosting node, attach to the *alpine* container:: export DOCKER_ID=`sudo docker ps | grep k8s_alpine | awk '{print $1}'` sudo docker exec -it $DOCKER_ID sh From the *alpine* pod, you can try to reach the nginx pod through the nginx service using the IP and Port found above:: wget 10.254.21.158:8000 If the connection is successful, you should receive the file *index.html* from nginx. If the connection is not successful, you will get an error message like:: wget: can't connect to remote host (10.100.54.9): No route to host In this case, check the following: - Is kube-proxy running on the nodes? It runs as a container on each node. Check by logging into the minion nodes and running:: sudo docker ps | grep k8s_kube-proxy - Check the log from kube-proxy by running on the minion nodes:: export PROXY=`sudo docker ps | grep "hyperkube proxy" | awk '{print $1}'` sudo docker logs $PROXY - Try additional `service debugging `_. To see what's going on during provisioning:: kubectl get events To get information on a service in question:: kubectl describe services etcd service ------------ The etcd service is used by many other components for key/value pair management, therefore if it fails to start, these other components will not be running correctly either. Check that etcd is running on the master nodes by:: sudo service etcd status -l If it is running correctly, you should see that the service is successfully deployed:: Active: active (running) since .... The log message should show the service being published:: etcdserver: published {Name:10.0.0.5 ClientURLs:[http://10.0.0.5:2379]} to cluster 3451e4c04ec92893 In some cases, the service may show as *active* but may still be stuck in discovery mode and not fully operational. 
The log message may show something like:: discovery: waiting for other nodes: error connecting to https://discovery.etcd.io, retrying in 8m32s If this condition persists, check for `Cluster internet access`_. If the daemon is not running, the status will show the service as failed, something like:: Active: failed (Result: timeout) In this case, try restarting etcd by:: sudo service etcd start If etcd continues to fail, check the following: - Check the log for etcd:: sudo journalctl -u etcd - etcd requires discovery, and the default discovery method is the public discovery service provided by etcd.io; therefore, a common cause of failure is that this public discovery service is not reachable. Check by running on the master nodes:: . /etc/sysconfig/heat-params curl $ETCD_DISCOVERY_URL You should receive something like:: {"action":"get", "node":{"key":"/_etcd/registry/00a6b00064174c92411b0f09ad5466c6", "dir":true, "nodes":[ {"key":"/_etcd/registry/00a6b00064174c92411b0f09ad5466c6/7d8a68781a20c0a5", "value":"10.0.0.5=http://10.0.0.5:2380", "modifiedIndex":978239406, "createdIndex":978239406}], "modifiedIndex":978237118, "createdIndex":978237118} } The list of master IP is provided by Magnum during cluster deployment, therefore it should match the current IP of the master nodes. If the public discovery service is not reachable, check the `Cluster internet access`_. Running Flannel --------------- When deploying a COE, Flannel is available as a network driver for certain COE type. Magnum currently supports Flannel for a Kubernetes or Swarm cluster. Flannel provides a flat network space for the containers in the cluster: they are allocated IP in this network space and they will have connectivity to each other. Therefore, if Flannel fails, some containers will not be able to access services from other containers in the cluster. This can be confirmed by running *ping* or *curl* from one container to another. 
The Flannel daemon is run as a systemd service on each node of the cluster. To check Flannel, run on each node:: sudo service flanneld status If the daemon is running, you should see that the service is successfully deployed:: Active: active (running) since .... If the daemon is not running, the status will show the service as failed, something like:: Active: failed (Result: timeout) .... or:: Active: inactive (dead) .... Flannel daemon may also be running but not functioning correctly. Check the following: - Check the log for Flannel:: sudo journalctl -u flanneld - Since Flannel relies on etcd, a common cause for failure is that the etcd service is not running on the master nodes. Check the `etcd service`_. If the etcd service failed, once it has been restored successfully, the Flannel service can be restarted by:: sudo service flanneld restart - Magnum writes the configuration for Flannel in a local file on each master node. Check for this file on the master nodes by:: cat /etc/sysconfig/flannel-network.json The content should be something like:: { "Network": "10.100.0.0/16", "Subnetlen": 24, "Backend": { "Type": "udp" } } where the values for the parameters must match the corresponding parameters from the ClusterTemplate. Magnum also loads this configuration into etcd, therefore, verify the configuration in etcd by running *etcdctl* on the master nodes:: . /etc/sysconfig/flanneld etcdctl get $FLANNEL_ETCD_KEY/config - Each node is allocated a segment of the network space. Check for this segment on each node by:: grep FLANNEL_SUBNET /run/flannel/subnet.env The containers on this node should be assigned an IP in this range. The nodes negotiate for their segment through etcd, and you can use *etcdctl* on the master node to query the network segment associated with each node:: . 
/etc/sysconfig/flanneld for s in `etcdctl ls $FLANNEL_ETCD_KEY/subnets` do echo $s etcdctl get $s done /atomic.io/network/subnets/10.100.14.0-24 {"PublicIP":"10.0.0.5"} /atomic.io/network/subnets/10.100.61.0-24 {"PublicIP":"10.0.0.6"} /atomic.io/network/subnets/10.100.92.0-24 {"PublicIP":"10.0.0.7"} Alternatively, you can read the full record in etcd by:: curl http://:2379/v2/keys/coreos.com/network/subnets You should receive a JSON snippet that describes all the segments allocated. - This network segment is passed to Docker via the parameter *--bip*. If this is not configured correctly, Docker would not assign the correct IP in the Flannel network segment to the container. Check by:: cat /run/flannel/docker ps -aux | grep docker - Check the interface for Flannel:: ifconfig flannel0 The IP should be the first address in the Flannel subnet for this node. - Flannel has several different backend implementations and they have specific requirements. The *udp* backend is the most general and has no requirement on the network. The *vxlan* backend requires vxlan support in the kernel, so ensure that the image used does provide vxlan support. The *host-gw* backend requires that all the hosts are on the same L2 network. This is currently met by the private Neutron subnet created by Magnum; however, if other network topology is used instead, ensure that this requirement is met if *host-gw* is used. Current known limitation: the image fedora-21-atomic-5.qcow2 has Flannel version 0.5.0. This version has known bugs that prevent the vxlan and host-gw backends from working correctly. Only the backend udp works for this image. Version 0.5.3 and later should work correctly. The image fedora-21-atomic-7.qcow2 has Flannel version 0.5.5. Kubernetes services ------------------- *To be filled in* (How to introspect k8s when heat works and k8s does not) Additional `Kubernetes troubleshooting guide `_ is available. 
Swarm services -------------- *To be filled in* (How to check on a swarm cluster: see membership information, view master, agent containers) Mesos services -------------- *To be filled in* Barbican issues --------------- *To be filled in* Docker CLI ---------- *To be filled in* Request volume size ------------------- *To be filled in* Heat software resource scripts ------------------------------ *To be filled in* For Developers ============== This section is intended to help with issues that developers may run into in the course of their development adventures in Magnum. Troubleshooting in Gate ----------------------- Simulating gate tests (*Note*: This is adapted from Devstack Gate's `README`_ which is worth a quick read to better understand the following) #. Boot a VM as described in the Devstack Gate's `README`_ . #. Provision this VM like so:: apt-get update \ && apt-get upgrade \ # Kernel upgrade, as recommended by README, select to keep existing grub config && apt-get install git tmux vim \ && git clone https://git.openstack.org/openstack-infra/system-config \ && system-config/install_puppet.sh && system-config/install_modules.sh \ && puppet apply \ --modulepath=/root/system-config/modules:/etc/puppet/modules \ -e "class { openstack_project::single_use_slave: install_users => false, ssh_key => \"$( cat .ssh/authorized_keys | awk '{print $2}' )\" }" \ && echo "jenkins ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers \ && cat ~/.ssh/authorized_keys >> /home/jenkins/.ssh/authorized_keys #. Compare ``~/.ssh/authorized_keys`` and ``/home/jenkins/.ssh/authorized_keys``. Your original public SSH key should now be in ``/home/jenkins/.ssh/authorized_keys``. If it's not, explicitly copy it (this can happen if you spin up a VM using ``--key-name ``, for example). #. Assuming all is well up to this point, now it's time to ``reboot`` into the latest kernel #. Once you're done booting into the new kernel, log back in as ``jenkins`` user to continue with setting up the simulation. #. 
Now it's time to set up the workspace:: export REPO_URL=https://git.openstack.org export WORKSPACE=/home/jenkins/workspace/testing export ZUUL_URL=/home/jenkins/workspace-cache2 export ZUUL_REF=HEAD export ZUUL_BRANCH=master export ZUUL_PROJECT=openstack/magnum mkdir -p $WORKSPACE git clone $REPO_URL/$ZUUL_PROJECT $ZUUL_URL/$ZUUL_PROJECT \ && cd $ZUUL_URL/$ZUUL_PROJECT \ && git checkout remotes/origin/$ZUUL_BRANCH #. At this point, you may be wanting to test a specific change. If so, you can pull down the changes in ``$ZUUL_URL/$ZUUL_PROJECT`` directory:: cd $ZUUL_URL/$ZUUL_PROJECT \ && git fetch https://review.openstack.org/openstack/magnum refs/changes/83/247083/12 && git checkout FETCH_HEAD #. Now you're ready to pull down the ``devstack-gate`` scripts that will let you run the gate job on your own VM:: cd $WORKSPACE \ && git clone --depth 1 $REPO_URL/openstack-infra/devstack-gate #. And now you can kick off the job using the following script (the ``devstack-gate`` documentation suggests just copying from the job which can be found in the `project-config `_ repository), naturally it should be executable (``chmod u+x ``):: #!/bin/bash -xe cat > clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org \ openstack-infra/devstack-gate export PYTHONUNBUFFERED=true export DEVSTACK_GATE_TIMEOUT=240 # bump this if you see timeout issues. 
Default is 120 export DEVSTACK_GATE_TEMPEST=0 export DEVSTACK_GATE_NEUTRON=1 # Enable tempest for tempest plugin export ENABLED_SERVICES=tempest export BRANCH_OVERRIDE="default" if [ "$BRANCH_OVERRIDE" != "default" ] ; then export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE fi export PROJECTS="openstack/magnum $PROJECTS" export PROJECTS="openstack/python-magnumclient $PROJECTS" export PROJECTS="openstack/barbican $PROJECTS" export DEVSTACK_LOCAL_CONFIG="enable_plugin magnum git://git.openstack.org/openstack/magnum" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer" # Keep localrc to be able to set some vars in post_test_hook export KEEP_LOCALRC=1 function gate_hook { cd /opt/stack/new/magnum/ ./magnum/tests/contrib/gate_hook.sh api # change this to swarm to run swarm functional tests or k8s to run kubernetes functional tests } export -f gate_hook function post_test_hook { . $BASE/new/devstack/accrc/admin/admin cd /opt/stack/new/magnum/ ./magnum/tests/contrib/post_test_hook.sh api # change this to swarm to run swarm functional tests or k8s to run kubernetes functional tests } export -f post_test_hook cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh Helpful nuances about the Devstack Gate * Main job is in ``project-config``'s `magnum.yaml `_. * Must modify parameters passed in since those are escaped: * Anything with ``{}`` should be set as an environment variable * Anything with ``{{ }}`` should have those brackets changed to single brackets - ``{}``. * As with the documentation for Devstack Gate, you can just create a new file for the job you want, paste in what you want, then ``chmod u+x `` and run it. * Parameters can be found in `projects.yaml `_. This file changes a lot, so it's more reliable to say that you can search for the magnum jobs where you'll see examples of what gets passed in. 
* Three jobs are usually run as a part of Magnum gate, all of which are found in ``project-config``'s `macros.yml `_: * link-logs * net-info * devstack-checkout * After you run a job, it's ideal to clean up and start over with a fresh VM to best simulate the Devstack Gate environment. .. _README: https://github.com/openstack-infra/devstack-gate/blob/master/README.rst#simulating-devstack-gate-tests P magnum-6.1.0/doc/source/configuration/0000775000175100017510000000000013244017675017753 5ustar zuulzuul00000000000000magnum-6.1.0/doc/source/configuration/sample-policy.rst0000666000175100017510000000046613244017334023263 0ustar zuulzuul00000000000000==================== Policy configuration ==================== Configuration ~~~~~~~~~~~~~ The following is an overview of all available policies in Magnum. For a sample configuration file, refer to :doc:`samples/policy-yaml`. .. show-policy:: :config-file: ../../etc/magnum/magnum-policy-generator.conf magnum-6.1.0/doc/source/configuration/sample-config.rst0000666000175100017510000000076213244017334023230 0ustar zuulzuul00000000000000============================ Magnum Configuration Options ============================ The following is a sample Magnum configuration for adaptation and use. It is auto-generated from Magnum when this documentation is built, so if you are having issues with an option, please compare your version of Magnum with the version of this documentation. The sample configuration can also be viewed in :download:`file form `. .. literalinclude:: /_static/magnum.conf.sample magnum-6.1.0/doc/source/configuration/index.rst0000666000175100017510000000025413244017334021607 0ustar zuulzuul00000000000000Sample Configuration and Policy File ------------------------------------ .. 
toctree:: :maxdepth: 2 sample-config.rst sample-policy.rst samples/index.rst magnum-6.1.0/doc/source/configuration/samples/0000775000175100017510000000000013244017675021417 5ustar zuulzuul00000000000000magnum-6.1.0/doc/source/configuration/samples/policy-yaml.rst0000666000175100017510000000034413244017334024403 0ustar zuulzuul00000000000000=========== policy.yaml =========== Use the ``policy.yaml`` file to define additional access controls that apply to the Container Infrastructure Management service: .. literalinclude:: ../../_static/magnum.policy.yaml.sample magnum-6.1.0/doc/source/configuration/samples/index.rst0000666000175100017510000000043013244017334023247 0ustar zuulzuul00000000000000========================== Sample configuration files ========================== Configuration files can alter how Magnum behaves at runtime and by default are located in ``/etc/magnum/``. Links to sample configuration files can be found below: .. toctree:: policy-yaml.rst magnum-6.1.0/doc/source/conf.py0000666000175100017510000000617113244017334016402 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys sys.path.insert(0, os.path.abspath('../..')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.graphviz', 'stevedore.sphinxext', 'openstackdocstheme', 'oslo_config.sphinxconfiggen', 'oslo_policy.sphinxext', 'oslo_policy.sphinxpolicygen', ] # openstackdocstheme options repository_name = 'openstack/magnum' bug_project = 'magnum' bug_tag = '' config_generator_config_file = '../../etc/magnum/magnum-config-generator.conf' sample_config_basename = '_static/magnum' policy_generator_config_file = '../../etc/magnum/magnum-policy-generator.conf' sample_policy_basename = '_static/magnum' # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'magnum' copyright = u'2013, OpenStack Foundation' # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # The short X.Y version. from magnum.version import version_info as magnum_version version = magnum_version.canonical_version_string() # The full version, including alpha/beta/rc tags. release = magnum_version.version_string_with_vcs() # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] html_theme = 'openstackdocs' # html_static_path = ['static'] # Output file base name for HTML help builder. htmlhelp_basename = '%sdoc' % project html_last_updated_fmt = '%Y-%m-%d %H:%M' # Grouping the document tree into LaTeX files. 
List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', '%s.tex' % project, u'%s Documentation' % project, u'OpenStack Foundation', 'manual'), ] # Example configuration for intersphinx: refer to the Python standard library. # intersphinx_mapping = {'http://docs.python.org/': None} magnum-6.1.0/doc/source/contributor/0000775000175100017510000000000013244017675017456 5ustar zuulzuul00000000000000magnum-6.1.0/doc/source/contributor/functional-test.rst0000666000175100017510000001012313244017334023316 0ustar zuulzuul00000000000000======================== Running functional tests ======================== This is a guide for developers who want to run functional tests in their local machine. Prerequisite ============ You need to have a Magnum instance running somewhere. If you are using devstack, follow the developer quickstart guide to deploy Magnum in a devstack environment ``_ Configuration ============= The functional tests require a couple configuration files, so you'll need to generate them yourself. For devstack ------------ If you're using devstack, you can copy and modify the devstack configuration:: cd /opt/stack/magnum cp /opt/stack/tempest/etc/tempest.conf /opt/stack/magnum/etc/tempest.conf cp functional_creds.conf.sample functional_creds.conf # update the IP address HOST=$(iniget /etc/magnum/magnum.conf api host) sed -i "s/127.0.0.1/$HOST/" functional_creds.conf # update admin password . /opt/stack/devstack/openrc admin admin iniset functional_creds.conf admin pass $OS_PASSWORD # update demo password . /opt/stack/devstack/openrc demo demo iniset functional_creds.conf auth password $OS_PASSWORD Set the DNS name server to be used by your cluster nodes (e.g. 8.8.8.8):: # update DNS name server . /opt/stack/devstack/openrc demo demo iniset functional_creds.conf magnum dns_nameserver Create the necessary keypair and flavor:: . 
/opt/stack/devstack/openrc admin admin openstack keypair create --public-key ~/.ssh/id_rsa.pub default openstack flavor create --id 100 --ram 1024 --disk 10 --vcpus 1 m1.magnum openstack flavor create --id 200 --ram 512 --disk 10 --vcpus 1 s1.magnum . /opt/stack/devstack/openrc demo demo openstack keypair create --public-key ~/.ssh/id_rsa.pub default You may need to explicitly upgrade required packages if you've installed them before and their versions become too old:: UPPER_CONSTRAINTS=/opt/stack/requirements/upper-constraints.txt sudo pip install -c $UPPER_CONSTRAINTS -U -r test-requirements.txt Outside of devstack ------------------- If you are not using devstack, you'll need to create the configuration files. The /etc/tempest.conf configuration file is documented here ``_ Here's a reasonable sample of tempest.conf settings you might need:: [auth] use_dynamic_credentials=False test_accounts_file=/tmp/etc/magnum/accounts.yaml admin_username=admin admin_password=password admin_project_name=admin [identity] disable_ssl_certificate_validation=True uri=https://identity.example.com/v2.0 auth_version=v2 region=EAST [identity-feature-enabled] api_v2 = true api_v3 = false trust = false [oslo_concurrency] lock_path = /tmp/ [magnum] image_id=22222222-2222-2222-2222-222222222222 nic_id=11111111-1111-1111-1111-111111111111 keypair_id=default flavor_id=small magnum_url=https://magnum.example.com/v1 [debug] trace_requests=true A sample functional_creds.conf can be found in the root of this project named functional_creds.conf.sample When you run tox, be sure to specify the location of your tempest.conf using TEMPEST_CONFIG_DIR:: export TEMPEST_CONFIG_DIR=/tmp/etc/magnum/ tox -e functional-api Execution ========= Magnum has different functional tests for each COE and for the API. All the environments are detailed in Magnum's tox.ini:: cat tox.ini | grep functional- | awk -F: '{print $2}' | sed s/]// To run a particular subset of tests, specify that group as a tox environment. 
For example, here is how you would run all of the kubernetes tests:: tox -e functional-k8s To run a specific test or group of tests, specify the test path as a positional argument:: tox -e functional-k8s -- magnum.tests.functional.k8s.v1.test_k8s_python_client.TestBayModelResource To avoid creating multiple clusters simultaneously, you can execute the tests with concurrency 1:: tox -e functional-swarm -- --concurrency 1 magnum-6.1.0/doc/source/contributor/policies.rst0000666000175100017510000006332313244017334022020 0ustar zuulzuul00000000000000########################### Magnum Development Policies ########################### .. contents:: Magnum is made possible by a wide base of contributors from numerous countries and time zones around the world. We work as a team in accordance with the `Guiding Principles `_ of the OpenStack Community. We all want to be valued members of a successful team on an inspiring mission. Code contributions are merged into our code base through a democratic voting process. Anyone may vote on patches submitted by our contributors, and everyone is encouraged to make actionable and helpful suggestions for how patches can be improved prior to merging. We strive to strike a sensible balance between the speed of our work, and the quality of each contribution. This document describes the correct balance in accordance with the prevailing wishes of our team. This document is an extension of the `OpenStack Governance `_ that explicitly converts our tribal knowledge into a codified record. If any conflict is discovered between the OpenStack governance, and this document, the OpenStack documents shall prevail. ********************* Team Responsibilities ********************* Responsibilities for Everyone ============================= `Everyone` in our community is expected to know and comply with the `OpenStack Community Code of Conduct `_. We all need to work together to maintain a thriving team that enjoys working together to solve challenges. 
Responsibilities for Contributors ================================= When making contributions to any Magnum code repository, contributors shall expect their work to be peer reviewed. See `Merge Criteria`_ for details about how reviewed code is approved for merge. Expect reviewers to vote against merging a patch, along with actionable suggestions for improvement prior to merging the code. Understand that such a vote is normal, and is essential to our quality process. If you receive votes against your review submission, please revise your work in accordance with any requests, or leave comments indicating why you believe the work should be further considered without revision. If you leave your review without further comments or revision for an extended period, you should mark your patch as `Abandoned`, or it may be marked as `Abandoned` by another team member as a courtesy to you. A patch with no revisions for multiple weeks should be abandoned, or changed to work in progress (WIP) with the `workflow-1` flag. We want all code in the review queue to be actionable by reviewers. Note that an `Abandoned` status shall be considered temporary, and that your patch may be restored and revised if and when you are ready to continue working on it. Note that a core reviewer may un-abandon a patch to allow subsequent revisions by you or another contributor, as needed. When making revisions to patches, please acknowledge and confirm each previous review comment as Done or with an explanation for why the comment was not addressed in your subsequent revision. Summary of Contributor Responsibilities --------------------------------------- * Includes the `Everyone` responsibilities, plus: * Recognize that revisions are a normal part of our review process. * Make revisions to your patches to address reviewer comments. * Mark each inline comment as `Done` once it has been addressed. * Indicate why any requests have not been acted upon. 
* Set `workflow-1` until a patch is ready for merge consideration. * Consider patches without requested revisions as abandoned after a few weeks. Responsibilities for Reviewers ============================== Each reviewer is responsible for upholding the quality of our code. By making constructive and actionable requests for revisions to patches, together we make better software. When making requests for revisions, each reviewer shall carefully consider our aim to merge contributions in a timely manner, while improving them. **Contributions do not need to be perfect in order to be merged.** You may make comments with a "0" vote to call out stylistic preferences that will not result in a material change to the software if/when resolved. If a patch improves our code but has been through enough revisions that delaying it further is worse than including it now in imperfect form, you may file a tech-debt bug ticket against the code, and vote to merge the imperfect patch. When a reviewer requests a revision to a patch, he or she is expected to review the subsequent revision to verify the change addressed the concern. Summary of Reviewer Responsibilities ------------------------------------ * Includes the Everyone responsibilities, plus: * Uphold quality of our code. * Provide helpful and constructive requests for patch revisions. * Carefully balance need to keep moving while improving contributions. * Submit tech-debt bugs to merge imperfect code with known problems. * Review your requested revisions to verify them. Responsibilities for Core Reviewers =================================== Core reviewers have all the responsibilities mentioned above, as well as a responsibility to judge the readiness of a patch for merge, and to set the `workflow+1` flag to order a patch to be merged once at least one other core reviewers has issued a +2 vote. See: `Merge Criteria`_. Reviewers who use the -2 vote shall: 1. 
Explain what scenarios can/will lift the -2 or downgrade it to a -1 (non-sticky), or explain "this is unmergable for reason ". Non-negotiable reasons such as breaks API contract, or introduces fundamental security issues are acceptable. 2. Recognize that a -2 needs more justification than a -1 does. Both require actionable notes, but a -2 comment shall outline the reason for the sticky vote rather than a -1. 3. Closely monitor comments and revisions to that review so the vote is promptly downgraded or removed once addressed by the contributor. All core reviewers shall be responsible for setting a positive and welcoming tone toward other reviewers and contributors. Summary of Core Reviewer Responsibilities ----------------------------------------- * Includes the Reviewer responsibilities, plus: * Judge readiness of patches for merge. * Approve patches for merge when requirements are met. * Set a positive and welcoming tone toward other reviewers and contributors. PTL Responsibilities ==================== In accordance with our `Project Team Guide for PTLs `_ our PTL carries all the responsibilities referenced above plus: * Select and target blueprints for each release cycle. * Determine Team Consensus. Resolve disagreements among our team. * May delegate his/her responsibilities to others. * Add and remove core reviewers in accordance with his/her judgement. * Note that in accordance with the Project Team Guide, selection or removal of core reviewers is not a democratic process. * Our PTL shall maintain a core reviewer group that works well together as a team. Our PTL will seek advice from our community when making such changes, but ultimately decides. * Clearly communicate additions to the developer mailing list. ########################## Our Development Philosophy ########################## ******** Overview ******** * Continuous iterative improvements. * Small contributions preferred. * Perfect is the enemy of good. * We need a compass, not a master plan. 
********** Discussion ********** We believe in making continuous iterative improvements to our software. Making several small improvements is preferred over making fewer large changes. Contributions of about perhaps 400 lines of change or less are considered ideal because they are easier to review. This makes them more efficient from a review perspective than larger contributions are, because they get reviewed more quickly, and are faster to revise than larger works. We also encourage unrelated changes to be contributed in separate patches to make reasoning about each one simpler. Although we should strive for perfection in our work, we must recognize that what matters more than absolute perfection is that our software is consistently improving over time. When contributions are slowed down by too many revisions, we should decide to merge code even when it is imperfect, as long as we have systematically tracked the weaknesses so we can revisit them with subsequent revision efforts. Rule of Thumb ============= Our rule of thumb shall be the answer to two simple questions: 1. Is this patch making Magnum better? 2. Will this patch cause instability, or prevent others from using Magnum effectively? If the answers respectively are *yes* and *no*, and our objections can be effectively addressed in a follow-up patch, then we should decide to merge code with tech-debt bug tickets to systematically track our desired improvements. ********************* How We Make Decisions ********************* Team Consensus ============== On the Magnum team, we rely on Team Consensus to make key decisions. Team Consensus is the harmonious and peaceful agreement of the majority of our participating team. That means that we seek a clear indication of agreement of those engaged in discussion of a topic. Consensus shall not be confused with the concept of Unanimous Consent where all participants are in full agreement. Our decisions do not require Unanimous Consent. 
We may still have a team consensus even if we have a small number of team members who disagree with the majority viewpoint. We must recognize that we will not always agree on every key decision. What's more important than our individual position on an argument is that the interests of our team are met. We shall take reasonable efforts to address all opposition by fairly considering it before making a decision. Although Unanimous Consent is not required to make a key decision, we shall not overlook legitimate questions or concerns. Once each such concern has been addressed, we may advance to making a determination of Team Consensus. Some code level changes are controversial in nature. If this happens, and a core reviewer judges the minority viewpoint to be reasonably considered, he or she may conclude we have Team Consensus and approve the patch for merge using the normal voting guidelines. We shall allow reasonable time for discussion and socialization when controversial decisions are considered. If any contributor disagrees with a merged patch, and believes our decision should be reconsidered, (s)he may consult our `Reverting Patches`_ guidelines. No Deadlocks ============ We shall not accept any philosophy of "agree to disagree". This form of deadlock is not decision making, but the absence of it. Instead, we shall proceed to decision making in a timely fashion once all input has been fairly considered. We shall accept when a decision does not go our way. Handling Disagreement ===================== When we disagree, we shall first consult the `OpenStack Community Code of Conduct `_ for guidance. In accordance with our code of conduct, our disagreements shall be handled with patience, respect, and fair consideration for those who don't share the same point of view. When we do not agree, we take care to ask why. We strive to understand the reasons we disagree, and seek opportunities to reach a compromise. 
Our PTL is responsible for determining Team Consensus when it can not be reached otherwise. In extreme cases, it may be possible to appeal a PTL decision to the `OpenStack TC `_. ******************* Open Design Process ******************* One of the `four open `_ principles embraced by the OpenStack community is Open Design. We collaborate openly to design new features and capabilities, as well as planning major improvements to our software. We use multiple venues to conduct our design, including: * Written specifications * Blueprints * Bug tickets * PTG meetings * Summit meetings * IRC meetings * Mailing list discussions * Review comments * IRC channel discussion The above list is ordered by formality level. Notes and/or minutes from meetings shall be recorded in etherpad documents so they can be accessed by participants not present in the meetings. Meetings shall be open, and shall not intentionally exclude any stakeholders. Specifications ============== The most formal venue for open design are written specifications. These are RST format documents that are proposed in the magnum-specs code repository by release cycle name. The repository holds a template for the format of the document, as required by our PTL for each release cycle. Specifications are intended to be a high level description of a major feature or capability, expressed in a way to demonstrate that the feature has been well contemplated, and is acceptable by Team Consensus. Using specifications allows us to change direction without requiring code rework because input can be considered before code has been written. Specifications do not require specific implementation details. They shall describe the implementation in enough detail to give reviewers a high level sense of what to expect, with examples to make new concepts clear. We do not require specifications that detail every aspect of the implementation. 
We recognize that it is more effective to express implementations with patches than conveying them in the abstract. If a proposed patch set for an implementation is not acceptable, we can address such concerns using review comments on those patches. If a reviewer has an alternate idea for implementation, they are welcome to develop another patch in WIP or completed form to demonstrate an alternative approach for consideration. This option for submitting an alternative review is available for alternate specification ideas that reach beyond the scope of a simple review comment. Offering reviewers multiple choices for contributions is welcome, and is not considered wasteful. Implementations of features do not require merged specifications. However, major features or refactoring should be expressed in a specification so reviewers will know what to expect prior to considering code for review. Contributors are welcome to start implementation before the specifications are merged, but should be ready to revise the implementation as needed to conform with changes in the merged specification. Reviews ======= A review is a patch set that includes a proposal for inclusion in our code base. We follow the process outlined in the `Code Review `_ section of the `OpenStack Developer's Guide `_. The following workflow states may by applied to each review: ========== ================== ============================================= State Meaning Detail ========== ================== ============================================= workflow-1 Work in progress This patch is submitted for team input, but should not yet be considered for merge. May be set by a core reviewer as a courtesy. It can be set after workflow+1 but prior to merge in order to prevent a gate breaking merge. workflow-0 Ready for reviews This patch should be considered for merge. workflow+1 Approved This patch has received at least two +2 votes, and is approved for merge. Also known as a "+A" vote. 
========== ================== ============================================= The following votes may be applied to a review: ====== ==================================================================== Vote Meaning ====== ==================================================================== -2 Do Not Merge * WARNING: Use extreme caution applying this vote, because contributors perceive this action as hostile unless it is accompanied with a genuine offer to help remedy a critical concern collaboratively. * This vote is a veto that indicates a critical problem with the contribution. It is sticky, meaning it must be removed by the individual who added it, even if further revisions are made. * All -2 votes shall be accompanied with a polite comment that clearly states what can be changed by the contributor to result in reversal or downgrade of the vote to a -1. * Core reviewers may use this vote: * To indicate a critical problem to address, such as a security vulnerability that other core reviewers may be unable to recognize. * To indicate a decision that the patch is not consistent with the direction of the project, subsequent to conference with the PTL about the matter. * The PTL may use this vote: * To indicate a decision that the patch is not consistent with the direction of the project. * While coordinating a release to prevent incompatible changes from merging before the release is tagged. * To address a critical concern with the contribution. * Example uses of this vote that are not considered appropriate: * To ensure more reviews before merge. * To block competing patches. * In cases when you lack the time to follow up closely afterward. * To avoid a -2 vote on your contribution, discuss your plans with the development team prior to writing code, and post a WIP (`workflow-1`) patch while you are working on it, and ask for input before you submit it for merge review. 
-1 This patch needs further work before it can be merged * This vote indicates an opportunity to make our code better before it is merged. * It asks the submitter to make a revision in accordance with your feedback before core reviewers should consider this code for merge. * This vote shall be accompanied with constructive and actionable feedback for how to improve the submission. * If you use a -1 vote to ask a question, and the contributor answers the question, please respond acknowledging the answer. Either change your vote or follow up with additional rationale for why this should remain a -1 comment. * These votes will be cleared when you make a revision to a patch set, and resubmit it for review. * NOTE: Upon fair consideration of the viewpoint shared with this vote, reviewers are encouraged to vote in accordance with their own view of the contribution. This guidance applies when any reviewer (PTL, core, etc.) has voted against it. Such opposing views must be freely expressed to reach Team Consensus. When you agree with a -1 vote, you may also vote -1 on the review to echo the same concern. 0 No Score * Used to make remarks or ask questions that may not require a revision to answer. * Used to confirm that your prior -1 vote concern was addressed. +1 Looks good to me, but someone else must approve * Used to validate the quality of a contribution and express agreement with the implementation. * Resist the temptation to blindly +1 code without reviewing it in sufficient detail to form an opinion. * A core reviewer may use this if they: * Provided a revision to the patch to fix something, but agree with the rest of the patch. * Agree with the patch but have outstanding questions that do not warrant a -1 but would be nice to have answered. * Agree with the patch with some uncertainty before using a +2. It can indicate support while awaiting test results or additional input from others. 
+2 Looks good to me (core reviewer) * Used by core reviewers to indicate acceptance of the patch in its current form. * Two of these votes are required for +A. * Apply our `Rule of Thumb`_ +A Approval for merge * This means setting the workflow+1 state, and is typically added together with the final +2 vote upon `Merge Criteria`_ being met. ====== ==================================================================== Merge Criteria -------------- We want code to merge relatively quickly in order to keep a rapid pace of innovation. Rather than asking reviewers to wait a prescribed arbitrary time before merging patches, we instead use a simple `2 +2s` policy for approving new code for merge. The following criteria apply when judging readiness to merge a patch: 1. All contributions shall be peer reviewed and approved with a +2 vote by at least two core reviewers prior to being merged. Exceptions known as `Fast Merge`_ commits may bypass peer review as allowed by this policy. 2. The approving reviewer shall verify that all open questions and concerns have been adequately addressed prior to voting +A by adding the workflow+1 to merge a patch. This judgement verifies that `Team Consensus`_ has been reached. Note: We discourage any `workflow+1` vote on patches that only have two +2 votes from cores from the same affiliation. This guideline applies when reviewer diversity allows for it. See `Reverting Patches`_ for details about how to remedy mistakes when code is merged too quickly. Reverting Patches ----------------- Moving quickly with our `Merge Criteria`_ means that sometimes we might make mistakes. If we do, we may revert problematic patches. The following options may be applied: 1. Any contributor may revert a change by submitting a patch to undo the objection and include a reference to the original patch in the commit message. The commit message shall include clear rationale for considering the revert. Normal voting rules apply. 2. 
Any contributor may re-implement a feature using an alternate approach at any time, even after a previous implementation has merged. Normal voting rules apply. 3. If a core reviewer wishes to revert a change (s)he may use the options described above, or may apply the `Fast Revert`_ policy. Fast Merge ---------- Sometimes we need to merge code quickly by bypassing the peer review process when justified. Allowed exceptions include: * PTL (Project Team Lead) Intervention / Core intervention * Emergency un-break gate. * `VMT `_ embargoed patch submitted to Gerrit. * Automatic proposals (e.g. requirements updates). * PTL / Core discretion (with comment) that a patch already received a +2 but minor (typo/rebase) fixes were addressed by another core reviewer and the `correcting` reviewer has opted to carry forward the other +2. The `correcting` reviewer shall not be the original patch submitter. We recognize that mistakes may happen when changes are merged quickly. When concerns with any `Fast Merge` surface, our `Fast Revert`_ policy may be applied. Fast Revert ----------- This policy was adapted from nova's `Reverts for Retrospective Vetos `_ policy in 2017. Sometimes our simple `2 +2s` approval policy will result in errors when we move quickly. These errors might be a bug that was missed, or equally importantly, it might be that other cores feel that there is a need for further discussion on the implementation of a given piece of code. Rather than an enforced time-based solution - for example, a patch could not be merged until it has been up for review for 3 days - we have chosen an honor-based system of `Team Consensus`_ where core reviewers do not approve controversial patches until proposals are sufficiently socialized and everyone has a chance to raise any concerns. 
Recognizing that mistakes can happen, we also have a policy where contentious patches which were quickly approved may be reverted so that the discussion around the proposal may continue as if the patch had never been merged in the first place. In such a situation, the procedure is: 1. The commit to be reverted must not have been released. 2. The core team member who has a -2 worthy objection may propose a revert, stating the specific concerns that they feel need addressing. 3. Any subsequent patches depending on the to-be-reverted patch shall be reverted also, as needed. 4. Other core team members shall quickly approve the revert. No detailed debate is needed at this point. A -2 vote on a revert is strongly discouraged, because it effectively blocks the right of cores approving the revert from -2 voting on the original patch. 5. The original patch submitter may re-submit the change, with a reference to the original patch and the revert. 6. The original reviewers of the patch shall restore their votes and attempt to summarize their previous reasons for their votes. 7. The patch shall not be re-approved until the concerns of the opponents are fairly considered. A mailing list discussion or design spec may be the best way to achieve this. This policy shall not be used in situations where `Team Consensus`_ was fairly reached over a reasonable period of time. A `Fast Revert` applies only to new concerns that were not part of the `Team Consensus`_ determination when the patch was merged. See also: `Team Consensus`_. Continuous Improvement ====================== If any part of this document is not clear, or if you have suggestions for how to improve it, please contact our PTL for help. magnum-6.1.0/doc/source/contributor/reno.rst0000666000175100017510000000441713244017334021153 0ustar zuulzuul00000000000000Release Notes ============= What is reno ? -------------- Magnum uses `reno `_ for providing release notes in-tree. 
That means that a patch can include a *reno file* or a series can have a follow-on change containing that file explaining what the impact is. A *reno file* is a YAML file written in the releasenotes/notes tree which is generated using the reno tool this way: .. code-block:: bash $ tox -e venv -- reno new where usually ```` can be ``bp-`` for a blueprint or ``bug-XXXXXX`` for a bugfix. Refer to the `reno documentation `_ for the full list of sections. When a release note is needed ----------------------------- A release note is required anytime a reno section is needed. Below are some examples for each section. Any sections that would be blank should be left out of the note file entirely. If no section is needed, then you know you don't need to provide a release note :-) * ``upgrade`` * The patch has an `UpgradeImpact `_ tag * A DB change needs some deployer modification (like a migration) * A configuration option change (deprecation, removal or modified default) * some specific changes that have a `DocImpact `_ tag but require further action from an deployer perspective * any patch that requires an action from the deployer in general * ``security`` * If the patch fixes a known vulnerability * ``features`` * If the patch has an `APIImpact `_ tag * ``critical`` * Bugfixes categorized as Critical in Launchpad *impacting users* * ``fixes`` * No clear definition of such bugfixes. Hairy long-standing bugs with high importance that have been fixed are good candidates though. Three sections are left intentionally unexplained (``prelude``, ``issues`` and ``other``). Those are targeted to be filled in close to the release time for providing details about the soon-ish release. Don't use them unless you know exactly what you are doing. magnum-6.1.0/doc/source/contributor/objects.rst0000666000175100017510000001133513244017334021636 0ustar zuulzuul00000000000000.. Copyright 2015 IBM Corp. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Versioned Objects ================= Magnum uses the `oslo.versionedobjects library `_ to construct an object model that can be communicated via RPC. These objects have a version history and functionality to convert from one version to a previous version. This allows for 2 different levels of the code to still pass objects to each other, as in the case of rolling upgrades. Object Version Testing ---------------------- In order to ensure object versioning consistency is maintained, oslo.versionedobjects has a fixture to aid in testing object versioning. `oslo.versionedobjects.fixture.ObjectVersionChecker `_ generates fingerprints of each object, which is a combination of the current version number of the object, along with a hash of the RPC-critical parts of the object (fields and remotable methods). The tests hold a static mapping of the fingerprints of all objects. When an object is changed, the hash generated in the test will differ from that held in the static mapping. This will signal to the developer that the version of the object needs to be increased. Following this version increase, the fingerprint that is then generated by the test can be copied to the static mapping in the tests. This symbolizes that if the code change is approved, this is the new state of the object to compare against. 
Object Change Example ''''''''''''''''''''' The following example shows the unit test workflow when changing an object (Cluster was updated to hold a new 'foo' field):: tox -e py27 magnum.tests.unit.objects.test_objects This results in a unit test failure with the following output: .. code-block:: python testtools.matchers._impl.MismatchError: !=: reference = {'Cluster': '1.0-35edde13ad178e9419e7ea8b6d580bcd'} actual = {'Cluster': '1.0-22b40e8eed0414561ca921906b189820'} .. code-block:: console : Fields or remotable methods in some objects have changed. Make sure the versions of the objects has been bumped, and update the hashes in the static fingerprints tree (object_data). For more information, read http://docs.openstack.org/developer/magnum/objects.html. This is an indication that me adding the 'foo' field to Cluster means I need to bump the version of Cluster, so I increase the version and add a comment saying what I changed in the new version: .. code-block:: python @base.MagnumObjectRegistry.register class Cluster(base.MagnumPersistentObject, base.MagnumObject, base.MagnumObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Added 'foo' field VERSION = '1.1' Now that I have updated the version, I will run the tests again and let the test tell me the fingerprint that I now need to put in the static tree: .. code-block:: python testtools.matchers._impl.MismatchError: !=: reference = {'Cluster': '1.0-35edde13ad178e9419e7ea8b6d580bcd'} actual = {'Cluster': '1.1-22b40e8eed0414561ca921906b189820'} I can now copy the new fingerprint needed (1.1-22b40e8eed0414561ca921906b189820), to the object_data map within magnum/tests/unit/objects/test_objects.py: .. 
code-block:: python object_data = { 'Cluster': '1.1-22b40e8eed0414561ca921906b189820', 'ClusterTemplate': '1.0-06863f04ab4b98307e3d1b736d3137bf', 'Certificate': '1.0-69b579203c6d726be7878c606626e438', 'MyObj': '1.0-b43567e512438205e32f4e95ca616697', 'X509KeyPair': '1.0-fd008eba0fbc390e0e5da247bba4eedd', 'MagnumService': '1.0-d4b8c0f3a234aec35d273196e18f7ed1', } Running the unit tests now shows no failure. If I did not update the version, and rather just copied the new hash to the object_data map, the review would show the hash (but not the version) was updated in object_data. At that point, a reviewer should point this out, and mention that the object version needs to be updated. If a remotable method were added/changed, the same process is followed, because this will also cause a hash change. magnum-6.1.0/doc/source/contributor/quickstart.rst0000666000175100017510000007300713244017343022403 0ustar zuulzuul00000000000000.. _quickstart: ===================== Developer Quick-Start ===================== This is a quick walkthrough to get you started developing code for magnum. This assumes you are already familiar with submitting code reviews to an OpenStack project. .. 
seealso:: http://docs.openstack.org/infra/manual/developers.html Setup Dev Environment ===================== Install OS-specific prerequisites:: # Ubuntu Xenial: sudo apt update sudo apt install python-dev libssl-dev libxml2-dev curl \ libmysqlclient-dev libxslt-dev libpq-dev git \ libffi-dev gettext build-essential python3-dev # Fedora/RHEL: sudo yum install python-devel openssl-devel mysql-devel curl \ libxml2-devel libxslt-devel postgresql-devel git \ libffi-devel gettext gcc # openSUSE/SLE 12: sudo zypper install git libffi-devel curl \ libmysqlclient-devel libopenssl-devel libxml2-devel \ libxslt-devel postgresql-devel python-devel \ gettext-runtime Install pip:: curl -s https://bootstrap.pypa.io/get-pip.py | sudo python Install common prerequisites:: sudo pip install virtualenv flake8 tox testrepository git-review You may need to explicitly upgrade virtualenv if you've installed the one from your OS distribution and it is too old (tox will complain). You can upgrade it individually, if you need to:: sudo pip install -U virtualenv Magnum source code should be pulled directly from git:: # from your home or source directory cd ~ git clone https://git.openstack.org/openstack/magnum cd magnum All unit tests should be run using tox. To run magnum's entire test suite:: # run all tests (unit and pep8) tox To run a specific test, use a positional argument for the unit tests:: # run a specific test for Python 2.7 tox -epy27 -- test_conductor You may pass options to the test programs using positional arguments:: # run all the Python 2.7 unit tests (in parallel!) tox -epy27 -- --parallel To run only the pep8/flake8 syntax and style checks:: tox -epep8 To run unit test coverage and check percentage of code covered:: tox -e cover To discover and interact with templates, please refer to ``_ Exercising the Services Using Devstack ====================================== Devstack can be configured to enable magnum support. 
It is easy to develop magnum with the devstack environment. Magnum depends on nova, glance, heat and neutron to create and schedule virtual machines to simulate bare-metal (full bare-metal support is under active development). **NOTE:** Running devstack within a virtual machine with magnum enabled is not recommended at this time. This session has only been tested on Ubuntu 16.04 (Xenial) and Fedora 20/21. We recommend users to select one of them if it is possible. Clone devstack:: # Create a root directory for devstack if needed sudo mkdir -p /opt/stack sudo chown $USER /opt/stack git clone https://git.openstack.org/openstack-dev/devstack /opt/stack/devstack We will run devstack with minimal local.conf settings required to enable magnum, heat, and neutron (neutron is enabled by default in devstack since Kilo, and heat must be enabled by yourself):: $ cat > /opt/stack/devstack/local.conf << END [[local|localrc]] DATABASE_PASSWORD=password RABBIT_PASSWORD=password SERVICE_TOKEN=password SERVICE_PASSWORD=password ADMIN_PASSWORD=password # magnum requires the following to be set correctly PUBLIC_INTERFACE=eth1 # Enable barbican service and use it to store TLS certificates # For details https://docs.openstack.org/developer/magnum/userguide.html#transport-layer-security enable_plugin barbican https://git.openstack.org/openstack/barbican enable_plugin heat https://git.openstack.org/openstack/heat # Enable magnum plugin after dependent plugins enable_plugin magnum https://git.openstack.org/openstack/magnum # Optional: uncomment to enable the Magnum UI plugin in Horizon #enable_plugin magnum-ui https://github.com/openstack/magnum-ui VOLUME_BACKING_FILE_SIZE=20G END **NOTE:** Update PUBLIC_INTERFACE as appropriate for your system. **NOTE:** Enable heat plugin is necessary. 
Optionally, you can enable neutron/lbaas v2 with octavia to create load balancers for multi master clusters:: $ cat >> /opt/stack/devstack/local.conf << END enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas enable_plugin octavia https://git.openstack.org/openstack/octavia # Disable LBaaS(v1) service disable_service q-lbaas # Enable LBaaS(v2) services enable_service q-lbaasv2 enable_service octavia enable_service o-cw enable_service o-hk enable_service o-hm enable_service o-api END Optionally, you can enable ceilometer in devstack. If ceilometer is enabled, magnum will periodically send metrics to ceilometer:: $ cat >> /opt/stack/devstack/local.conf << END enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer END If you want to deploy Docker Registry 2.0 in your cluster, you should enable swift in devstack:: $ cat >> /opt/stack/devstack/local.conf << END enable_service s-proxy enable_service s-object enable_service s-container enable_service s-account END More devstack configuration information can be found at http://docs.openstack.org/developer/devstack/configuration.html More neutron configuration information can be found at http://docs.openstack.org/developer/devstack/guides/neutron.html Run devstack:: cd /opt/stack/devstack ./stack.sh **NOTE:** This will take a little extra time when the Fedora Atomic micro-OS image is downloaded for the first time. At this point, two magnum process (magnum-api and magnum-conductor) will be running on devstack screens. If you make some code changes and want to test their effects, just stop and restart magnum-api and/or magnum-conductor. Prepare your session to be able to use the various openstack clients including magnum, neutron, and glance. Create a new shell, and source the devstack openrc script:: . /opt/stack/devstack/openrc admin admin Magnum has been tested with the Fedora Atomic micro-OS and CoreOS. 
Magnum will likely work with other micro-OS platforms, but each requires individual support in the heat template. The Fedora Atomic micro-OS image will automatically be added to glance. You can add additional images manually through glance. To verify the image created when installing devstack use:: $ openstack image list +--------------------------------------+------------------------------------+--------+ | ID | Name | Status | +--------------------------------------+------------------------------------+--------+ | 0bc132b1-ee91-4bd8-b0fd-19deb57fb39f | Fedora-Atomic-27-20180212.2.x86_64 | active | | 7537bbf2-f1c3-47da-97bb-38c09007e146 | cirros-0.3.5-x86_64-disk | active | +--------------------------------------+------------------------------------+--------+ To list the available commands and resources for magnum, use:: openstack help coe To list out the health of the internal services, namely conductor, of magnum, use:: $ openstack coe service list +----+---------------------------------------+------------------+-------+----------+-----------------+---------------------------+---------------------------+ | id | host | binary | state | disabled | disabled_reason | created_at | updated_at | +----+---------------------------------------+------------------+-------+----------+-----------------+---------------------------+---------------------------+ | 1 | oxy-dev.hq1-0a5a3c02.hq1.abcde.com | magnum-conductor | up | | - | 2016-08-31T10:03:36+00:00 | 2016-08-31T10:11:41+00:00 | +----+---------------------------------------+------------------+-------+----------+-----------------+---------------------------+---------------------------+ Create a keypair for use with the ClusterTemplate:: test -f ~/.ssh/id_rsa.pub || ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa openstack keypair create --public-key ~/.ssh/id_rsa.pub testkey Check a dns server can resolve a host name properly:: dig @ +short For example:: $ dig www.openstack.org @8.8.8.8 +short 
www.openstack.org.cdn.cloudflare.net. 104.20.64.68 104.20.65.68 Building a Kubernetes Cluster - Based on Fedora Atomic ====================================================== Create a ClusterTemplate. This is similar in nature to a flavor and describes to magnum how to construct the cluster. The ClusterTemplate specifies a Fedora Atomic image so the clusters which use this ClusterTemplate will be based on Fedora Atomic:: openstack coe cluster template create k8s-cluster-template \ --image Fedora-Atomic-27-20180212.2.x86_64 \ --keypair testkey \ --external-network public \ --dns-nameserver 8.8.8.8 \ --flavor m1.small \ --docker-volume-size 5 \ --network-driver flannel \ --coe kubernetes Create a cluster. Use the ClusterTemplate name as a template for cluster creation. This cluster will result in one master kubernetes node and one minion node:: openstack coe cluster create k8s-cluster \ --cluster-template k8s-cluster-template \ --node-count 1 Clusters will have an initial status of CREATE_IN_PROGRESS. Magnum will update the status to CREATE_COMPLETE when it is done creating the cluster. Do not create containers, pods, services, or replication controllers before magnum finishes creating the cluster. They will likely not be created, and may cause magnum to become confused. 
The existing clusters can be listed as follows:: $ openstack coe cluster list +--------------------------------------+-------------+------------+--------------+-----------------+ | uuid | name | node_count | master_count | status | +--------------------------------------+-------------+------------+--------------+-----------------+
Heat can be used to see detailed information on the status of a stack or specific cluster: To check the list of all cluster stacks:: openstack stack list To check an individual cluster's stack:: openstack stack show <stack-name or stack_id> Monitoring cluster status in detail (e.g., creating, updating):: CLUSTER_HEAT_NAME=$(openstack stack list | \ awk "/\sk8s-cluster-/{print \$4}") echo ${CLUSTER_HEAT_NAME} openstack stack resource list ${CLUSTER_HEAT_NAME} Building a Kubernetes Cluster - Based on CoreOS =============================================== You can create a Kubernetes cluster based on CoreOS as an alternative to Atomic. First, download the official CoreOS image:: wget http://beta.release.core-os.net/amd64-usr/current/coreos_production_openstack_image.img.bz2 bunzip2 coreos_production_openstack_image.img.bz2 Upload the image to glance:: openstack image create CoreOS \ --public \ --disk-format=qcow2 \ --container-format=bare \ --property os_distro=coreos \ --file=coreos_production_openstack_image.img Create a CoreOS Kubernetes ClusterTemplate, which is similar to the Atomic Kubernetes ClusterTemplate, except for pointing to a different image:: openstack coe cluster template create k8s-cluster-template-coreos \ --image CoreOS \ --keypair testkey \ --external-network public \ --dns-nameserver 8.8.8.8 \ --flavor m1.small \ --network-driver flannel \ --coe kubernetes Create a CoreOS Kubernetes cluster. Use the CoreOS ClusterTemplate as a template for cluster creation:: openstack coe cluster create k8s-cluster \ --cluster-template k8s-cluster-template-coreos \ --node-count 2 Using a Kubernetes Cluster ========================== **NOTE:** For the following examples, only one minion node is required in the k8s cluster created previously. Kubernetes provides a number of examples you can use to check that things are working.
You may need to download kubectl binary for interacting with k8s cluster using:: curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.2.0/bin/linux/amd64/kubectl chmod +x ./kubectl sudo mv ./kubectl /usr/local/bin/kubectl We first need to setup the certs to allow Kubernetes to authenticate our connection. Please refer to the Transport Layer Security section of the Magnum User Guide for more info on using TLS keys/certs which are setup below. To generate an RSA key, you will use the 'genrsa' command of the 'openssl' tool.:: openssl genrsa -out client.key 4096 To generate a CSR for client authentication, openssl requires a config file that specifies a few values.:: $ cat > client.conf << END [req] distinguished_name = req_distinguished_name req_extensions = req_ext prompt = no [req_distinguished_name] CN = admin O = system:masters OU=OpenStack/Magnum C=US ST=TX L=Austin [req_ext] extendedKeyUsage = clientAuth END Once you have client.conf, you can run the openssl 'req' command to generate the CSR.:: openssl req -new -days 365 \ -config client.conf \ -key client.key \ -out client.csr Now that you have your client CSR, you can use the Magnum CLI to send it off to Magnum to get it signed and also download the signing cert.:: magnum ca-sign --cluster k8s-cluster --csr client.csr > client.crt magnum ca-show --cluster k8s-cluster > ca.crt Here's how to set up the replicated redis example.
Now we create a pod for the redis-master:: # Using cluster-config command for faster configuration eval $(openstack coe cluster config k8s-cluster) # Test the cert and connection works kubectl version cd kubernetes/examples/redis kubectl create -f ./redis-master.yaml Now create a service to provide a discoverable endpoint for the redis sentinels in the cluster:: kubectl create -f ./redis-sentinel-service.yaml To make it a replicated redis cluster create replication controllers for the redis slaves and sentinels:: sed -i 's/\(replicas: \)1/\1 2/' redis-controller.yaml kubectl create -f ./redis-controller.yaml sed -i 's/\(replicas: \)1/\1 2/' redis-sentinel-controller.yaml kubectl create -f ./redis-sentinel-controller.yaml Full lifecycle and introspection operations for each object are supported. For example, openstack coe cluster create, openstack coe cluster template delete. Now there are four redis instances (one master and three slaves) running across the cluster, replicating data between one another. 
Run the openstack coe cluster show command to get the IP of the cluster host on which the redis-master is running:: $ openstack coe cluster show k8s-cluster +--------------------+------------------------------------------------------------+ | Property | Value | +--------------------+------------------------------------------------------------+ | status | CREATE_COMPLETE | | uuid | cff82cd0-189c-4ede-a9cb-2c0af6997709 | | stack_id | 7947844a-8e18-4c79-b591-ecf0f6067641 | | status_reason | Stack CREATE completed successfully | | created_at | 2016-05-26T17:45:57+00:00 | | updated_at | 2016-05-26T17:50:02+00:00 | | create_timeout | 60 | | api_address | https://172.24.4.4:6443 | | coe_version | v1.2.0 | | cluster_template_id| e73298e7-e621-4d42-b35b-7a1952b97158 | | master_addresses | ['172.24.4.6'] | | node_count | 1 | | node_addresses | ['172.24.4.5'] | | master_count | 1 | | container_version | 1.9.1 | | discovery_url | https://discovery.etcd.io/4caaa65f297d4d49ef0a085a7aecf8e0 | | name | k8s-cluster | +--------------------+------------------------------------------------------------+ The output here indicates the redis-master is running on the cluster host with IP address 172.24.4.5. To access the redis master:: $ ssh fedora@172.24.4.5 $ REDIS_ID=$(sudo docker ps | grep redis:v1 | grep k8s_master | awk '{print $1}') $ sudo docker exec -i -t $REDIS_ID redis-cli 127.0.0.1:6379> set replication:test true OK ^D $ exit # Log out of the host Log into one of the other container hosts and access a redis slave from it. You can use `nova list` to enumerate the kube-minions. 
For this example we will use the same host as above:: $ ssh fedora@172.24.4.5 $ REDIS_ID=$(sudo docker ps | grep redis:v1 | grep k8s_redis | awk '{print $1}') $ sudo docker exec -i -t $REDIS_ID redis-cli 127.0.0.1:6379> get replication:test "true" ^D $ exit # Log out of the host Additional useful commands from a given minion:: sudo docker ps # View Docker containers on this minion kubectl get pods # Get pods kubectl get rc # Get replication controllers kubectl get svc # Get services kubectl get nodes # Get nodes After you finish using the cluster, you want to delete it. A cluster can be deleted as follows:: openstack coe cluster delete k8s-cluster Building and Using a Swarm Cluster ================================== Create a ClusterTemplate. It is very similar to the Kubernetes ClusterTemplate, except for the absence of some Kubernetes-specific arguments and the use of 'swarm' as the COE:: openstack coe cluster template create swarm-cluster-template \ --image Fedora-Atomic-27-20180212.2.x86_64 \ --keypair testkey \ --external-network public \ --dns-nameserver 8.8.8.8 \ --flavor m1.small \ --docker-volume-size 5 \ --coe swarm **NOTE:** If you are using Magnum behind a firewall then refer to the Magnum documentation on using proxies when running behind a firewall. Finally, create the cluster. Use the ClusterTemplate 'swarm-cluster-template' as a template for cluster creation.
This cluster will result in one swarm manager node and two extra agent nodes:: openstack coe cluster create swarm-cluster \ --cluster-template swarm-cluster-template \ --node-count 2 Now that we have a swarm cluster we can start interacting with it:: $ openstack coe cluster show swarm-cluster +--------------------+------------------------------------------------------------+ | Property | Value | +--------------------+------------------------------------------------------------+ | status | CREATE_COMPLETE | | uuid | eda91c1e-6103-45d4-ab09-3f316310fa8e | | stack_id | 7947844a-8e18-4c79-b591-ecf0f6067641 | | status_reason | Stack CREATE completed successfully | | created_at | 2015-04-20T19:05:27+00:00 | | updated_at | 2015-04-20T19:06:08+00:00 | | create_timeout | 60 | | api_address | https://172.24.4.4:6443 | | coe_version | 1.2.5 | | cluster_template_id| e73298e7-e621-4d42-b35b-7a1952b97158 | | master_addresses | ['172.24.4.6'] | | node_count | 2 | | node_addresses | ['172.24.4.5'] | | master_count | 1 | | container_version | 1.9.1 | | discovery_url | https://discovery.etcd.io/4caaa65f297d4d49ef0a085a7aecf8e0 | | name | swarm-cluster | +--------------------+------------------------------------------------------------+ We now need to setup the docker CLI to use the swarm cluster we have created with the appropriate credentials. Create a dir to store certs and cd into it. 
The `DOCKER_CERT_PATH` env variable is consumed by docker which expects ca.pem, key.pem and cert.pem to be in that directory.:: export DOCKER_CERT_PATH=~/.docker mkdir -p ${DOCKER_CERT_PATH} cd ${DOCKER_CERT_PATH} Generate an RSA key.:: openssl genrsa -out key.pem 4096 Create openssl config to help generate a CSR.:: $ cat > client.conf << END [req] distinguished_name = req_distinguished_name req_extensions = req_ext prompt = no [req_distinguished_name] CN = Your Name [req_ext] extendedKeyUsage = clientAuth END Run the openssl 'req' command to generate the CSR.:: openssl req -new -days 365 \ -config client.conf \ -key key.pem \ -out client.csr Now that you have your client CSR use the Magnum CLI to get it signed and also download the signing cert.:: magnum ca-sign --cluster swarm-cluster --csr client.csr > cert.pem magnum ca-show --cluster swarm-cluster > ca.pem Set the CLI to use TLS. This env var is consumed by docker.:: export DOCKER_TLS_VERIFY="1" Set the correct host to use which is the public ip address of swarm API server endpoint. This env var is consumed by docker.:: export DOCKER_HOST=$(openstack coe cluster show swarm-cluster | awk '/ api_address /{print substr($4,7)}') Next we will create a container in this swarm cluster. This container will ping the address 8.8.8.8 four times:: docker run --rm -it cirros:latest ping -c 4 8.8.8.8 You should see a similar output to:: PING 8.8.8.8 (8.8.8.8): 56 data bytes 64 bytes from 8.8.8.8: seq=0 ttl=40 time=25.513 ms 64 bytes from 8.8.8.8: seq=1 ttl=40 time=25.348 ms 64 bytes from 8.8.8.8: seq=2 ttl=40 time=25.226 ms 64 bytes from 8.8.8.8: seq=3 ttl=40 time=25.275 ms --- 8.8.8.8 ping statistics --- 4 packets transmitted, 4 packets received, 0% packet loss round-trip min/avg/max = 25.226/25.340/25.513 ms Building and Using a Mesos Cluster ================================== Provisioning a mesos cluster requires a Ubuntu-based image with some packages pre-installed.
To build and upload such image, please refer to the Mesos image-building instructions in the Magnum source tree. Alternatively, you can download and upload a pre-built image:: wget https://fedorapeople.org/groups/magnum/ubuntu-mesos-latest.qcow2 openstack image create ubuntu-mesos --public \ --disk-format=qcow2 --container-format=bare \ --property os_distro=ubuntu --file=ubuntu-mesos-latest.qcow2 Then, create a ClusterTemplate by using 'mesos' as the COE, with the rest of arguments similar to the Kubernetes ClusterTemplate:: openstack coe cluster template create mesos-cluster-template --image ubuntu-mesos \ --keypair testkey \ --external-network public \ --dns-nameserver 8.8.8.8 \ --flavor m1.small \ --coe mesos Finally, create the cluster. Use the ClusterTemplate 'mesos-cluster-template' as a template for cluster creation. This cluster will result in one mesos master node and two mesos slave nodes:: openstack coe cluster create mesos-cluster \ --cluster-template mesos-cluster-template \ --node-count 2 Now that we have a mesos cluster we can start interacting with it.
First we need to make sure the cluster's status is 'CREATE_COMPLETE':: $ openstack coe cluster show mesos-cluster +--------------------+------------------------------------------------------------+ | Property | Value | +--------------------+------------------------------------------------------------+ | status | CREATE_COMPLETE | | uuid | ff727f0d-72ca-4e2b-9fef-5ec853d74fdf | | stack_id | 7947844a-8e18-4c79-b591-ecf0f6067641 | | status_reason | Stack CREATE completed successfully | | created_at | 2015-06-09T20:21:43+00:00 | | updated_at | 2015-06-09T20:28:18+00:00 | | create_timeout | 60 | | api_address | https://172.24.4.115:6443 | | coe_version | - | | cluster_template_id| 92dbda62-32d4-4435-88fc-8f42d514b347 | | master_addresses | ['172.24.4.115'] | | node_count | 2 | | node_addresses | ['172.24.4.116', '172.24.4.117'] | | master_count | 1 | | container_version | 1.9.1 | | discovery_url | None | | name | mesos-cluster | +--------------------+------------------------------------------------------------+ Next we will create a container in this cluster by using the REST API of Marathon. This container will ping the address 8.8.8.8:: $ cat > mesos.json << END { "container": { "type": "DOCKER", "docker": { "image": "cirros" } }, "id": "ubuntu", "instances": 1, "cpus": 0.5, "mem": 512, "uris": [], "cmd": "ping 8.8.8.8" } END $ MASTER_IP=$(openstack coe cluster show mesos-cluster | awk '/ api_address /{print $4}') $ curl -X POST -H "Content-Type: application/json" \ http://${MASTER_IP}:8080/v2/apps -d@mesos.json To check application and task status:: $ curl http://${MASTER_IP}:8080/v2/apps $ curl http://${MASTER_IP}:8080/v2/tasks You can access the Mesos web page at \http://<master_ip>:5050/ and the Marathon web console at \http://<master_ip>:8080/.
Building Developer Documentation ================================ To build the documentation locally (e.g., to test documentation changes before uploading them for review) chdir to the magnum root folder and run tox:: tox -edocs **NOTE:** The first time you run this will take some extra time as it creates a virtual environment to run in. When complete, the documentation can be accessed from:: doc/build/html/index.html magnum-6.1.0/doc/source/contributor/troubleshooting.rst0000666000175100017510000000200313244017334023424 0ustar zuulzuul00000000000000Developer Troubleshooting Guide ================================ This guide is intended to provide information on how to resolve common problems encountered when developing code for magnum. Troubleshooting MySQL ----------------------- When creating alembic migrations, developers might encounter the ``Multiple head revisions are present for given argument 'head'`` error. This can occur when two migrations revise the same head. For example, the developer creates a migration locally but another migration has already been accepted and merged into master that revises the same head:: $ alembic heads 12345 (your local head) 67890 (new master head) In order to fix this, the developer should update the down_revision of their local migration to point to the head of the new migration in master:: # revision identifiers, used by Alembic. revision = '12345' down_revision = '67890' Now the newest local migration should be head:: $ alembic heads 12345 (your local head) magnum-6.1.0/doc/source/contributor/contributing.rst0000666000175100017510000000011613244017334022707 0ustar zuulzuul00000000000000============ Contributing ============ .. 
include:: ../../../CONTRIBUTING.rst magnum-6.1.0/doc/source/contributor/api-microversion.rst0000666000175100017510000002656513244017334023506 0ustar zuulzuul00000000000000API Microversions ================= Background ---------- Magnum uses a framework we call 'API Microversions' for allowing changes to the API while preserving backward compatibility. The basic idea is that a user has to explicitly ask for their request to be treated with a particular version of the API. So breaking changes can be added to the API without breaking users who don't specifically ask for it. This is done with an HTTP header ``OpenStack-API-Version`` which has as its value a string containing the name of the service, ``container-infra``, and a monotonically increasing semantic version number starting from ``1.1``. The full form of the header takes the form:: OpenStack-API-Version: container-infra 1.1 If a user makes a request without specifying a version, they will get the ``BASE_VER`` as defined in ``magnum/api/controllers/versions.py``. This value is currently ``1.1`` and is expected to remain so for quite a long time. When do I need a new Microversion? ---------------------------------- A microversion is needed when the contract to the user is changed. The user contract covers many kinds of information such as: - the Request - the list of resource urls which exist on the server Example: adding a new clusters/{ID}/foo which didn't exist in a previous version of the code - the list of query parameters that are valid on urls Example: adding a new parameter ``is_yellow`` clusters/{ID}?is_yellow=True - the list of query parameter values for non free form fields Example: parameter filter_by takes a small set of constants/enums "A", "B", "C". Adding support for new enum "D". - new headers accepted on a request - the list of attributes and data structures accepted. 
Example: adding a new attribute 'locked': True/False to the request body - the Response - the list of attributes and data structures returned Example: adding a new attribute 'locked': True/False to the output of clusters/{ID} - the allowed values of non free form fields Example: adding a new allowed ``status`` to clusters/{ID} - the list of status codes allowed for a particular request Example: an API previously could return 200, 400, 403, 404 and the change would make the API now also be allowed to return 409. See [#f2]_ for the 400, 403, 404 and 415 cases. - changing a status code on a particular response Example: changing the return code of an API from 501 to 400. .. note:: Fixing a bug so that a 400+ code is returned rather than a 500 or 503 does not require a microversion change. It's assumed that clients are not expected to handle a 500 or 503 response and therefore should not need to opt-in to microversion changes that fixes a 500 or 503 response from happening. According to the OpenStack API Working Group, a **500 Internal Server Error** should **not** be returned to the user for failures due to user error that can be fixed by changing the request on the client side. See [#f1]_. - new headers returned on a response The following flow chart attempts to walk through the process of "do we need a microversion". .. graphviz:: digraph states { label="Do I need a microversion?" 
silent_fail[shape="diamond", style="", group=g1, label="Did we silently fail to do what is asked?"]; ret_500[shape="diamond", style="", group=g1, label="Did we return a 500 before?"]; new_error[shape="diamond", style="", group=g1, label="Are we changing what status code is returned?"]; new_attr[shape="diamond", style="", group=g1, label="Did we add or remove an attribute to a payload?"]; new_param[shape="diamond", style="", group=g1, label="Did we add or remove an accepted query string parameter or value?"]; new_resource[shape="diamond", style="", group=g1, label="Did we add or remove a resource url?"]; no[shape="box", style=rounded, label="No microversion needed"]; yes[shape="box", style=rounded, label="Yes, you need a microversion"]; no2[shape="box", style=rounded, label="No microversion needed, it's a bug"]; silent_fail -> ret_500[label=" no"]; silent_fail -> no2[label="yes"]; ret_500 -> no2[label="yes [1]"]; ret_500 -> new_error[label=" no"]; new_error -> new_attr[label=" no"]; new_error -> yes[label="yes"]; new_attr -> new_param[label=" no"]; new_attr -> yes[label="yes"]; new_param -> new_resource[label=" no"]; new_param -> yes[label="yes"]; new_resource -> no[label=" no"]; new_resource -> yes[label="yes"]; {rank=same; yes new_attr} {rank=same; no2 ret_500} {rank=min; silent_fail} } **Footnotes** .. [#f1] When fixing 500 errors that previously caused stack traces, try to map the new error into the existing set of errors that API call could previously return (400 if nothing else is appropriate). Changing the set of allowed status codes from a request is changing the contract, and should be part of a microversion (except in [#f2]_). The reason why we are so strict on contract is that we'd like application writers to be able to know, for sure, what the contract is at every microversion in Magnum. If they do not, they will need to write conditional code in their application to handle ambiguities. When in doubt, consider application authors. 
If it would work with no client side changes on both Magnum versions, you probably don't need a microversion. If, on the other hand, there is any ambiguity, a microversion is probably needed. .. [#f2] The exception to not needing a microversion when returning a previously unspecified error code is the 400, 403, 404 and 415 cases. This is considered OK to return even if previously unspecified in the code since it's implied given keystone authentication can fail with a 403 and API validation can fail with a 400 for invalid JSON request body. Request to url/resource that does not exist always fails with 404. Invalid content types are handled before API methods are called which results in a 415. .. note:: When in doubt about whether or not a microversion is required for changing an error response code, consult the `Containers Team`_. .. _Containers Team: https://wiki.openstack.org/wiki/Meetings/Containers When a microversion is not needed --------------------------------- A microversion is not needed in the following situation: - the response - Changing the error message without changing the response code does not require a new microversion. - Removing an inapplicable HTTP header, for example, suppose the Retry-After HTTP header is being returned with a 4xx code. This header should only be returned with a 503 or 3xx response, so it may be removed without bumping the microversion. In Code ------- In ``magnum/api/controllers/base.py`` we define an ``@api_version`` decorator which is intended to be used on top-level Controller methods. It is not appropriate for lower-level methods. Some examples: Adding a new API method ~~~~~~~~~~~~~~~~~~~~~~~ In the controller class:: @base.Controller.api_version("1.2") def my_api_method(self, req, id): .... This method would only be available if the caller had specified an ``OpenStack-API-Version`` of >= ``1.2``. 
If they had specified a lower version (or not specified it and received the default of ``1.1``) the server would respond with ``HTTP/406``. Removing an API method ~~~~~~~~~~~~~~~~~~~~~~ In the controller class:: @base.Controller.api_version("1.2", "1.3") def my_api_method(self, req, id): .... This method would only be available if the caller had specified an ``OpenStack-API-Version`` of >= ``1.2`` and ``OpenStack-API-Version`` of <= ``1.3``. If ``1.4`` or later is specified the server will respond with ``HTTP/406``. Changing a method's behavior ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the controller class:: @base.Controller.api_version("1.2", "1.3") def my_api_method(self, req, id): .... method_1 ... @base.Controller.api_version("1.4") #noqa def my_api_method(self, req, id): .... method_2 ... If a caller specified ``1.2``, ``1.3`` (or received the default of ``1.1``) they would see the result from ``method_1``, and for ``1.4`` or later they would see the result from ``method_2``. It is vital that the two methods have the same name, so the second of them will need ``# noqa`` to avoid failing flake8's ``F811`` rule. The two methods may be different in any kind of semantics (schema validation, return values, response codes, etc) When not using decorators ~~~~~~~~~~~~~~~~~~~~~~~~~ When you don't want to use the ``@api_version`` decorator on a method or you want to change behavior within a method (say it leads to simpler or simply a lot less code) you can directly test for the requested version with a method as long as you have access to the api request object (commonly accessed with ``pecan.request``). 
Every API method has a versions object attached to the request object and that can be used to modify behavior based on its value:: def index(self): req_version = pecan.request.headers.get(Version.string) req1_min = versions.Version("1.1") req1_max = versions.Version("1.5") req2_min = versions.Version("1.6") req2_max = versions.Version("1.10") if req_version.matches(req1_min, req1_max): ....stuff.... elif req_version.matches(req2_min, req2_max): ....other stuff.... elif req_version > versions.Version("1.10"): ....more stuff..... The first argument to the matches method is the minimum acceptable version and the second is the maximum acceptable version. If the specified minimum version and maximum version are null then ``ValueError`` is raised. Other necessary changes ----------------------- If you are adding a patch which adds a new microversion, it is necessary to add changes to other places which describe your change: * Update ``REST_API_VERSION_HISTORY`` in ``magnum/api/controllers/versions.py`` * Update ``CURRENT_MAX_VER`` in ``magnum/api/controllers/versions.py`` * Add a verbose description to ``magnum/api/rest_api_version_history.rst``. There should be enough information that it could be used by the docs team for release notes. * Update the expected versions in affected tests, for example in ``magnum/tests/unit/api/controllers/test_base.py``. * Make a new commit to python-magnumclient and update corresponding files to enable the newly added microversion API. * If the microversion changes the response schema, a new schema and test for the microversion must be added to Tempest. Allocating a microversion ------------------------- If you are adding a patch which adds a new microversion, it is necessary to allocate the next microversion number. Except under extremely unusual circumstances and this would have been mentioned in the magnum spec for the change, the minor number of ``CURRENT_MAX_VER`` will be incremented.
This will also be the new microversion number for the API change. It is possible that multiple microversion patches would be proposed in parallel and the microversions would conflict between patches. This will cause a merge conflict. We don't reserve a microversion for each patch in advance as we don't know the final merge order. Developers may need over time to rebase their patch calculating a new version number as above based on the updated value of ``CURRENT_MAX_VER``. magnum-6.1.0/doc/source/contributor/index.rst0000666000175100017510000000142213244017334021310 0ustar zuulzuul00000000000000Contributor's Guide =================== Getting Started --------------- If you are new to Magnum, this section contains information that should help you get started as a developer working on the project or contributing to the project. .. toctree:: :maxdepth: 1 Developer Contribution Guide Setting Up Your Development Environment Running Tempest Tests Developer Troubleshooting Guide There are some other important documents also that helps new contributors to contribute effectively towards code standards to the project. .. toctree:: :maxdepth: 1 Writing a Release Note Adding a New API Method Changing Magnum DB Objects api-microversion-history policies magnum-6.1.0/doc/source/contributor/api-microversion-history.rst0000666000175100017510000000007613244017334025172 0ustar zuulzuul00000000000000.. include:: ../../../magnum/api/rest_api_version_history.rst magnum-6.1.0/doc/source/user/0000775000175100017510000000000013244017675016062 5ustar zuulzuul00000000000000magnum-6.1.0/doc/source/user/cluster-type-definition.rst0000666000175100017510000001115713244017334023401 0ustar zuulzuul00000000000000There are three key pieces to a Cluster Type Definition: 1. Heat Stack template - The HOT file that Magnum will use to generate a cluster using a Heat Stack. 2. Template definition - Magnum's interface for interacting with the Heat template. 3. 
Definition Entry Point - Used to advertise the available Cluster Types. The Heat Stack Template ----------------------- The Heat Stack Template is where most of the real work happens. The result of the Heat Stack Template should be a full Container Orchestration Environment. The Template Definition ----------------------- Template definitions are a mapping of Magnum object attributes and Heat template parameters, along with Magnum consumable template outputs. A Cluster Type Definition indicates which Cluster Types it can provide. Cluster Types are how Magnum determines which of the enabled Cluster Type Definitions it will use for a given cluster. The Definition Entry Point -------------------------- Entry points are a standard discovery and import mechanism for Python objects. Each Template Definition should have an Entry Point in the `magnum.template_definitions` group. This example exposes its Template Definition as `example_template = example_template:ExampleTemplate` in the `magnum.template_definitions` group. Installing Cluster Templates ---------------------------- Because Cluster Type Definitions are basically Python projects, they can be worked with like any other Python project. They can be cloned from version control and installed or uploaded to a package index and installed via utilities such as pip. Enabling a Cluster Type is as simple as adding its Entry Point to the `enabled_definitions` config option in magnum.conf.:: # Setup python environment and install Magnum $ virtualenv .venv $ .
.venv/bin/activate (.venv)$ git clone https://github.com/openstack/magnum.git (.venv)$ cd magnum (.venv)$ python setup.py install # List installed templates, notice default templates are enabled (.venv)$ magnum-template-manage list-templates Enabled Templates magnum_vm_atomic_k8s: /home/example/.venv/local/lib/python2.7/site-packages/magnum/templates/kubernetes/kubecluster.yaml magnum_vm_coreos_k8s: /home/example/.venv/local/lib/python2.7/site-packages/magnum/templates/kubernetes/kubecluster-coreos.yaml Disabled Templates # Install example template (.venv)$ cd contrib/templates/example (.venv)$ python setup.py install # List installed templates, notice example template is disabled (.venv)$ magnum-template-manage list-templates Enabled Templates magnum_vm_atomic_k8s: /home/example/.venv/local/lib/python2.7/site-packages/magnum/templates/kubernetes/kubecluster.yaml magnum_vm_coreos_k8s: /home/example/.venv/local/lib/python2.7/site-packages/magnum/templates/kubernetes/kubecluster-coreos.yaml Disabled Templates example_template: /home/example/.venv/local/lib/python2.7/site-packages/ExampleTemplate-0.1-py2.7.egg/example_template/example.yaml # Enable example template by setting enabled_definitions in magnum.conf (.venv)$ sudo mkdir /etc/magnum (.venv)$ sudo bash -c "cat > /etc/magnum/magnum.conf << END_CONF [bay] enabled_definitions=magnum_vm_atomic_k8s,magnum_vm_coreos_k8s,example_template END_CONF" # List installed templates, notice example template is now enabled (.venv)$ magnum-template-manage list-templates Enabled Templates example_template: /home/example/.venv/local/lib/python2.7/site-packages/ExampleTemplate-0.1-py2.7.egg/example_template/example.yaml magnum_vm_atomic_k8s: /home/example/.venv/local/lib/python2.7/site-packages/magnum/templates/kubernetes/kubecluster.yaml magnum_vm_coreos_k8s: /home/example/.venv/local/lib/python2.7/site-packages/magnum/templates/kubernetes/kubecluster-coreos.yaml Disabled Templates # Use --details argument to get more details about
each template (.venv)$ magnum-template-manage list-templates --details Enabled Templates example_template: /home/example/.venv/local/lib/python2.7/site-packages/ExampleTemplate-0.1-py2.7.egg/example_template/example.yaml Server_Type OS CoE vm example example_coe magnum_vm_atomic_k8s: /home/example/.venv/local/lib/python2.7/site-packages/magnum/templates/kubernetes/kubecluster.yaml Server_Type OS CoE vm fedora-atomic kubernetes magnum_vm_coreos_k8s: /home/example/.venv/local/lib/python2.7/site-packages/magnum/templates/kubernetes/kubecluster-coreos.yaml Server_Type OS CoE vm coreos kubernetes Disabled Templates magnum-6.1.0/doc/source/user/heat-templates.rst0000666000175100017510000000111413244017334021520 0ustar zuulzuul00000000000000Heat Stack Templates are what Magnum passes to Heat to generate a cluster. For each ClusterTemplate resource in Magnum, a Heat stack is created to arrange all of the cloud resources needed to support the container orchestration environment. These Heat stack templates provide a mapping of Magnum object attributes to Heat template parameters, along with Magnum consumable stack outputs. Magnum passes the Heat Stack Template to the Heat service to create a Heat stack. The result is a full Container Orchestration Environment. .. list-plugins:: magnum.template_definitions :detailed: magnum-6.1.0/doc/source/user/index.rst0000666000175100017510000037250613244017334017732 0ustar zuulzuul00000000000000================= Magnum User Guide ================= This guide is intended for users who use Magnum to deploy and manage clusters of hosts for a Container Orchestration Engine. It describes the infrastructure that Magnum creates and how to work with them. Section 1-3 describe Magnum itself, including an overview, the CLI and Horizon interface. Section 4-9 describe the Container Orchestration Engine (COE) supported along with a guide on how to select one that best meets your needs and how to develop a driver for a new COE. 
Section 10-15 describe the low level OpenStack infrastructure that is created and managed by Magnum to support the COE's. #. `Overview`_ #. `Python Client`_ #. `Horizon Interface`_ #. `Cluster Drivers`_ #. `Cluster Type Definition`_ #. `Heat Stack Templates`_ #. `Choosing a COE`_ #. `Native Clients`_ #. `Kubernetes`_ #. `Swarm`_ #. `Mesos`_ #. `Transport Layer Security`_ #. `Networking`_ #. `High Availability`_ #. `Scaling`_ #. `Storage`_ #. `Image Management`_ #. `Notification`_ #. `Container Monitoring`_ #. `Kubernetes External Load Balancer`_ Terminology =========== Cluster (previously Bay) A cluster is the construct in which Magnum launches container orchestration engines. After a cluster has been created the user is able to add containers to it either directly, or in the case of the Kubernetes container orchestration engine within pods - a logical construct specific to that implementation. A cluster is created based on a ClusterTemplate. ClusterTemplate (previously BayModel) A ClusterTemplate in Magnum is roughly equivalent to a flavor in Nova. It acts as a template that defines options such as the container orchestration engine, keypair and image for use when Magnum is creating clusters using the given ClusterTemplate. Container Orchestration Engine (COE) A container orchestration engine manages the lifecycle of one or more containers, logically represented in Magnum as a cluster. Magnum supports a number of container orchestration engines, each with their own pros and cons, including Docker Swarm, Kubernetes, and Mesos. Overview ======== Magnum is an OpenStack API service developed by the OpenStack Containers Team making container orchestration engines (COE) such as Docker Swarm, Kubernetes and Apache Mesos available as the first class resources in OpenStack. Magnum uses Heat to orchestrate an OS image which contains Docker and COE and runs that image in either virtual machines or bare metal in a cluster configuration. 
Magnum offers complete life-cycle management of COEs in an OpenStack environment, integrated with other OpenStack services for a seamless experience for OpenStack users who wish to run containers in an OpenStack environment. The following are a few salient features of Magnum: - Standard API based complete life-cycle management for Container Clusters - Multi-tenancy for container clusters - Choice of COE: Kubernetes, Swarm, Mesos, DC/OS - Choice of container cluster deployment model: VM or Bare-metal - Keystone-based multi-tenant security and auth management - Neutron based multi-tenant network control and isolation - Cinder based volume service for containers - Integrated with OpenStack: SSO experience for cloud users - Secure container cluster access (TLS enabled) More details: `Magnum Project Wiki <https://wiki.openstack.org/wiki/Magnum>`_ ClusterTemplate --------------- A ClusterTemplate (previously known as BayModel) is a collection of parameters to describe how a cluster can be constructed. Some parameters are relevant to the infrastructure of the cluster, while others are for the particular COE. In a typical workflow, a user would create a ClusterTemplate, then create one or more clusters using the ClusterTemplate. A cloud provider can also define a number of ClusterTemplates and provide them to the users. A ClusterTemplate cannot be updated or deleted if a cluster using this ClusterTemplate still exists. The definition and usage of the parameters of a ClusterTemplate are as follows. They are loosely grouped as: mandatory, infrastructure, COE specific. \ Name of the ClusterTemplate to create. The name does not have to be unique. If multiple ClusterTemplates have the same name, you will need to use the UUID to select the ClusterTemplate when creating a cluster or updating, deleting a ClusterTemplate. If a name is not specified, a random name will be generated using a string and a number, for example "pi-13-model". --coe \ Specify the Container Orchestration Engine to use.
Supported COE's include 'kubernetes', 'swarm', 'mesos'. If your environment has additional cluster drivers installed, refer to the cluster driver documentation for the new COE names. This is a mandatory parameter and there is no default value. --image \ The name or UUID of the base image in Glance to boot the servers for the cluster. The image must have the attribute 'os_distro' defined as appropriate for the cluster driver. For the currently supported images, the os_distro names are: ========== ===================== COE os_distro ========== ===================== Kubernetes fedora-atomic, coreos Swarm fedora-atomic Mesos ubuntu ========== ===================== This is a mandatory parameter and there is no default value. Note that the os_distro attribute is case sensitive. --keypair \ The name of the SSH keypair to configure in the cluster servers for ssh access. You will need the key to be able to ssh to the servers in the cluster. The login name is specific to the cluster driver. If keypair is not provided in template it will be required at Cluster create. This value will be overridden by any keypair value that is provided during Cluster create. --external-network \ The name or network ID of a Neutron network to provide connectivity to the external internet for the cluster. This network must be an external network, i.e. its attribute 'router:external' must be 'True'. The servers in the cluster will be connected to a private network and Magnum will create a router between this private network and the external network. This will allow the servers to download images, access discovery service, etc, and the containers to install packages, etc. In the opposite direction, floating IP's will be allocated from the external network to provide access from the external internet to servers and the container services hosted in the cluster. This is a mandatory parameter and there is no default value. 
--public Access to a ClusterTemplate is normally limited to the admin, owner or users within the same tenant as the owners. Setting this flag makes the ClusterTemplate public and accessible by other users. The default is not public. --server-type \ The servers in the cluster can be VM or baremetal. This parameter selects the type of server to create for the cluster. The default is 'vm'. Possible values are 'vm', 'bm'. --network-driver \ The name of a network driver for providing the networks for the containers. Note that this is different and separate from the Neutron network for the cluster. The operation and networking model are specific to the particular driver; refer to the `Networking`_ section for more details. Supported network drivers and the default driver are: =========== ================= ======== COE Network-Driver Default =========== ================= ======== Kubernetes flannel, calico flannel Swarm docker, flannel flannel Mesos docker docker =========== ================= ======== Note that the network driver name is case sensitive. --volume-driver \ The name of a volume driver for managing the persistent storage for the containers. The functionality supported are specific to the driver. Supported volume drivers and the default driver are: ============= ============= =========== COE Volume-Driver Default ============= ============= =========== Kubernetes cinder No Driver Swarm rexray No Driver Mesos rexray No Driver ============= ============= =========== Note that the volume driver name is case sensitive. --dns-nameserver \ The DNS nameserver for the servers and containers in the cluster to use. This is configured in the private Neutron network for the cluster. The default is '8.8.8.8'. --flavor \ The nova flavor id for booting the node servers. The default is 'm1.small'. This value can be overridden at cluster creation. --master-flavor \ The nova flavor id for booting the master or manager servers. The default is 'm1.small'. 
This value can be overridden at cluster creation. --http-proxy \ The IP address for a proxy to use when direct http access from the servers to sites on the external internet is blocked. This may happen in certain countries or enterprises, and the proxy allows the servers and containers to access these sites. The format is a URL including a port number. The default is 'None'. --https-proxy \ The IP address for a proxy to use when direct https access from the servers to sites on the external internet is blocked. This may happen in certain countries or enterprises, and the proxy allows the servers and containers to access these sites. The format is a URL including a port number. The default is 'None'. --no-proxy \ When a proxy server is used, some sites should not go through the proxy and should be accessed normally. In this case, you can specify these sites as a comma separated list of IP's. The default is 'None'. --docker-volume-size \ If specified, container images will be stored in a cinder volume of the specified size in GB. Each cluster node will have a volume attached of the above size. If not specified, images will be stored in the compute instance's local disk. For the 'devicemapper' storage driver, the minimum value is 3GB. For the 'overlay' storage driver, the minimum value is 1GB. This value can be overridden at cluster creation. --docker-storage-driver \ The name of a driver to manage the storage for the images and the container's writable layer. The default is 'devicemapper'. --labels \ Arbitrary labels in the form of key=value pairs. The accepted keys and valid values are defined in the cluster drivers. They are used as a way to pass additional parameters that are specific to a cluster driver. Refer to the subsection on labels for a list of the supported key/value pairs and their usage. The value can be overridden at cluster creation. --tls-disabled Transport Layer Security (TLS) is normally enabled to secure the cluster. 
In some cases, users may want to disable TLS in the cluster, for instance during development or to troubleshoot certain problems. Specifying this parameter will disable TLS so that users can access the COE endpoints without a certificate. The default is TLS enabled. --registry-enabled Docker images by default are pulled from the public Docker registry, but in some cases, users may want to use a private registry. This option provides an alternative registry based on the Registry V2: Magnum will create a local registry in the cluster backed by swift to host the images. Refer to `Docker Registry 2.0 `_ for more details. The default is to use the public registry. --master-lb-enabled Since multiple masters may exist in a cluster, a load balancer is created to provide the API endpoint for the cluster and to direct requests to the masters. In some cases, such as when the LBaaS service is not available, this option can be set to 'false' to create a cluster without the load balancer. In this case, one of the masters will serve as the API endpoint. The default is 'true', i.e. to create the load balancer for the cluster. Labels ------ Labels is a general method to specify supplemental parameters that are specific to certain COE or associated with certain options. Their format is key/value pair and their meaning is interpreted by the drivers that uses them. The drivers do validate the key/value pairs. Their usage is explained in details in the appropriate sections, however, since there are many possible labels, the following table provides a summary to help give a clearer picture. The label keys in the table are linked to more details elsewhere in the user guide. 
+---------------------------------------+--------------------+---------------+ | label key | label value | default | +=======================================+====================+===============+ | `flannel_network_cidr`_ | IPv4 CIDR | 10.100.0.0/16 | | | | | +---------------------------------------+--------------------+---------------+ | `flannel_backend`_ | - udp | udp | | | - vxlan | | | | - host-gw | | +---------------------------------------+--------------------+---------------+ | `flannel_network_subnetlen`_ | size of subnet to | 24 | | | assign to node | | +---------------------------------------+--------------------+---------------+ | `rexray_preempt`_ | - true | false | | | - false | | +---------------------------------------+--------------------+---------------+ | `mesos_slave_isolation`_ | - filesystem/posix | "" | | | - filesystem/linux | | | | - filesystem/shared| | | | - posix/cpu | | | | - posix/mem | | | | - posix/disk | | | | - cgroups/cpu | | | | - cgroups/mem | | | | - docker/runtime | | | | - namespaces/pid | | +---------------------------------------+--------------------+---------------+ | `mesos_slave_image_providers`_ | - appc | "" | | | - docker | | | | - appc,docker | | +---------------------------------------+--------------------+---------------+ | `mesos_slave_work_dir`_ | (directory name) | "" | +---------------------------------------+--------------------+---------------+ | `mesos_slave_executor_env_variables`_ | (file name) | "" | +---------------------------------------+--------------------+---------------+ | `swarm_strategy`_ | - spread | spread | | | - binpack | | | | - random | | +---------------------------------------+--------------------+---------------+ | `admission_control_list`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `prometheus_monitoring`_ | - true | false | | | - false | | +---------------------------------------+--------------------+---------------+ | 
`grafana_admin_passwd`_ | (any string) | "admin" | +---------------------------------------+--------------------+---------------+ | `kube_tag`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `etcd_tag`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `flannel_tag`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `kube_dashboard_enabled`_ | - true | true | | | - false | | +---------------------------------------+--------------------+---------------+ | `influx_grafana_dashboard_enabled`_ | - true | false | | | - false | | +---------------------------------------+--------------------+---------------+ | `docker_volume_type`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `etcd_volume_size`_ | etcd storage | 0 | | | volume size | | +---------------------------------------+--------------------+---------------+ | `container_infra_prefix`_ | see below | "" | +---------------------------------------+--------------------+---------------+ | `availability_zone`_ | AZ for the cluster | "" | | | nodes | | +---------------------------------------+--------------------+---------------+ | `cert_manager_api`_ | see below | false | +---------------------------------------+--------------------+---------------+ | `ingress_controller`_ | see below | "" | +---------------------------------------+--------------------+---------------+ | `ingress_controller_role`_ | see below | "ingress" | +---------------------------------------+--------------------+---------------+ | `kubelet_options`_ | extra kubelet args | "" | +---------------------------------------+--------------------+---------------+ | `kubeapi_options`_ | extra kubeapi args | "" | +---------------------------------------+--------------------+---------------+ | `kubescheduler_options`_ | extra 
kubescheduler| "" | | | args | | +---------------------------------------+--------------------+---------------+ | `kubecontroller_options`_ | extra | "" | | | kubecontroller args| | +---------------------------------------+--------------------+---------------+ | `kubeproxy_options`_ | extra kubeproxy | "" | | | args | | +---------------------------------------+--------------------+---------------+ Cluster ------- A cluster (previously known as bay) is an instance of the ClusterTemplate of a COE. Magnum deploys a cluster by referring to the attributes defined in the particular ClusterTemplate as well as a few additional parameters for the cluster. Magnum deploys the orchestration templates provided by the cluster driver to create and configure all the necessary infrastructure. When ready, the cluster is a fully operational COE that can host containers. Infrastructure -------------- The infrastructure of the cluster consists of the resources provided by the various OpenStack services. Existing infrastructure, including infrastructure external to OpenStack, can also be used by the cluster, such as DNS, public network, public discovery service, Docker registry. The actual resources created depends on the COE type and the options specified; therefore you need to refer to the cluster driver documentation of the COE for specific details. For instance, the option '--master-lb-enabled' in the ClusterTemplate will cause a load balancer pool along with the health monitor and floating IP to be created. It is important to distinguish resources in the IaaS level from resources in the PaaS level. For instance, the infrastructure networking in OpenStack IaaS is different and separate from the container networking in Kubernetes or Swarm PaaS. Typical infrastructure includes the following. Servers The servers host the containers in the cluster and these servers can be VM or bare metal. VM's are provided by Nova. 
Since multiple VM's are hosted on a physical server, the VM's provide the isolation needed for containers between different tenants running on the same physical server. Bare metal servers are provided by Ironic and are used when peak performance with virtually no overhead is needed for the containers. Identity Keystone provides the authentication and authorization for managing the cluster infrastructure. Network Networking among the servers is provided by Neutron. Since COEs currently are not multi-tenant, isolation for multi-tenancy on the networking level is done by using a private network for each cluster. As a result, containers belonging to one tenant will not be accessible to containers or servers of another tenant. Other networking resources may also be used, such as load balancer and routers. Networking among containers can be provided by Kuryr if needed. Storage Cinder provides the block storage that can be used to host the containers and as persistent storage for the containers. Security Barbican provides the storage of secrets such as certificates used for Transport Layer Security (TLS) within the cluster. Life cycle ---------- The set of life cycle operations on the cluster is one of the key values that Magnum provides, enabling clusters to be managed painlessly on OpenStack. The current operations are the basic CRUD operations, but more advanced operations are under discussion in the community and will be implemented as needed. **NOTE** The OpenStack resources created for a cluster are fully accessible to the cluster owner. Care should be taken when modifying or reusing these resources to avoid impacting Magnum operations in unexpected manners. For instance, if you launch your own Nova instance on the cluster private network, Magnum would not be aware of this instance.
Therefore, the cluster-delete operation will fail because Magnum would not delete the extra Nova instance and the private Neutron network cannot be removed while a Nova instance is still attached. **NOTE** Currently Heat nested templates are used to create the resources; therefore if an error occurs, you can troubleshoot through Heat. For more help on Heat stack troubleshooting, refer to the `Troubleshooting Guide `_. Create ++++++ **NOTE** bay- are the deprecated versions of these commands and are still supported in the current release. They will be removed in a future version. Any references to the term bay will be replaced in the parameters when using the 'bay' versions of the commands. For example, in 'bay-create' --baymodel is used as the baymodel parameter for this command instead of --cluster-template. The 'cluster-create' command deploys a cluster, for example:: openstack coe cluster create mycluster \ --cluster-template mytemplate \ --node-count 8 \ --master-count 3 The 'cluster-create' operation is asynchronous; therefore you can initiate another 'cluster-create' operation while the current cluster is being created. If the cluster fails to be created, the infrastructure created so far may be retained or deleted depending on the particular orchestration engine. As a common practice, a failed cluster is retained during development for troubleshooting, but it is automatically deleted in production. The current cluster drivers use Heat templates and the resources of a failed 'cluster-create' are retained. The definition and usage of the parameters for 'cluster-create' are as follows: \ Name of the cluster to create. If a name is not specified, a random name will be generated using a string and a number, for example "gamma-7-cluster". --cluster-template \ The ID or name of the ClusterTemplate to use. This is a mandatory parameter.
Once a ClusterTemplate is used to create a cluster, it cannot be deleted or modified until all clusters that use the ClusterTemplate have been deleted. --keypair \ The name of the SSH keypair to configure in the cluster servers for ssh access. You will need the key to be able to ssh to the servers in the cluster. The login name is specific to the cluster driver. If keypair is not provided it will attempt to use the value in the ClusterTemplate. If the ClusterTemplate is also missing a keypair value then an error will be returned. The keypair value provided here will override the keypair value from the ClusterTemplate. --node-count \ The number of servers that will serve as node in the cluster. The default is 1. --master-count \ The number of servers that will serve as master for the cluster. The default is 1. Set to more than 1 master to enable High Availability. If the option '--master-lb-enabled' is specified in the ClusterTemplate, the master servers will be placed in a load balancer pool. --discovery-url \ The custom discovery url for node discovery. This is used by the COE to discover the servers that have been created to host the containers. The actual discovery mechanism varies with the COE. In some cases, Magnum fills in the server info in the discovery service. In other cases, if the discovery-url is not specified, Magnum will use the public discovery service at:: https://discovery.etcd.io In this case, Magnum will generate a unique url here for each cluster and store the info for the servers. --timeout \ The timeout for cluster creation in minutes. The value expected is a positive integer and the default is 60 minutes. If the timeout is reached during cluster-create, the operation will be aborted and the cluster status will be set to 'CREATE_FAILED'. 
List ++++ The 'cluster-list' command lists all the clusters that belong to the tenant, for example:: openstack coe cluster list Show ++++ The 'cluster-show' command prints all the details of a cluster, for example:: openstack coe cluster show mycluster The properties include those not specified by users that have been assigned default values and properties from new resources that have been created for the cluster. Update ++++++ A cluster can be modified using the 'cluster-update' command, for example:: openstack coe cluster update mycluster replace node_count=8 The parameters are positional and their definition and usage are as follows. \ This is the first parameter, specifying the UUID or name of the cluster to update. \ This is the second parameter, specifying the desired change to be made to the cluster attributes. The allowed changes are 'add', 'replace' and 'remove'. \ This is the third parameter, specifying the targeted attributes in the cluster as a list separated by blank space. To add or replace an attribute, you need to specify the value for the attribute. To remove an attribute, you only need to specify the name of the attribute. Currently the only attribute that can be replaced or removed is 'node_count'. The attributes 'name', 'master_count' and 'discovery_url' cannot be replaced or deleted. The table below summarizes the possible changes to a cluster.
+---------------+-----+------------------+-----------------------+ | Attribute | add | replace | remove | +===============+=====+==================+=======================+ | node_count | no | add/remove nodes | reset to default of 1 | +---------------+-----+------------------+-----------------------+ | master_count | no | no | no | +---------------+-----+------------------+-----------------------+ | name | no | no | no | +---------------+-----+------------------+-----------------------+ | discovery_url | no | no | no | +---------------+-----+------------------+-----------------------+ The 'cluster-update' operation cannot be initiated when another operation is in progress. **NOTE:** The attribute names in cluster-update are slightly different from the corresponding names in the cluster-create command: the dash '-' is replaced by an underscore '_'. For instance, 'node-count' in cluster-create is 'node_count' in cluster-update. Scale +++++ Scaling a cluster means adding servers to or removing servers from the cluster. Currently, this is done through the 'cluster-update' operation by modifying the node-count attribute, for example:: openstack coe cluster update mycluster replace node_count=2 When some nodes are removed, Magnum will attempt to find nodes with no containers to remove. If some nodes with containers must be removed, Magnum will log a warning message. Delete ++++++ The 'cluster-delete' operation removes the cluster by deleting all resources such as servers, network, storage; for example:: openstack coe cluster delete mycluster The only parameter for the cluster-delete command is the ID or name of the cluster to delete. Multiple clusters can be specified, separated by a blank space. If the operation fails, there may be some remaining resources that have not been deleted yet. In this case, you can troubleshoot through Heat. If the templates are deleted manually in Heat, you can delete the cluster in Magnum to clean up the cluster from Magnum database. 
The 'cluster-delete' operation can be initiated when another operation is still in progress. Python Client ============= Installation ------------ Follow the instructions in the OpenStack Installation Guide to enable the repositories for your distribution: * `RHEL/CentOS/Fedora `_ * `Ubuntu/Debian `_ * `openSUSE/SUSE Linux Enterprise `_ Install using distribution packages for RHEL/CentOS/Fedora:: $ sudo yum install python-magnumclient Install using distribution packages for Ubuntu/Debian:: $ sudo apt-get install python-magnumclient Install using distribution packages for OpenSuSE and SuSE Enterprise Linux:: $ sudo zypper install python-magnumclient Verifying installation ---------------------- Execute the `openstack help coe` command to confirm that the client is installed and in the system path:: $ openstack help coe 1.1.0 Using the command-line client ----------------------------- Refer to the `OpenStack Command-Line Interface Reference `_ for a full list of the commands supported by the `openstack coe` command-line client. Horizon Interface ================= Magnum provides a Horizon plugin so that users can access the Container Infrastructure Management service through the OpenStack browser-based graphical UI. The plugin is available from `magnum-ui `_. It is not installed by default in the standard Horizon service, but you can follow the instruction for `installing a Horizon plugin `_. In Horizon, the container infrastructure panel is part of the 'Project' view and it currently supports the following operations: - View list of cluster templates - View details of a cluster template - Create a cluster template - Delete a cluster template - View list of clusters - View details of a cluster - Create a cluster - Delete a cluster - Get the Certificate Authority for a cluster - Sign a user key and obtain a signed certificate for accessing the secured COE API endpoint in a cluster. Other operations are not yet supported and the CLI should be used for these. 
Following is the screenshot of the Horizon view showing the list of cluster templates. .. image:: ../images/cluster-template.png Following is the screenshot of the Horizon view showing the details of a cluster template. .. image:: ../images/cluster-template-details.png Following is the screenshot of the dialog to create a new cluster. .. image:: ../images/cluster-create.png Cluster Drivers =============== A cluster driver is a collection of python code, heat templates, scripts, images, and documents for a particular COE on a particular distro. Magnum presents the concept of ClusterTemplates and clusters. The implementation for a particular cluster type is provided by the cluster driver. In other words, the cluster driver provisions and manages the infrastructure for the COE. Magnum includes default drivers for the following COE and distro pairs: +------------+---------------+ | COE | distro | +============+===============+ | Kubernetes | Fedora Atomic | +------------+---------------+ | Kubernetes | CoreOS | +------------+---------------+ | Swarm | Fedora Atomic | +------------+---------------+ | Mesos | Ubuntu | +------------+---------------+ Magnum is designed to accommodate new cluster drivers to support custom COE's and this section describes how a new cluster driver can be constructed and enabled in Magnum. Directory structure ------------------- Magnum expects the components to be organized in the following directory structure under the directory 'drivers':: COE_Distro/ image/ templates/ api.py driver.py monitor.py scale.py template_def.py version.py The minimum required components are: driver.py Python code that implements the controller operations for the particular COE. The driver must implement: Currently supported: ``cluster_create``, ``cluster_update``, ``cluster_delete``. templates A directory of orchestration templates for managing the lifecycle of clusters, including creation, configuration, update, and deletion. 
Currently only Heat templates are supported, but in the future other orchestration mechanism such as Ansible may be supported. template_def.py Python code that maps the parameters from the ClusterTemplate to the input parameters for the orchestration and invokes the orchestration in the templates directory. version.py Tracks the latest version of the driver in this directory. This is defined by a ``version`` attribute and is represented in the form of ``1.0.0``. It should also include a ``Driver`` attribute with descriptive name such as ``fedora_swarm_atomic``. The remaining components are optional: image Instructions for obtaining or building an image suitable for the COE. api.py Python code to interface with the COE. monitor.py Python code to monitor the resource utilization of the cluster. scale.py Python code to scale the cluster by adding or removing nodes. Sample cluster driver --------------------- To help developers in creating new COE drivers, a minimal cluster driver is provided as an example. The 'docker' cluster driver will simply deploy a single VM running Ubuntu with the latest Docker version installed. It is not a true cluster, but the simplicity will help to illustrate the key concepts. *To be filled in* Installing a cluster driver --------------------------- *To be filled in* Cluster Type Definition ======================= .. include:: cluster-type-definition.rst Heat Stack Templates ==================== .. include:: heat-templates.rst Choosing a COE ============== Magnum supports a variety of COE options, and allows more to be added over time as they gain popularity. As an operator, you may choose to support the full variety of options, or you may want to offer a subset of the available choices. Given multiple choices, your users can run one or more clusters, and each may use a different COE. For example, I might have multiple clusters that use Kubernetes, and just one cluster that uses Swarm. 
All of these clusters can run concurrently, even though they use different COE software. Choosing which COE to use depends on what tools you want to use to manage your containers once you start your app. If you want to use the Docker tools, you may want to use the Swarm cluster type. Swarm will spread your containers across the various nodes in your cluster automatically. It does not monitor the health of your containers, so it can't restart them for you if they stop. It will not automatically scale your app for you (as of Swarm version 1.2.2). You may view this as a plus. If you prefer to manage your application yourself, you might prefer swarm over the other COE options. Kubernetes (as of v1.2) is more sophisticated than Swarm (as of v1.2.2). It offers an attractive YAML file description of a pod, which is a grouping of containers that run together as part of a distributed application. This file format allows you to model your application deployment using a declarative style. It has support for auto scaling and fault recovery, as well as features that allow for sophisticated software deployments, including canary deploys and blue/green deploys. Kubernetes is very popular, especially for web applications. Apache Mesos is a COE that has been around longer than Kubernetes or Swarm. It allows for a variety of different frameworks to be used along with it, including Marathon, Aurora, Chronos, Hadoop, and `a number of others. `_ The Apache Mesos framework design can be used to run alternate COE software directly on Mesos. Although this approach is not widely used yet, it may soon be possible to run Mesos with Kubernetes and Swarm as frameworks, allowing you to share the resources of a cluster between multiple different COEs. Until this option matures, we encourage Magnum users to create multiple clusters, and use the COE in each cluster that best fits the anticipated workload. 
Finding the right COE for your workload is up to you, but Magnum offers you a choice to select among the prevailing leading options. Once you decide, see the next sections for examples of how to create a cluster with your desired COE. Native Clients ============== Magnum preserves the native user experience with a COE and does not provide a separate API or client. This means you will need to use the native client for the particular cluster type to interface with the clusters. In the typical case, there are two clients to consider: COE level This is the orchestration or management level such as Kubernetes, Swarm, Mesos and its frameworks. Container level This is the low level container operation. Currently it is Docker for all clusters. The clients can be CLI and/or browser-based. You will need to refer to the documentation for the specific native client and appropriate version for details, but following are some pointers for reference. Kubernetes CLI is the tool 'kubectl', which can be simply copied from a node in the cluster or downloaded from the Kubernetes release. For instance, if the cluster is running Kubernetes release 1.2.0, the binary for 'kubectl' can be downloaded as and set up locally as follows:: curl -O https://storage.googleapis.com/kubernetes-release/release/v1.2.0/bin/linux/amd64/kubectl chmod +x kubectl sudo mv kubectl /usr/local/bin/kubectl Kubernetes also provides a browser UI. If the cluster has the Kubernetes Dashboard running; it can be accessed using:: eval $(openstack coe cluster config ) kubectl proxy The browser can be accessed at http://localhost:8001/ui For Swarm, the main CLI is 'docker', along with associated tools such as 'docker-compose', etc. Specific version of the binaries can be obtained from the `Docker Engine installation `_. Mesos cluster uses the Marathon framework and details on the Marathon UI can be found in the section `Using Marathon`_. 
Depending on the client requirement, you may need to use a version of the client that matches the version in the cluster. To determine the version of the COE and container, use the command 'cluster-show' and look for the attribute *coe_version* and *container_version*:: openstack coe cluster show k8s-cluster +--------------------+------------------------------------------------------------+ | Property | Value | +--------------------+------------------------------------------------------------+ | status | CREATE_COMPLETE | | uuid | 04952c60-a338-437f-a7e7-d016d1d00e65 | | stack_id | b7bf72ce-b08e-4768-8201-e63a99346898 | | status_reason | Stack CREATE completed successfully | | created_at | 2016-07-25T23:14:06+00:00 | | updated_at | 2016-07-25T23:14:10+00:00 | | create_timeout | 60 | | coe_version | v1.2.0 | | api_address | https://192.168.19.86:6443 | | cluster_template_id| da2825a0-6d09-4208-b39e-b2db666f1118 | | master_addresses | ['192.168.19.87'] | | node_count | 1 | | node_addresses | ['192.168.19.88'] | | master_count | 1 | | container_version | 1.9.1 | | discovery_url | https://discovery.etcd.io/3b7fb09733429d16679484673ba3bfd5 | | name | k8s-cluster | +--------------------+------------------------------------------------------------+ Kubernetes ========== Kubernetes uses a range of terminology that we refer to in this guide. We define these common terms for your reference: Pod When using the Kubernetes container orchestration engine, a pod is the smallest deployable unit that can be created and managed. A pod is a co-located group of application containers that run with a shared context. When using Magnum, pods are created and managed within clusters. Refer to the `pods section `_ in the `Kubernetes User Guide`_ for more information. Replication controller A replication controller is used to ensure that at any given time a certain number of replicas of a pod are running. 
Pods are automatically created and deleted by the replication controller as necessary based on a template to ensure that the defined number of replicas exist. Refer to the `replication controller section `_ in the `Kubernetes User Guide`_ for more information. Service A service is an additional layer of abstraction provided by the Kubernetes container orchestration engine which defines a logical set of pods and a policy for accessing them. This is useful because pods are created and deleted by a replication controller, for example, other pods needing to discover them can do so via the service abstraction. Refer to the `services section `_ in the `Kubernetes User Guide`_ for more information. .. _Kubernetes User Guide: http://kubernetes.io/v1.0/docs/user-guide/ When Magnum deploys a Kubernetes cluster, it uses parameters defined in the ClusterTemplate and specified on the cluster-create command, for example:: openstack coe cluster template create k8s-cluster-template \ --image fedora-atomic-latest \ --keypair testkey \ --external-network public \ --dns-nameserver 8.8.8.8 \ --flavor m1.small \ --docker-volume-size 5 \ --network-driver flannel \ --coe kubernetes openstack coe cluster create k8s-cluster \ --cluster-template k8s-cluster-template \ --master-count 3 \ --node-count 8 Refer to the `ClusterTemplate`_ and `Cluster`_ sections for the full list of parameters. Following are further details relevant to a Kubernetes cluster: Number of masters (master-count) Specified in the cluster-create command to indicate how many servers will run as master in the cluster. Having more than one will provide high availability. The masters will be in a load balancer pool and the virtual IP address (VIP) of the load balancer will serve as the Kubernetes API endpoint. For external access, a floating IP associated with this VIP is available and this is the endpoint shown for Kubernetes in the 'cluster-show' command. 
Number of nodes (node-count) Specified in the cluster-create command to indicate how many servers will run as node in the cluster to host the users' pods. The nodes are registered in Kubernetes using the Nova instance name. Network driver (network-driver) Specified in the ClusterTemplate to select the network driver. The supported and default network driver is 'flannel', an overlay network providing a flat network for all pods. Refer to the `Networking`_ section for more details. Volume driver (volume-driver) Specified in the ClusterTemplate to select the volume driver. The supported volume driver is 'cinder', allowing Cinder volumes to be mounted in containers for use as persistent storage. Data written to these volumes will persist after the container exits and can be accessed again from other containers, while data written to the union file system hosting the container will be deleted. Refer to the `Storage`_ section for more details. Storage driver (docker-storage-driver) Specified in the ClusterTemplate to select the Docker storage driver. The default is 'devicemapper'. Refer to the `Storage`_ section for more details. Image (image) Specified in the ClusterTemplate to indicate the image to boot the servers. The image binary is loaded in Glance with the attribute 'os_distro = fedora-atomic'. Current supported images are Fedora Atomic (download from `Fedora `_ ) and CoreOS (download from `CoreOS `_ ) TLS (tls-disabled) Transport Layer Security is enabled by default, so you need a key and signed certificate to access the Kubernetes API and CLI. Magnum handles its own key and certificate when interfacing with the Kubernetes cluster. In development mode, TLS can be disabled. Refer to the 'Transport Layer Security'_ section for more details. What runs on the servers The servers for Kubernetes master host containers in the 'kube-system' name space to run the Kubernetes proxy, scheduler and controller manager. The masters will not host users' pods. 
Kubernetes API server, docker daemon, etcd and flannel run as systemd services. The servers for Kubernetes node also host a container in the 'kube-system' name space to run the Kubernetes proxy, while Kubernetes kubelet, docker daemon and flannel run as systemd services. Log into the servers You can log into the master servers using the login 'fedora' and the keypair specified in the ClusterTemplate. In addition to the common attributes in the ClusterTemplate, you can specify the following attributes that are specific to Kubernetes by using the labels attribute. _`admission_control_list` This label corresponds to Kubernetes parameter for the API server '--admission-control'. For more details, refer to the `Admission Controllers `_. The default value corresponds to the one recommended in this doc for our current Kubernetes version. _`etcd_volume_size` This label sets the size of a volume holding the etcd storage data. The default value is 0, meaning the etcd data is not persisted (no volume). _`container_infra_prefix` Prefix of all container images used in the cluster (kubernetes components, coredns, kubernetes-dashboard, node-exporter). For example, kubernetes-apiserver is pulled from docker.io/openstackmagnum/kubernetes-apiserver, with this label it can be changed to myregistry.example.com/mycloud/kubernetes-apiserver. Similarly, all other components used in the cluster will be prefixed with this label, which assumes an operator has cloned all expected images in myregistry.example.com/mycloud. 
Images that must be mirrored: * docker.io/coredns/coredns:011 * docker.io/grafana/grafana:latest * docker.io/openstackmagnum/kubernetes-apiserver * docker.io/openstackmagnum/kubernetes-controller-manager * docker.io/openstackmagnum/kubernetes-kubelet * docker.io/openstackmagnum/kubernetes-proxy * docker.io/openstackmagnum/kubernetes-scheduler * docker.io/openstackmagnum/etcd * docker.io/openstackmagnum/flannel * docker.io/prom/node-exporter:latest * docker.io/prom/prometheus:latest * gcr.io/google_containers/kubernetes-dashboard-amd64:v1.5.1 * gcr.io/google_containers/pause:3.0 _`kube_tag` This label allows users to select `a specific Kubernetes release, based on its container tag `_. If unset, the current Magnum version's default Kubernetes release is installed. _`etcd_tag` This label allows users to select `a specific etcd version, based on its container tag `_. If unset, the current Magnum version's default etcd version is used. For queens, it is v3.2.7. _`flannel_tag` This label allows users to select `a specific flannel version, based on its container tag `_. If unset, the current Magnum version's default flannel version is used. For queens, it is v0.9.0. _`kube_dashboard_enabled` This label triggers the deployment of the kubernetes dashboard. The default value is 1, meaning it will be enabled. _`cert_manager_api` This label enables the kubernetes `certificate manager api `_. _`kubelet_options` This label can hold any additional options to be passed to the kubelet. For more details, refer to the `kubelet admin guide `_. By default no additional options are passed. _`kubeproxy_options` This label can hold any additional options to be passed to the kube proxy. For more details, refer to the `kube proxy admin guide `_. By default no additional options are passed. _`kubecontroller_options` This label can hold any additional options to be passed to the kube controller manager. For more details, refer to the `kube controller manager admin guide `_. 
By default no additional options are passed. _`kubeapi_options` This label can hold any additional options to be passed to the kube api server. For more details, refer to the `kube api admin guide `_. By default no additional options are passed. _`kubescheduler_options` This label can hold any additional options to be passed to the kube scheduler. For more details, refer to the `kube scheduler admin guide `_. By default no additional options are passed. _`influx_grafana_dashboard_enabled` The kubernetes dashboard comes with heapster enabled. If this label is set, an influxdb and grafana instance will be deployed, heapster will push data to influx and grafana will project them. External load balancer for services ----------------------------------- All Kubernetes pods and services created in the cluster are assigned IP addresses on a private container network so they can access each other and the external internet. However, these IP addresses are not accessible from an external network. To publish a service endpoint externally so that the service can be accessed from the external network, Kubernetes provides the external load balancer feature. This is done by simply specifying in the service manifest the attribute "type: LoadBalancer". Magnum enables and configures the Kubernetes plugin for OpenStack so that it can interface with Neutron and manage the necessary networking resources. When the service is created, Kubernetes will add an external load balancer in front of the service so that the service will have an external IP address in addition to the internal IP address on the container network. The service endpoint can then be accessed with this external IP address. Kubernetes handles all the life cycle operations when pods are modified behind the service and when the service is deleted. Refer to the `Kubernetes External Load Balancer`_ section for more details. 
Ingress Controller ------------------ In addition to the LoadBalancer described above, Kubernetes can also be configured with an Ingress Controller. Ingress can provide load balancing, SSL termination and name-based virtual hosting. Magnum allows selecting one of multiple controller options via the 'ingress_controller' label. Check the Kubernetes documentation to define your own Ingress resources. _`ingress_controller` This label sets the Ingress Controller to be used. Currently only traefik is supported. The default is '', meaning no Ingress Controller configured. _`ingress_controller_role` This label defines the role nodes should have to run an instance of the Ingress Controller. This gives operators full control on which nodes should be running an instance of the controller, and should be set in multiple nodes for availability. Default is 'ingress'. An example of setting this in a Kubernetes node would be:: kubectl label node role=ingress Swarm ===== A Swarm cluster is a pool of servers running Docker daemon that is managed as a single Docker host. One or more Swarm managers accepts the standard Docker API and manage this pool of servers. Magnum deploys a Swarm cluster using parameters defined in the ClusterTemplate and specified on the 'cluster-create' command, for example:: openstack coe cluster template create swarm-cluster-template \ --image fedora-atomic-latest \ --keypair testkey \ --external-network public \ --dns-nameserver 8.8.8.8 \ --flavor m1.small \ --docker-volume-size 5 \ --coe swarm openstack coe cluster create swarm-cluster \ --cluster-template swarm-cluster-template \ --master-count 3 \ --node-count 8 Refer to the `ClusterTemplate`_ and `Cluster`_ sections for the full list of parameters. Following are further details relevant to Swarm: What runs on the servers There are two types of servers in the Swarm cluster: managers and nodes. The Docker daemon runs on all servers. 
On the servers for manager, the Swarm manager is run as a Docker container on port 2376 and this is initiated by the systemd service swarm-manager. Etcd is also run on the manager servers for discovery of the node servers in the cluster. On the servers for node, the Swarm agent is run as a Docker container on port 2375 and this is initiated by the systemd service swarm-agent. On start up, the agents will register themselves in etcd and the managers will discover the new node to manage. Number of managers (master-count) Specified in the cluster-create command to indicate how many servers will run as managers in the cluster. Having more than one will provide high availability. The managers will be in a load balancer pool and the load balancer virtual IP address (VIP) will serve as the Swarm API endpoint. A floating IP associated with the load balancer VIP will serve as the external Swarm API endpoint. The managers accept the standard Docker API and perform the corresponding operation on the servers in the pool. For instance, when a new container is created, the managers will select one of the servers based on some strategy and schedule the containers there. Number of nodes (node-count) Specified in the cluster-create command to indicate how many servers will run as nodes in the cluster to host your Docker containers. These servers will register themselves in etcd for discovery by the managers, and interact with the managers. Docker daemon is run locally to host containers from users. Network driver (network-driver) Specified in the ClusterTemplate to select the network driver. The supported drivers are 'docker' and 'flannel', with 'docker' as the default. With the 'docker' driver, containers are connected to the 'docker0' bridge on each node and are assigned local IP address. With the 'flannel' driver, containers are connected to a flat overlay network and are assigned IP address by Flannel. Refer to the `Networking`_ section for more details. 
Volume driver (volume-driver) Specified in the ClusterTemplate to select the volume driver to provide persistent storage for containers. The supported volume driver is 'rexray'. The default is no volume driver. When 'rexray' or other volume driver is deployed, you can use the Docker 'volume' command to create, mount, unmount, delete volumes in containers. Cinder block storage is used as the backend to support this feature. Refer to the `Storage`_ section for more details. Storage driver (docker-storage-driver) Specified in the ClusterTemplate to select the Docker storage driver. The default is 'devicemapper'. Refer to the `Storage`_ section for more details. Image (image) Specified in the ClusterTemplate to indicate the image to boot the servers for the Swarm manager and node. The image binary is loaded in Glance with the attribute 'os_distro = fedora-atomic'. Current supported image is Fedora Atomic (download from `Fedora `_ ) TLS (tls-disabled) Transport Layer Security is enabled by default to secure the Swarm API for access by both the users and Magnum. You will need a key and a signed certificate to access the Swarm API and CLI. Magnum handles its own key and certificate when interfacing with the Swarm cluster. In development mode, TLS can be disabled. Refer to the 'Transport Layer Security'_ section for details on how to create your key and have Magnum sign your certificate. Log into the servers You can log into the manager and node servers with the account 'fedora' and the keypair specified in the ClusterTemplate. In addition to the common attributes in the ClusterTemplate, you can specify the following attributes that are specific to Swarm by using the labels attribute. _`swarm_strategy` This label corresponds to Swarm parameter for master '--strategy'. For more details, refer to the `Swarm Strategy `_. 
Valid values for this label are: - spread - binpack - random Mesos ===== A Mesos cluster consists of a pool of servers running as Mesos slaves, managed by a set of servers running as Mesos masters. Mesos manages the resources from the slaves but does not itself deploy containers. Instead, one or more Mesos frameworks running on the Mesos cluster would accept user requests on their own endpoint, using their particular API. These frameworks would then negotiate the resources with Mesos and the containers are deployed on the servers where the resources are offered. Magnum deploys a Mesos cluster using parameters defined in the ClusterTemplate and specified on the 'cluster-create' command, for example:: openstack coe cluster template create mesos-cluster-template \ --image ubuntu-mesos \ --keypair testkey \ --external-network public \ --dns-nameserver 8.8.8.8 \ --flavor m1.small \ --coe mesos openstack coe cluster create mesos-cluster \ --cluster-template mesos-cluster-template \ --master-count 3 \ --node-count 8 Refer to the `ClusterTemplate`_ and `Cluster`_ sections for the full list of parameters. Following are further details relevant to Mesos: What runs on the servers There are two types of servers in the Mesos cluster: masters and slaves. The Docker daemon runs on all servers. On the servers for master, the Mesos master is run as a process on port 5050 and this is initiated by the upstart service 'mesos-master'. Zookeeper is also run on the master servers, initiated by the upstart service 'zookeeper'. Zookeeper is used by the master servers for electing the leader among the masters, and by the slave servers and frameworks to determine the current leader. The framework Marathon is run as a process on port 8080 on the master servers, initiated by the upstart service 'marathon'. On the servers for slave, the Mesos slave is run as a process initiated by the upstart service 'mesos-slave'. 
Number of master (master-count) Specified in the cluster-create command to indicate how many servers will run as masters in the cluster. Having more than one will provide high availability. If the load balancer option is specified, the masters will be in a load balancer pool and the load balancer virtual IP address (VIP) will serve as the Mesos API endpoint. A floating IP associated with the load balancer VIP will serve as the external Mesos API endpoint. Number of agents (node-count) Specified in the cluster-create command to indicate how many servers will run as Mesos slave in the cluster. Docker daemon is run locally to host containers from users. The slaves report their available resources to the master and accept request from the master to deploy tasks from the frameworks. In this case, the tasks will be to run Docker containers. Network driver (network-driver) Specified in the ClusterTemplate to select the network driver. Currently 'docker' is the only supported driver: containers are connected to the 'docker0' bridge on each node and are assigned local IP address. Refer to the `Networking`_ section for more details. Volume driver (volume-driver) Specified in the ClusterTemplate to select the volume driver to provide persistent storage for containers. The supported volume driver is 'rexray'. The default is no volume driver. When 'rexray' or other volume driver is deployed, you can use the Docker 'volume' command to create, mount, unmount, delete volumes in containers. Cinder block storage is used as the backend to support this feature. Refer to the `Storage`_ section for more details. Storage driver (docker-storage-driver) This is currently not supported for Mesos. Image (image) Specified in the ClusterTemplate to indicate the image to boot the servers for the Mesos master and slave. The image binary is loaded in Glance with the attribute 'os_distro = ubuntu'. 
You can download the `ready-built image `_, or you can create the image as described below in the `Building Mesos image`_ section. TLS (tls-disabled) Transport Layer Security is not yet implemented for Mesos. Log into the servers You can log into the manager and node servers with the account 'ubuntu' and the keypair specified in the ClusterTemplate. In addition to the common attributes in the ClusterTemplate, you can specify the following attributes that are specific to Mesos by using the labels attribute. _`rexray_preempt` When the volume driver 'rexray' is used, you can mount a data volume backed by Cinder to a host to be accessed by a container. In this case, the label 'rexray_preempt' can optionally be set to True or False to enable any host to take control of the volume regardless of whether other hosts are using the volume. This will in effect unmount the volume from the current host and remount it on the new host. If this label is set to false, then rexray will ensure data safety for locking the volume before remounting. The default value is False. _`mesos_slave_isolation` This label corresponds to the Mesos parameter for slave '--isolation'. The isolators are needed to provide proper isolation according to the runtime configurations specified in the container image. For more details, refer to the `Mesos configuration `_ and the `Mesos container image support `_. Valid values for this label are: - filesystem/posix - filesystem/linux - filesystem/shared - posix/cpu - posix/mem - posix/disk - cgroups/cpu - cgroups/mem - docker/runtime - namespaces/pid _`mesos_slave_image_providers` This label corresponds to the Mesos parameter for agent '--image_providers', which tells Mesos containerizer what types of container images are allowed. For more details, refer to the `Mesos configuration `_ and the `Mesos container image support `_. 
Valid values are: - appc - docker - appc,docker _`mesos_slave_work_dir` This label corresponds to the Mesos parameter '--work_dir' for slave. For more details, refer to the `Mesos configuration `_. Valid value is a directory path to use as the work directory for the framework, for example:: mesos_slave_work_dir=/tmp/mesos _`mesos_slave_executor_env_variables` This label corresponds to the Mesos parameter for slave '--executor_environment_variables', which passes additional environment variables to the executor and subsequent tasks. For more details, refer to the `Mesos configuration `_. Valid value is the name of a JSON file, for example:: mesos_slave_executor_env_variables=/home/ubuntu/test.json The JSON file should contain environment variables, for example:: { "PATH": "/bin:/usr/bin", "LD_LIBRARY_PATH": "/usr/local/lib" } By default the executor will inherit the slave's environment variables. Building Mesos image -------------------- The boot image for Mesos cluster is an Ubuntu 14.04 base image with the following middleware pre-installed: - ``docker`` - ``zookeeper`` - ``mesos`` - ``marathon`` The cluster driver provides two ways to create this image, as follows. Diskimage-builder +++++++++++++++++ To run the `diskimage-builder `__ tool manually, use the provided `elements `__. 
Following are the typical steps to use the diskimage-builder tool on an Ubuntu server:: $ sudo apt-get update $ sudo apt-get install git qemu-utils python-pip $ sudo pip install diskimage-builder $ git clone https://git.openstack.org/openstack/magnum $ git clone https://git.openstack.org/openstack/dib-utils.git $ git clone https://git.openstack.org/openstack/tripleo-image-elements.git $ git clone https://git.openstack.org/openstack/heat-templates.git $ export PATH="${PWD}/dib-utils/bin:$PATH" $ export ELEMENTS_PATH=tripleo-image-elements/elements:heat-templates/hot/software-config/elements:magnum/magnum/drivers/mesos_ubuntu_v1/image/mesos $ export DIB_RELEASE=trusty $ disk-image-create ubuntu vm docker mesos \ os-collect-config os-refresh-config os-apply-config \ heat-config heat-config-script \ -o ubuntu-mesos.qcow2 Dockerfile ++++++++++ To build the image as above but within a Docker container, use the provided `Dockerfile `__. The output image will be saved as '/tmp/ubuntu-mesos.qcow2'. Following are the typical steps to run a Docker container to build the image:: $ git clone https://git.openstack.org/openstack/magnum $ cd magnum/magnum/drivers/mesos_ubuntu_v1/image $ sudo docker build -t magnum/mesos-builder . $ sudo docker run -v /tmp:/output --rm -ti --privileged magnum/mesos-builder ... Image file /output/ubuntu-mesos.qcow2 created... Using Marathon -------------- Marathon is a Mesos framework for long running applications. Docker containers can be deployed via Marathon's REST API. To get the endpoint for Marathon, run the cluster-show command and look for the property 'api_address'. Marathon's endpoint is port 8080 on this IP address, so the web console can be accessed at:: http://:8080/ Refer to Marathon documentation for details on running applications. 
For example, you can 'post' a JSON app description to ``http://:8080/apps`` to deploy a Docker container:: $ cat > app.json << END { "container": { "type": "DOCKER", "docker": { "image": "libmesos/ubuntu" } }, "id": "ubuntu", "instances": 1, "cpus": 0.5, "mem": 512, "uris": [], "cmd": "while sleep 10; do date -u +%T; done" } END $ API_ADDRESS=$(openstack coe cluster show mesos-cluster | awk '/ api_address /{print $4}') $ curl -X POST -H "Content-Type: application/json" \ http://${API_ADDRESS}:8080/v2/apps -d@app.json Transport Layer Security ======================== Magnum uses TLS to secure communication between a cluster's services and the outside world. TLS is a complex subject, and many guides on it exist already. This guide will not attempt to fully describe TLS, but instead will only cover the necessary steps to get a client set up to talk to a cluster with TLS. A more in-depth guide on TLS can be found in the `OpenSSL Cookbook `_ by Ivan Ristić. TLS is employed at 3 points in a cluster: 1. By Magnum to communicate with the cluster API endpoint 2. By the cluster worker nodes to communicate with the master nodes 3. By the end-user when they use the native client libraries to interact with the cluster. This applies to both a CLI or a program that uses a client for the particular cluster. Each client needs a valid certificate to authenticate and communicate with a cluster. The first two cases are implemented internally by Magnum and are not exposed to the users, while the last case involves the users and is described in more details below. Deploying a secure cluster -------------------------- Current TLS support is summarized below: +------------+-------------+ | COE | TLS support | +============+=============+ | Kubernetes | yes | +------------+-------------+ | Swarm | yes | +------------+-------------+ | Mesos | no | +------------+-------------+ For cluster type with TLS support, e.g. Kubernetes and Swarm, TLS is enabled by default. 
To disable TLS in Magnum, you can specify the parameter '--tls-disabled' in the ClusterTemplate. Please note it is not recommended to disable TLS due to security reasons. In the following example, Kubernetes is used to illustrate a secure cluster, but the steps are similar for other cluster types that have TLS support. First, create a ClusterTemplate; by default TLS is enabled in Magnum, therefore it does not need to be specified via a parameter:: openstack coe cluster template create secure-kubernetes \ --keypair default \ --external-network public \ --image fedora-atomic-latest \ --dns-nameserver 8.8.8.8 \ --flavor m1.small \ --docker-volume-size 3 \ --coe kubernetes \ --network-driver flannel +-----------------------+--------------------------------------+ | Property | Value | +-----------------------+--------------------------------------+ | insecure_registry | None | | http_proxy | None | | updated_at | None | | master_flavor_id | None | | uuid | 5519b24a-621c-413c-832f-c30424528b31 | | no_proxy | None | | https_proxy | None | | tls_disabled | False | | keypair_id | time4funkey | | public | False | | labels | {} | | docker_volume_size | 5 | | server_type | vm | | external_network_id | public | | cluster_distro | fedora-atomic | | image_id | fedora-atomic-latest | | volume_driver | None | | registry_enabled | False | | docker_storage_driver | devicemapper | | apiserver_port | None | | name | secure-kubernetes | | created_at | 2016-07-25T23:09:50+00:00 | | network_driver | flannel | | fixed_network | None | | coe | kubernetes | | flavor_id | m1.small | | dns_nameserver | 8.8.8.8 | +-----------------------+--------------------------------------+ Now create a cluster. 
Use the ClusterTemplate name as a template for cluster creation:: openstack coe cluster create secure-k8s-cluster \ --cluster-template secure-kubernetes \ --node-count 1 +--------------------+------------------------------------------------------------+ | Property | Value | +--------------------+------------------------------------------------------------+ | status | CREATE_IN_PROGRESS | | uuid | 3968ffd5-678d-4555-9737-35f191340fda | | stack_id | c96b66dd-2109-4ae2-b510-b3428f1e8761 | | status_reason | None | | created_at | 2016-07-25T23:14:06+00:00 | | updated_at | None | | create_timeout | 0 | | api_address | None | | coe_version | - | | cluster_template_id| 5519b24a-621c-413c-832f-c30424528b31 | | master_addresses | None | | node_count | 1 | | node_addresses | None | | master_count | 1 | | container_version | - | | discovery_url | https://discovery.etcd.io/ba52a8178e7364d43a323ee4387cf28e | | name | secure-k8s-cluster | +--------------------+------------------------------------------------------------+ Now run cluster-show command to get the details of the cluster and verify that the api_address is 'https':: openstack coe cluster show secure-k8scluster +--------------------+------------------------------------------------------------+ | Property | Value | +--------------------+------------------------------------------------------------+ | status | CREATE_COMPLETE | | uuid | 04952c60-a338-437f-a7e7-d016d1d00e65 | | stack_id | b7bf72ce-b08e-4768-8201-e63a99346898 | | status_reason | Stack CREATE completed successfully | | created_at | 2016-07-25T23:14:06+00:00 | | updated_at | 2016-07-25T23:14:10+00:00 | | create_timeout | 60 | | coe_version | v1.2.0 | | api_address | https://192.168.19.86:6443 | | cluster_template_id| da2825a0-6d09-4208-b39e-b2db666f1118 | | master_addresses | ['192.168.19.87'] | | node_count | 1 | | node_addresses | ['192.168.19.88'] | | master_count | 1 | | container_version | 1.9.1 | | discovery_url | 
https://discovery.etcd.io/3b7fb09733429d16679484673ba3bfd5 | | name | secure-k8s-cluster | +--------------------+------------------------------------------------------------+ You can see the api_address contains https in the URL, showing that the Kubernetes services are configured securely with SSL certificates and now any communication to kube-apiserver will be over https. Interfacing with a secure cluster --------------------------------- To communicate with the API endpoint of a secure cluster, you will need to supply 3 SSL artifacts: 1. Your client key 2. A certificate for your client key that has been signed by a Certificate Authority (CA) 3. The certificate of the CA There are two ways to obtain these 3 artifacts. Automated +++++++++ Magnum provides the command 'cluster-config' to help the user in setting up the environment and artifacts for TLS, for example:: openstack coe cluster config swarm-cluster --dir myclusterconfig This will display the necessary environment variables, which you can add to your environment:: export DOCKER_HOST=tcp://172.24.4.5:2376 export DOCKER_CERT_PATH=myclusterconfig export DOCKER_TLS_VERIFY=True And the artifacts are placed in the directory specified:: ca.pem cert.pem key.pem You can now use the native client to interact with the COE. The variables and artifacts are unique to the cluster. The parameters for 'cluster config' are as follows: --dir <dirname> Directory to save the certificate and config files. --force Overwrite existing files in the directory specified. Manual ++++++ You can create the key and certificates manually using the following steps. Client Key Your personal private key is essentially a cryptographically generated string of bytes. It should be protected in the same manner as a password. To generate an RSA key, you can use the 'genrsa' command of the 'openssl' tool:: openssl genrsa -out key.pem 4096 This command generates a 4096-bit RSA key at key.pem. 
Signed Certificate To authenticate your key, you need to have it signed by a CA. First generate the Certificate Signing Request (CSR). The CSR will be used by Magnum to generate a signed certificate that you will use to communicate with the cluster. To generate a CSR, openssl requires a config file that specifies a few values. Using the example template below, you can fill in the 'CN' value with your name and save it as client.conf:: $ cat > client.conf << END [req] distinguished_name = req_distinguished_name req_extensions = req_ext prompt = no [req_distinguished_name] CN = Your Name [req_ext] extendedKeyUsage = clientAuth END For RBAC enabled kubernetes clusters you need to use the name admin and system:masters as Organization (O=):: $ cat > client.conf << END [req] distinguished_name = req_distinguished_name req_extensions = req_ext prompt = no [req_distinguished_name] CN = admin O = system:masters OU=OpenStack/Magnum C=US ST=TX L=Austin [req_ext] extendedKeyUsage = clientAuth END Once you have client.conf, you can run the openssl 'req' command to generate the CSR:: openssl req -new -days 365 \ -config client.conf \ -key key.pem \ -out client.csr Now that you have your client CSR, you can use the Magnum CLI to send it off to Magnum to get it signed:: magnum ca-sign --cluster secure-k8s-cluster --csr client.csr > cert.pem Certificate Authority The final artifact you need to retrieve is the CA certificate for the cluster. This is used by your native client to ensure you are only communicating with hosts that Magnum set up:: magnum ca-show --cluster secure-k8s-cluster > ca.pem Rotate Certificate To rotate the CA certificate for a cluster and invalidate all user certificates, you can use the following command:: magnum ca-rotate --cluster secure-k8s-cluster User Examples ------------- Here are some examples for using the CLI on a secure Kubernetes and Swarm cluster. 
You can perform all the TLS set up automatically by:: eval $(openstack coe cluster config <cluster-name>) Or you can perform the manual steps as described above and specify the TLS options on the CLI. The SSL artifacts are assumed to be saved in local files as follows:: - key.pem: your SSL key - cert.pem: signed certificate - ca.pem: certificate for cluster CA For Kubernetes, you need to get 'kubectl', a kubernetes CLI tool, to communicate with the cluster:: curl -O https://storage.googleapis.com/kubernetes-release/release/v1.2.0/bin/linux/amd64/kubectl chmod +x kubectl sudo mv kubectl /usr/local/bin/kubectl Now let's run some 'kubectl' commands to check the secure communication. If you used 'cluster-config', then you can simply run the 'kubectl' command without having to specify the TLS options since they have been defined in the environment:: kubectl version Client Version: version.Info{Major:"1", Minor:"0", GitVersion:"v1.2.0", GitCommit:"cffae0523cfa80ddf917aba69f08508b91f603d5", GitTreeState:"clean"} Server Version: version.Info{Major:"1", Minor:"0", GitVersion:"v1.2.0", GitCommit:"cffae0523cfa80ddf917aba69f08508b91f603d5", GitTreeState:"clean"} You can specify the TLS options manually as follows:: KUBERNETES_URL=$(openstack coe cluster show secure-k8s-cluster | awk '/ api_address /{print $4}') kubectl version --certificate-authority=ca.pem \ --client-key=key.pem \ --client-certificate=cert.pem -s $KUBERNETES_URL kubectl create -f redis-master.yaml --certificate-authority=ca.pem \ --client-key=key.pem \ --client-certificate=cert.pem -s $KUBERNETES_URL pods/test2 kubectl get pods --certificate-authority=ca.pem \ --client-key=key.pem \ --client-certificate=cert.pem -s $KUBERNETES_URL NAME READY STATUS RESTARTS AGE redis-master 2/2 Running 0 1m Besides using the environment variables, you can also configure 'kubectl' to remember the TLS options:: kubectl config set-cluster secure-k8s-cluster --server=${KUBERNETES_URL} \ --certificate-authority=${PWD}/ca.pem kubectl config 
set-credentials client --certificate-authority=${PWD}/ca.pem \ --client-key=${PWD}/key.pem --client-certificate=${PWD}/cert.pem kubectl config set-context secure-k8scluster --cluster=secure-k8scluster --user=client kubectl config use-context secure-k8scluster Then you can use 'kubectl' commands without the certificates:: kubectl get pods NAME READY STATUS RESTARTS AGE redis-master 2/2 Running 0 1m Access to Kubernetes User Interface:: curl -L ${KUBERNETES_URL}/ui --cacert ca.pem --key key.pem \ --cert cert.pem You may also set up 'kubectl' proxy which will use your client certificates to allow you to browse to a local address to use the UI without installing a certificate in your browser:: kubectl proxy --api-prefix=/ --certificate-authority=ca.pem --client-key=key.pem \ --client-certificate=cert.pem -s $KUBERNETES_URL You can then open http://localhost:8001/ui in your browser. The examples for Docker are similar. With 'cluster-config' set up, you can just run docker commands without TLS options. To specify the TLS options manually:: docker -H tcp://192.168.19.86:2376 --tlsverify \ --tlscacert ca.pem \ --tlskey key.pem \ --tlscert cert.pem \ info Storing the certificates ------------------------ Magnum generates and maintains a certificate for each cluster so that it can also communicate securely with the cluster. As a result, it is necessary to store the certificates in a secure manner. Magnum provides the following methods for storing the certificates and this is configured in /etc/magnum/magnum.conf in the section [certificates] with the parameter 'cert_manager_type'. 1. Barbican: Barbican is a service in OpenStack for storing secrets. It is used by Magnum to store the certificates when cert_manager_type is configured as:: cert_manager_type = barbican This is the recommended configuration for a production environment. Magnum will interface with Barbican to store and retrieve certificates, delegating the task of securing the certificates to Barbican. 2. 
Magnum database: In some cases, a user may want an alternative to storing the certificates that does not require Barbican. This can be a development environment, or a private cloud that has been secured by other means. Magnum can store the certificates in its own database; this is done with the configuration:: cert_manager_type = x509keypair This storage mode is only as secure as the controller server that hosts the database for the OpenStack services. 3. Local store: As another alternative that does not require Barbican, Magnum can simply store the certificates on the local host filesystem where the conductor is running, using the configuration:: cert_manager_type = local Note that this mode is only supported when there is a single Magnum conductor running since the certificates are stored locally. The 'local' mode is not recommended for a production environment. For the nodes, the certificates for communicating with the masters are stored locally and the nodes are assumed to be secured. Networking ========== There are two components that make up the networking in a cluster. 1. The Neutron infrastructure for the cluster: this includes the private network, subnet, ports, routers, load balancers, etc. 2. The networking model presented to the containers: this is what the containers see in communicating with each other and to the external world. Typically this consists of a driver deployed on each node. The two components are deployed and managed separately. The Neutron infrastructure is the integration with OpenStack; therefore, it is stable and more or less similar across different COE types. The networking model, on the other hand, is specific to the COE type and is still under active development in the various COE communities, for example, `Docker libnetwork `_ and `Kubernetes Container Networking `_. As a result, the implementation for the networking models is evolving and new models are likely to be introduced in the future. 
For the Neutron infrastructure, the following configuration can be set in the ClusterTemplate: external-network The external Neutron network ID to connect to this cluster. This is used to connect the cluster to the external internet, allowing the nodes in the cluster to access external URL for discovery, image download, etc. If not specified, the default value is "public" and this is valid for a typical devstack. fixed-network The Neutron network to use as the private network for the cluster nodes. If not specified, a new Neutron private network will be created. dns-nameserver The DNS nameserver to use for this cluster. This is an IP address for the server and it is used to configure the Neutron subnet of the cluster (dns_nameservers). If not specified, the default DNS is 8.8.8.8, the publicly available DNS. http-proxy, https-proxy, no-proxy The proxy for the nodes in the cluster, to be used when the cluster is behind a firewall and containers cannot access URL's on the external internet directly. For the parameter http-proxy and https-proxy, the value to provide is a URL and it will be set in the environment variable HTTP_PROXY and HTTPS_PROXY respectively in the nodes. For the parameter no-proxy, the value to provide is an IP or list of IP's separated by comma. Likewise, the value will be set in the environment variable NO_PROXY in the nodes. For the networking model to the container, the following configuration can be set in the ClusterTemplate: network-driver The network driver name for instantiating container networks. 
Currently, the following network drivers are supported: +--------+-------------+-------------+-------------+ | Driver | Kubernetes | Swarm | Mesos | +========+=============+=============+=============+ | Flannel| supported | supported | unsupported | +--------+-------------+-------------+-------------+ | Docker | unsupported | supported | supported | +--------+-------------+-------------+-------------+ | Calico | supported | unsupported | unsupported | +--------+-------------+-------------+-------------+ If not specified, the default driver is Flannel for Kubernetes, and Docker for Swarm and Mesos. Particular network driver may require its own set of parameters for configuration, and these parameters are specified through the labels in the ClusterTemplate. Labels are arbitrary key=value pairs. When Flannel is specified as the network driver, the following optional labels can be added: _`flannel_network_cidr` IPv4 network in CIDR format to use for the entire Flannel network. If not specified, the default is 10.100.0.0/16. _`flannel_network_subnetlen` The size of the subnet allocated to each host. If not specified, the default is 24. _`flannel_backend` The type of backend for Flannel. Possible values are *udp, vxlan, host-gw*. If not specified, the default is *udp*. Selecting the best backend depends on your networking. Generally, *udp* is the most generally supported backend since there is little requirement on the network, but it typically offers the lowest performance. The *vxlan* backend performs better, but requires vxlan support in the kernel so the image used to provision the nodes needs to include this support. The *host-gw* backend offers the best performance since it does not actually encapsulate messages, but it requires all the nodes to be on the same L2 network. 
The private Neutron network that Magnum creates does meet this requirement; therefore if the parameter *fixed_network* is not specified in the ClusterTemplate, *host-gw* is the best choice for the Flannel backend. When Calico is specified as the network driver, the following optional labels can be added: _`calico_ipv4pool` IPv4 network in CIDR format which is the IP pool, from which Pod IPs will be chosen. If not specified, the default is 192.168.0.0/16. _`calico_tag` Tag of the calico containers used to provision the calico node _`calico_cni_tag` Tag of the cni used to provision the calico node _`calico_kube_controllers_tag` Tag of the kube_controllers used to provision the calico node Besides, the Calico network driver needs kube_tag with v1.9.3 or later, because Calico needs extra mounts for the kubelet container. See `commit `_ of atomic-system-containers for more information. High Availability ================= Support for highly available clusters is a work in progress, the goal being to enable clusters spanning multiple availability zones. As of today you can specify one single availability zone for your cluster. _`availability_zone` The availability zone where the cluster nodes should be deployed. If not specified, the default is None. Scaling ======= Performance tuning for periodic task ------------------------------------ Magnum's periodic task performs a `stack-get` operation on the Heat stack underlying each of its clusters. If you have a large number of clusters this can create considerable load on the Heat API. To reduce that load you can configure Magnum to perform one global `stack-list` per periodic task instead of one per cluster. This is disabled by default, both from the Heat and Magnum side since it causes a security issue, though: any user in any tenant holding the `admin` role can perform a global `stack-list` operation if Heat is configured to allow it for Magnum. If you want to enable it nonetheless, proceed as follows: 1. 
Set `periodic_global_stack_list` in magnum.conf to `True` (`False` by default). 2. Update heat policy to allow magnum list stacks. To this end, edit your heat policy file, usually ``/etc/heat/policy.json``: .. code-block:: ini ... stacks:global_index: "rule:context_is_admin", Now restart heat. Containers and nodes -------------------- Scaling containers and nodes refers to increasing or decreasing allocated system resources. Scaling is a broad topic and involves many dimensions. In the context of Magnum in this guide, we consider the following issues: - Scaling containers and scaling cluster nodes (infrastructure) - Manual and automatic scaling Since this is an active area of development, a complete solution covering all issues does not exist yet, but partial solutions are emerging. Scaling containers involves managing the number of instances of the container by replicating or deleting instances. This can be used to respond to change in the workload being supported by the application; in this case, it is typically driven by certain metrics relevant to the application such as response time, etc. Other use cases include rolling upgrade, where a new version of a service can gradually be scaled up while the older version is gradually scaled down. Scaling containers is supported at the COE level and is specific to each COE as well as the version of the COE. You will need to refer to the documentation for the proper COE version for full details, but following are some pointers for reference. For Kubernetes, pods are scaled manually by setting the count in the replication controller. Kubernetes version 1.3 and later also supports `autoscaling <https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/>`_. For Docker, the tool 'Docker Compose' provides the command `docker-compose scale <https://docs.docker.com/compose/reference/scale/>`_ which lets you manually set the number of instances of a container. For Swarm version 1.12 and later, services can also be scaled manually through the command `docker service scale <https://docs.docker.com/engine/reference/commandline/service_scale/>`_. Automatic scaling for Swarm is not yet available. 
Mesos manages the resources and does not support scaling directly; instead, this is provided by frameworks running within Mesos. With the Marathon framework currently supported in the Mesos cluster, you can use the `scale operation `_ on the Marathon UI or through a REST API call to manually set the attribute 'instance' for a container. Scaling the cluster nodes involves managing the number of nodes in the cluster by adding more nodes or removing nodes. There is no direct correlation between the number of nodes and the number of containers that can be hosted since the resources consumed (memory, CPU, etc) depend on the containers. However, if a certain resource is exhausted in the cluster, adding more nodes would add more resources for hosting more containers. As part of the infrastructure management, Magnum supports manual scaling through the attribute 'node_count' in the cluster, so you can scale the cluster simply by changing this attribute:: openstack coe cluster update mycluster replace node_count=2 Refer to the section `Scale`_ lifecycle operation for more details. Adding nodes to a cluster is straightforward: Magnum deploys additional VMs or baremetal servers through the heat templates and invokes the COE-specific mechanism for registering the new nodes to update the available resources in the cluster. Afterward, it is up to the COE or user to re-balance the workload by launching new container instances or re-launching dead instances on the new nodes. Removing nodes from a cluster requires some more care to ensure continuous operation of the containers since the nodes being removed may be actively hosting some containers. Magnum performs a simple heuristic that is specific to the COE to find the best node candidates for removal, as follows: Kubernetes Magnum scans the pods in the namespace 'Default' to determine the nodes that are *not* hosting any (empty nodes). 
If the number of nodes to be removed is equal or less than the number of these empty nodes, these nodes will be removed from the cluster. If the number of nodes to be removed is larger than the number of empty nodes, a warning message will be sent to the Magnum log and the empty nodes along with additional nodes will be removed from the cluster. The additional nodes are selected randomly and the pods running on them will be deleted without warning. For this reason, a good practice is to manage the pods through the replication controller so that the deleted pods will be relaunched elsewhere in the cluster. Note also that even when only the empty nodes are removed, there is no guarantee that no pod will be deleted because there is no locking to ensure that Kubernetes will not launch new pods on these nodes after Magnum has scanned the pods. Swarm No node selection heuristic is currently supported. If you decrease the node_count, a node will be chosen by magnum without consideration of what containers are running on the selected node. Mesos Magnum scans the running tasks on Marathon server to determine the nodes on which there is *no* task running (empty nodes). If the number of nodes to be removed is equal or less than the number of these empty nodes, these nodes will be removed from the cluster. If the number of nodes to be removed is larger than the number of empty nodes, a warning message will be sent to the Magnum log and the empty nodes along with additional nodes will be removed from the cluster. The additional nodes are selected randomly and the containers running on them will be deleted without warning. Note that even when only the empty nodes are removed, there is no guarantee that no container will be deleted because there is no locking to ensure that Mesos will not launch new containers on these nodes after Magnum has scanned the tasks. 
Currently, scaling containers and scaling cluster nodes are handled separately, but in many use cases, there are interactions between the two operations. For instance, scaling up the containers may exhaust the available resources in the cluster, thereby requiring scaling up the cluster nodes as well. Many complex issues are involved in managing this interaction. A presentation at the OpenStack Tokyo Summit 2015 covered some of these issues along with some early proposals, `Exploring Magnum and Senlin integration for autoscaling containers `_. This remains an active area of discussion and research. Storage ======= Currently Cinder provides the block storage to the containers, and the storage is made available in two ways: as ephemeral storage and as persistent storage. Ephemeral storage ----------------- The filesystem for the container consists of multiple layers from the image and a top layer that holds the modification made by the container. This top layer requires storage space and the storage is configured in the Docker daemon through a number of storage options. When the container is removed, the storage allocated to the particular container is also deleted. Magnum can manage the containers' filesystem in two ways, storing them on the local disk of the compute instances or in a separate Cinder block volume for each node in the cluster, mounts it to the node and configures it to be used as ephemeral storage. Users can specify the size of the Cinder volume with the ClusterTemplate attribute 'docker-volume-size'. Currently the block size is fixed at cluster creation time, but future lifecycle operations may allow modifying the block size during the life of the cluster. _`docker_volume_type` For drivers that support additional volumes for container storage, a label named 'docker_volume_type' is exposed so that users can select different cinder volume types for their volumes. 
The default volume *must* be set in 'default_docker_volume_type' in the 'cinder' section of magnum.conf, an obvious value is the default volume type set in cinder.conf of your cinder deployment . Please note, that docker_volume_type refers to a cinder volume type and it is unrelated to docker or kubernetes volumes. Both local disk and the Cinder block storage can be used with a number of Docker storage drivers available. * 'devicemapper': When used with a dedicated Cinder volume it is configured using direct-lvm and offers very good performance. If it's used with the compute instance's local disk uses a loopback device offering poor performance and it's not recommended for production environments. Using the 'devicemapper' driver does allow the use of SELinux. * 'overlay' When used with a dedicated Cinder volume offers as good or better performance than devicemapper. If used on the local disk of the compute instance (especially with high IOPS drives) you can get significant performance gains. However, for kernel versions less than 4.9, SELinux must be disabled inside the containers resulting in worse container isolation, although it still runs in enforcing mode on the cluster compute instances. Persistent storage ------------------ In some use cases, data read/written by a container needs to persist so that it can be accessed later. To persist the data, a Cinder volume with a filesystem on it can be mounted on a host and be made available to the container, then be unmounted when the container exits. Docker provides the 'volume' feature for this purpose: the user invokes the 'volume create' command, specifying a particular volume driver to perform the actual work. Then this volume can be mounted when a container is created. A number of third-party volume drivers support OpenStack Cinder as the backend, for example Rexray and Flocker. Magnum currently supports Rexray as the volume driver for Swarm and Mesos. Other drivers are being considered. 
Kubernetes allows a previously created Cinder block to be mounted to a pod and this is done by specifying the block ID in the pod YAML file. When the pod is scheduled on a node, Kubernetes will interface with Cinder to request the volume to be mounted on this node, then Kubernetes will launch the Docker container with the proper options to make the filesystem on the Cinder volume accessible to the container in the pod. When the pod exits, Kubernetes will again send a request to Cinder to unmount the volume's filesystem, making it available to be mounted on other nodes. Magnum supports these features to use Cinder as persistent storage using the ClusterTemplate attribute 'volume-driver' and the support matrix for the COE types is summarized as follows: +--------+-------------+-------------+-------------+ | Driver | Kubernetes | Swarm | Mesos | +========+=============+=============+=============+ | cinder | supported | unsupported | unsupported | +--------+-------------+-------------+-------------+ | rexray | unsupported | supported | supported | +--------+-------------+-------------+-------------+ Following are some examples for using Cinder as persistent storage. Using Cinder in Kubernetes ++++++++++++++++++++++++++ **NOTE:** This feature requires Kubernetes version 1.5.0 or above. The public Fedora image from Atomic currently meets this requirement. 1. Create the ClusterTemplate. Specify 'cinder' as the volume-driver for Kubernetes:: openstack coe cluster template create k8s-cluster-template \ --image fedora-23-atomic-7 \ --keypair testkey \ --external-network public \ --dns-nameserver 8.8.8.8 \ --flavor m1.small \ --docker-volume-size 5 \ --network-driver flannel \ --coe kubernetes \ --volume-driver cinder 2. Create the cluster:: openstack coe cluster create k8s-cluster \ --cluster-template k8s-cluster-template \ --node-count 1 Kubernetes is now ready to use Cinder for persistent storage. Following is an example illustrating how Cinder is used in a pod. 1. 
Create the cinder volume:: cinder create --display-name=test-repo 1 ID=$(cinder create --display-name=test-repo 1 | awk -F'|' '$2~/^[[:space:]]*id/ {print $3}') The command will generate the volume with a ID. The volume ID will be specified in Step 2. 2. Create a pod in this cluster and mount this cinder volume to the pod. Create a file (e.g nginx-cinder.yaml) describing the pod:: cat > nginx-cinder.yaml << END apiVersion: v1 kind: Pod metadata: name: aws-web spec: containers: - name: web image: nginx ports: - name: web containerPort: 80 hostPort: 8081 protocol: TCP volumeMounts: - name: html-volume mountPath: "/usr/share/nginx/html" volumes: - name: html-volume cinder: # Enter the volume ID below volumeID: $ID fsType: ext4 END **NOTE:** The Cinder volume ID needs to be configured in the YAML file so the existing Cinder volume can be mounted in a pod by specifying the volume ID in the pod manifest as follows:: volumes: - name: html-volume cinder: volumeID: $ID fsType: ext4 3. Create the pod by the normal Kubernetes interface:: kubectl create -f nginx-cinder.yaml You can start a shell in the container to check that the mountPath exists, and on an OpenStack client you can run the command 'cinder list' to verify that the cinder volume status is 'in-use'. Using Cinder in Swarm +++++++++++++++++++++ *To be filled in* Using Cinder in Mesos +++++++++++++++++++++ 1. Create the ClusterTemplate. Specify 'rexray' as the volume-driver for Mesos. As an option, you can specify in a label the attributes 'rexray_preempt' to enable any host to take control of a volume regardless if other hosts are using the volume. 
If this is set to false, the driver will ensure data safety by locking the volume:: openstack coe cluster template create mesos-cluster-template \ --image ubuntu-mesos \ --keypair testkey \ --external-network public \ --dns-nameserver 8.8.8.8 \ --master-flavor m1.magnum \ --docker-volume-size 4 \ --tls-disabled \ --flavor m1.magnum \ --coe mesos \ --volume-driver rexray \ --labels rexray-preempt=true 2. Create the Mesos cluster:: openstack coe cluster create mesos-cluster \ --cluster-template mesos-cluster-template \ --node-count 1 3. Create the cinder volume and configure this cluster:: cinder create --display-name=redisdata 1 Create the following file :: cat > mesos.json << END { "id": "redis", "container": { "docker": { "image": "redis", "network": "BRIDGE", "portMappings": [ { "containerPort": 80, "hostPort": 0, "protocol": "tcp"} ], "parameters": [ { "key": "volume-driver", "value": "rexray" }, { "key": "volume", "value": "redisdata:/data" } ] } }, "cpus": 0.2, "mem": 32.0, "instances": 1 } END **NOTE:** When the Mesos cluster is created using this ClusterTemplate, the Mesos cluster will be configured so that a filesystem on an existing cinder volume can be mounted in a container by configuring the parameters to mount the cinder volume in the JSON file :: "parameters": [ { "key": "volume-driver", "value": "rexray" }, { "key": "volume", "value": "redisdata:/data" } ] 4. Create the container using Marathon REST API :: MASTER_IP=$(openstack coe cluster show mesos-cluster | awk '/ api_address /{print $4}') curl -X POST -H "Content-Type: application/json" \ http://${MASTER_IP}:8080/v2/apps -d@mesos.json You can log into the container to check that the mountPath exists, and you can run the command 'cinder list' to verify that your cinder volume status is 'in-use'. 
Image Management ================ When a COE is deployed, an image from Glance is used to boot the nodes in the cluster and then the software will be configured and started on the nodes to bring up the full cluster. An image is based on a particular distro such as Fedora, Ubuntu, etc, and is prebuilt with the software specific to the COE such as Kubernetes, Swarm, Mesos. The image is tightly coupled with the following in Magnum: 1. Heat templates to orchestrate the configuration. 2. Template definition to map ClusterTemplate parameters to Heat template parameters. 3. Set of scripts to configure software. Collectively, they constitute the driver for a particular COE and a particular distro; therefore, developing a new image needs to be done in conjunction with developing these other components. Images can be built by various methods such as diskimagebuilder, or in some cases, a distro image can be used directly. A number of drivers and the associated images are supported in Magnum as reference implementation. In this section, we focus mainly on the supported images. All images must include support for cloud-init and the heat software configuration utility: - os-collect-config - os-refresh-config - os-apply-config - heat-config - heat-config-script Additional software is described as follows. Kubernetes on Fedora Atomic --------------------------- This image can be downloaded from the `public Atomic site `_ or can be built locally using diskimagebuilder. 
Details can be found in the `fedora-atomic element `_ The image currently has the following OS/software: +-------------+-----------+ | OS/software | version | +=============+===========+ | Fedora | 26 | +-------------+-----------+ | Docker | 1.13.1 | +-------------+-----------+ | Kubernetes | 1.9.3 | +-------------+-----------+ | etcd | 3.1.3 | +-------------+-----------+ | Flannel | 0.7.0 | +-------------+-----------+ The following software are managed as systemd services: - kube-apiserver - kubelet - etcd - flannel (if specified as network driver) - docker The following software are managed as Docker containers: - kube-controller-manager - kube-scheduler - kube-proxy The login for this image is *fedora*. Kubernetes on CoreOS -------------------- CoreOS publishes a `stock image `_ that is being used to deploy Kubernetes. This image has the following OS/software: +-------------+-----------+ | OS/software | version | +=============+===========+ | CoreOS | 4.3.6 | +-------------+-----------+ | Docker | 1.9.1 | +-------------+-----------+ | Kubernetes | 1.0.6 | +-------------+-----------+ | etcd | 2.2.3 | +-------------+-----------+ | Flannel | 0.5.5 | +-------------+-----------+ The following software are managed as systemd services: - kubelet - flannel (if specified as network driver) - docker - etcd The following software are managed as Docker containers: - kube-apiserver - kube-controller-manager - kube-scheduler - kube-proxy The login for this image is *core*. Kubernetes on Ironic -------------------- This image is built manually using diskimagebuilder. The scripts and instructions are included in `Magnum code repo `_. Currently Ironic is not fully supported yet, therefore more details will be provided when this driver has been fully tested. Swarm on Fedora Atomic ---------------------- This image is the same as the image for `Kubernetes on Fedora Atomic`_ described above. The login for this image is *fedora*. 
Mesos on Ubuntu --------------- This image is built manually using diskimagebuilder. The instructions are provided in the section `Diskimage-builder`_. The Fedora site hosts the current image `ubuntu-mesos-latest.qcow2 `_. +-------------+-----------+ | OS/software | version | +=============+===========+ | Ubuntu | 14.04 | +-------------+-----------+ | Docker | 1.8.1 | +-------------+-----------+ | Mesos | 0.25.0 | +-------------+-----------+ | Marathon | 0.11.1 | +-------------+-----------+ Notification ============ Magnum provides notifications about usage data so that 3rd party applications can use the data for auditing, billing, monitoring, or quota purposes. This document describes the current inclusions and exclusions for Magnum notifications. Magnum uses Cloud Auditing Data Federation (`CADF`_) Notification as its notification format for better support of auditing, details about CADF are documented below. Auditing with CADF ------------------ Magnum uses the `PyCADF`_ library to emit CADF notifications, these events adhere to the DMTF `CADF`_ specification. This standard provides auditing capabilities for compliance with security, operational, and business processes and supports normalized and categorized event data for federation and aggregation. .. _PyCADF: http://docs.openstack.org/developer/pycadf .. _CADF: http://www.dmtf.org/standards/cadf Below table describes the event model components and semantics for each component: +-----------------+----------------------------------------------------------+ | model component | CADF Definition | +=================+==========================================================+ | OBSERVER | The RESOURCE that generates the CADF Event Record based | | | on its observation (directly or indirectly) of the | | | Actual Event. 
| +-----------------+----------------------------------------------------------+ | INITIATOR | The RESOURCE that initiated, originated, or instigated | | | the event's ACTION, according to the OBSERVER. | +-----------------+----------------------------------------------------------+ | ACTION | The operation or activity the INITIATOR has performed, | | | has attempted to perform or has pending against the | | | event's TARGET, according to the OBSERVER. | +-----------------+----------------------------------------------------------+ | TARGET | The RESOURCE against which the ACTION of a CADF Event | | | Record was performed, attempted, or is pending, | | | according to the OBSERVER. | +-----------------+----------------------------------------------------------+ | OUTCOME | The result or status of the ACTION against the TARGET, | | | according to the OBSERVER. | +-----------------+----------------------------------------------------------+ The ``payload`` portion of a CADF Notification is a CADF ``event``, which is represented as a JSON dictionary. For example: .. code-block:: javascript { "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event", "initiator": { "typeURI": "service/security/account/user", "host": { "agent": "curl/7.22.0(x86_64-pc-linux-gnu)", "address": "127.0.0.1" }, "id": "" }, "target": { "typeURI": "", "id": "openstack:1c2fc591-facb-4479-a327-520dade1ea15" }, "observer": { "typeURI": "service/security", "id": "openstack:3d4a50a9-2b59-438b-bf19-c231f9c7625a" }, "eventType": "activity", "eventTime": "2014-02-14T01:20:47.932842+00:00", "action": "", "outcome": "success", "id": "openstack:f5352d7b-bee6-4c22-8213-450e7b646e9f", } Where the following are defined: * ````: ID of the user that performed the operation * ````: CADF specific target URI, (i.e.: data/security/project) * ````: The action being performed, typically: ````. ```` Additionally there may be extra keys present depending on the operation being performed, these will be discussed below. 
Note, the ``eventType`` property of the CADF payload is different from the ``event_type`` property of a notification. The former (``eventType``) is a CADF keyword which designates the type of event that is being measured, this can be: `activity`, `monitor` or `control`. Whereas the latter (``event_type``) is described in previous sections as: `magnum.<resource_type>.<operation>` Supported Events ---------------- The following table displays the corresponding relationship between resource types and operations. The bay type is deprecated and will be removed in a future version. Cluster is the new equivalent term. +---------------+----------------------------+-------------------------+ | resource type | supported operations | typeURI | +===============+============================+=========================+ | bay | create, update, delete | service/magnum/bay | +---------------+----------------------------+-------------------------+ | cluster | create, update, delete | service/magnum/cluster | +---------------+----------------------------+-------------------------+ Example Notification - Cluster Create ------------------------------------- The following is an example of a notification that is sent when a cluster is created. This example can be applied for any ``create``, ``update`` or ``delete`` event that is seen in the table above. The ``<action>`` and ``typeURI`` fields will change. .. 
code-block:: javascript { "event_type": "magnum.cluster.created", "message_id": "0156ee79-b35f-4cef-ac37-d4a85f231c69", "payload": { "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event", "initiator": { "typeURI": "service/security/account/user", "id": "c9f76d3c31e142af9291de2935bde98a", "user_id": "0156ee79-b35f-4cef-ac37-d4a85f231c69", "project_id": "3d4a50a9-2b59-438b-bf19-c231f9c7625a" }, "target": { "typeURI": "service/magnum/cluster", "id": "openstack:1c2fc591-facb-4479-a327-520dade1ea15" }, "observer": { "typeURI": "service/magnum/cluster", "id": "openstack:3d4a50a9-2b59-438b-bf19-c231f9c7625a" }, "eventType": "activity", "eventTime": "2015-05-20T01:20:47.932842+00:00", "action": "create", "outcome": "success", "id": "openstack:f5352d7b-bee6-4c22-8213-450e7b646e9f", "resource_info": "671da331c47d4e29bb6ea1d270154ec3" } "priority": "INFO", "publisher_id": "magnum.host1234", "timestamp": "2016-05-20 15:03:45.960280" } Container Monitoring ==================== The offered monitoring stack relies on the following set of containers and services: - cAdvisor - Node Exporter - Prometheus - Grafana To setup this monitoring stack, users are given two configurable labels in the Magnum cluster template's definition: _`prometheus_monitoring` This label accepts a boolean value. If *True*, the monitoring stack will be setup. By default *prometheus_monitoring = False*. _`grafana_admin_passwd` This label lets users create their own *admin* user password for the Grafana interface. It expects a string value. By default it is set to *admin*. Container Monitoring in Kubernetes ---------------------------------- By default, all Kubernetes clusters already contain *cAdvisor* integrated with the *Kubelet* binary. Its container monitoring data can be accessed on a node level basis through *http://NODE_IP:4194*. Node Exporter is part of the above mentioned monitoring stack as it can be used to export machine metrics. 
Such functionality also works on a node level which means that when `prometheus_monitoring`_ is *True*, the Kubernetes nodes will be populated with an additional manifest under */etc/kubernetes/manifests*. Node Exporter is then automatically picked up and launched as a regular Kubernetes POD. To aggregate and complement all the existing monitoring metrics and add a built-in visualization layer, Prometheus is used. It is launched by the Kubernetes master node(s) as a *Service* within a *Deployment* with one replica and it relies on a *ConfigMap* where the Prometheus configuration (prometheus.yml) is defined. This configuration uses Prometheus native support for service discovery in Kubernetes clusters, *kubernetes_sd_configs*. The respective manifests can be found in */srv/kubernetes/monitoring/* on the master nodes and once the service is up and running, Prometheus UI can be accessed through port 9090. Finally, for custom plotting and enhanced metric aggregation and visualization, Prometheus can be integrated with Grafana as it provides native compliance for Prometheus data sources. Also Grafana is deployed as a *Service* within a *Deployment* with one replica. The default user is *admin* and the password is set up according to `grafana_admin_passwd`_. There is also a default Grafana dashboard provided with this installation, from the official `Grafana dashboards' repository `_. The Prometheus data source is automatically added to Grafana once it is up and running, pointing to *http://prometheus:9090* through *Proxy*. The respective manifests can also be found in */srv/kubernetes/monitoring/* on the master nodes and once the service is running, the Grafana dashboards can be accessed through port 3000. For both Prometheus and Grafana, there is an assigned *systemd* service called *kube-enable-monitoring*. Kubernetes External Load Balancer ================================= .. 
include:: kubernetes-load-balancer.rst magnum-6.1.0/doc/source/user/kubernetes-load-balancer.rst0000666000175100017510000003310313244017334023437 0ustar zuulzuul00000000000000In a Kubernetes cluster, all masters and minions are connected to a private Neutron subnet, which in turn is connected by a router to the public network. This allows the nodes to access each other and the external internet. All Kubernetes pods and services created in the cluster are connected to a private container network which by default is Flannel, an overlay network that runs on top of the Neutron private subnet. The pods and services are assigned IP addresses from this container network and they can access each other and the external internet. However, these IP addresses are not accessible from an external network. To publish a service endpoint externally so that the service can be accessed from the external network, Kubernetes provides the external load balancer feature. This is done by simply specifying the attribute "type: LoadBalancer" in the service manifest. When the service is created, Kubernetes will add an external load balancer in front of the service so that the service will have an external IP address in addition to the internal IP address on the container network. The service endpoint can then be accessed with this external IP address. Refer to the `Kubernetes service document `_ for more details. A Kubernetes cluster deployed by Magnum will have all the necessary configuration required for the external load balancer. This document describes how to use this feature. Steps for the cluster administrator ----------------------------------- Because the Kubernetes master needs to interface with OpenStack to create and manage the Neutron load balancer, we need to provide a credential for Kubernetes to use. In the current implementation, the cluster administrator needs to manually perform this step. 
We are looking into several ways to let Magnum automate this step in a secure manner. This means that after the Kubernetes cluster is initially deployed, the load balancer support is disabled. If the administrator does not want to enable this feature, no further action is required. All the services will be created normally; services that specify the load balancer will also be created successfully, but a load balancer will not be created. Note that different versions of Kubernetes require different versions of Neutron LBaaS plugin running on the OpenStack instance:: ============================ ============================== Kubernetes Version on Master Neutron LBaaS Version Required ============================ ============================== 1.2 LBaaS v1 1.3 or later LBaaS v2 ============================ ============================== Before enabling the Kubernetes load balancer feature, confirm that the OpenStack instance is running the required version of Neutron LBaaS plugin. To determine if your OpenStack instance is running LBaaS v1, try running the following command from your OpenStack control node:: neutron lb-pool-list Or look for the following configuration in neutron.conf or neutron_lbaas.conf:: service_provider = LOADBALANCER:Haproxy:neutron_lbaas.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default To determine if your OpenStack instance is running LBaaS v2, try running the following command from your OpenStack control node:: neutron lbaas-pool-list Or look for the following configuration in neutron.conf or neutron_lbaas.conf:: service_plugins = neutron.plugins.services.agent_loadbalancer.plugin.LoadBalancerPluginv2 To configure LBaaS v1 or v2, refer to the Neutron documentation. Before deleting the Kubernetes cluster, make sure to delete all the services that created load balancers. 
Because the Neutron objects created by Kubernetes are not managed by Heat, they will not be deleted by Heat and this will cause the cluster-delete operation to fail. If this occurs, delete the neutron objects manually (lb-pool, lb-vip, lb-member, lb-healthmonitor) and then run cluster-delete again. Steps for the users ------------------- This feature requires the OpenStack cloud provider to be enabled. To do so, enable the cinder support (--volume-driver cinder). For the user, publishing the service endpoint externally involves the following 2 steps: 1. Specify "type: LoadBalancer" in the service manifest 2. After the service is created, associate a floating IP with the VIP of the load balancer pool. The following example illustrates how to create an external endpoint for a pod running nginx. Create a file (e.g nginx.yaml) describing a pod running nginx:: apiVersion: v1 kind: Pod metadata: name: nginx labels: app: nginx spec: containers: - name: nginx image: nginx ports: - containerPort: 80 Create a file (e.g nginx-service.yaml) describing a service for the nginx pod:: apiVersion: v1 kind: Service metadata: name: nginxservice labels: app: nginx spec: ports: - port: 80 targetPort: 80 protocol: TCP selector: app: nginx type: LoadBalancer Please refer to the `quickstart `_ guide on how to connect to Kubernetes running on the launched cluster. Assuming a Kubernetes cluster named k8sclusterv1 has been created, deploy the pod and service using following commands:: kubectl create -f nginx.yaml kubectl create -f nginx-service.yaml For more details on verifying the load balancer in OpenStack, refer to the following section on how it works. Next, associate a floating IP to the load balancer. This can be done easily on Horizon by navigating to:: Compute -> Access & Security -> Floating IPs Click on "Allocate IP To Project" and then on "Associate" for the new floating IP. 
Alternatively, associating a floating IP can be done on the command line by allocating a floating IP, finding the port of the VIP, and associating the floating IP to the port. The commands shown below are for illustration purpose and assume that there is only one service with load balancer running in the cluster and no other load balancers exist except for those created for the cluster. First create a floating IP on the public network:: neutron floatingip-create public Created a new floatingip: +---------------------+--------------------------------------+ | Field | Value | +---------------------+--------------------------------------+ | fixed_ip_address | | | floating_ip_address | 172.24.4.78 | | floating_network_id | 4808eacb-e1a0-40aa-97b6-ecb745af2a4d | | id | b170eb7a-41d0-4c00-9207-18ad1c30fecf | | port_id | | | router_id | | | status | DOWN | | tenant_id | 012722667dc64de6bf161556f49b8a62 | +---------------------+--------------------------------------+ Note the floating IP 172.24.4.78 that has been allocated. The ID for this floating IP is shown above, but it can also be queried by:: FLOATING_ID=$(neutron floatingip-list | grep "172.24.4.78" | awk '{print $2}') Next find the VIP for the load balancer:: VIP_ID=$(neutron lb-vip-list | grep TCP | grep -v pool | awk '{print $2}') Find the port for this VIP:: PORT_ID=$(neutron lb-vip-show $VIP_ID | grep port_id | awk '{print $4}') Finally associate the floating IP with the port of the VIP:: neutron floatingip-associate $FLOATING_ID $PORT_ID The endpoint for nginx can now be accessed on a browser at this floating IP:: http://172.24.4.78:80 Alternatively, you can check for the nginx 'welcome' message by:: curl http://172.24.4.78:80 NOTE: it is not necessary to indicate port :80 here but it is shown to correlate with the port that was specified in the service manifest. 
How it works ------------ Kubernetes is designed to work with different Clouds such as Google Compute Engine (GCE), Amazon Web Services (AWS), and OpenStack; therefore, different load balancers need to be created on the particular Cloud for the services. This is done through a plugin for each Cloud and the OpenStack plugin was developed by Angus Lees:: https://github.com/kubernetes/kubernetes/blob/release-1.0/pkg/cloudprovider/openstack/openstack.go When the Kubernetes components kube-apiserver and kube-controller-manager start up, they will use the credential provided to authenticate a client to interface with OpenStack. When a service with load balancer is created, the plugin code will interface with Neutron in this sequence: 1. Create lb-pool for the Kubernetes service 2. Create lb-member for the minions 3. Create lb-healthmonitor 4. Create lb-vip on the private network of the Kubernetes cluster These Neutron objects can be verified as follows. For the load balancer pool:: neutron lb-pool-list +--------------------------------------+--------------------------------------------------+----------+-------------+----------+----------------+--------+ | id | name | provider | lb_method | protocol | admin_state_up | status | +--------------------------------------+--------------------------------------------------+----------+-------------+----------+----------------+--------+ | 241357b3-2a8f-442e-b534-bde7cd6ba7e4 | a1f03e40f634011e59c9efa163eae8ab | haproxy | ROUND_ROBIN | TCP | True | ACTIVE | | 82b39251-1455-4eb6-a81e-802b54c2df29 | k8sclusterv1-iypacicrskib-api_pool-fydshw7uvr7h | haproxy | ROUND_ROBIN | HTTP | True | ACTIVE | | e59ea983-c6e8-4cec-975d-89ade6b59e50 | k8sclusterv1-iypacicrskib-etcd_pool-qbpo43ew2m3x | haproxy | ROUND_ROBIN | HTTP | True | ACTIVE | +--------------------------------------+--------------------------------------------------+----------+-------------+----------+----------------+--------+ Note that 2 load balancers already exist to 
implement high availability for the cluster (api and etcd). The new load balancer for the Kubernetes service uses the TCP protocol and has a name assigned by Kubernetes. For the members of the pool:: neutron lb-member-list +--------------------------------------+----------+---------------+--------+----------------+--------+ | id | address | protocol_port | weight | admin_state_up | status | +--------------------------------------+----------+---------------+--------+----------------+--------+ | 9ab7dcd7-6e10-4d9f-ba66-861f4d4d627c | 10.0.0.5 | 8080 | 1 | True | ACTIVE | | b179c1ad-456d-44b2-bf83-9cdc127c2b27 | 10.0.0.5 | 2379 | 1 | True | ACTIVE | | f222b60e-e4a9-4767-bc44-ffa66ec22afe | 10.0.0.6 | 31157 | 1 | True | ACTIVE | +--------------------------------------+----------+---------------+--------+----------------+--------+ Again, 2 members already exist for high availability and they serve the master node at 10.0.0.5. The new member serves the minion at 10.0.0.6, which hosts the Kubernetes service. 
For the monitor of the pool:: neutron lb-healthmonitor-list +--------------------------------------+------+----------------+ | id | type | admin_state_up | +--------------------------------------+------+----------------+ | 381d3d35-7912-40da-9dc9-b2322d5dda47 | TCP | True | | 67f2ae8f-ffc6-4f86-ba5f-1a135f4af85c | TCP | True | | d55ff0f3-9149-44e7-9b52-2e055c27d1d3 | TCP | True | +--------------------------------------+------+----------------+ For the VIP of the pool:: neutron lb-vip-list +--------------------------------------+----------------------------------+----------+----------+----------------+--------+ | id | name | address | protocol | admin_state_up | status | +--------------------------------------+----------------------------------+----------+----------+----------------+--------+ | 9ae2ebfb-b409-4167-9583-4a3588d2ff42 | api_pool.vip | 10.0.0.3 | HTTP | True | ACTIVE | | c318aec6-8b7b-485c-a419-1285a7561152 | a1f03e40f634011e59c9efa163eae8ab | 10.0.0.7 | TCP | True | ACTIVE | | fc62cf40-46ad-47bd-aa1e-48339b95b011 | etcd_pool.vip | 10.0.0.4 | HTTP | True | ACTIVE | +--------------------------------------+----------------------------------+----------+----------+----------------+--------+ Note that the VIP is created on the private network of the cluster; therefore it has an internal IP address of 10.0.0.7. This address is also associated as the "external address" of the Kubernetes service. You can verify this in Kubernetes by running following command:: kubectl get services NAME LABELS SELECTOR IP(S) PORT(S) kubernetes component=apiserver,provider=kubernetes 10.254.0.1 443/TCP nginxservice app=nginx app=nginx 10.254.122.191 80/TCP 10.0.0.7 On GCE, the networking implementation gives the load balancer an external address automatically. On OpenStack, we need to take the additional step of associating a floating IP to the load balancer. magnum-6.1.0/doc/source/index.rst0000666000175100017510000000565313244017334016750 0ustar zuulzuul00000000000000.. 
Copyright 2014-2015 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================ Welcome to Magnum's Developer Documentation! ============================================ Magnum is an OpenStack project which offers container orchestration engines for deploying and managing containers as first class resources in OpenStack. * **Free software:** under the `Apache license `_ * **Source:** http://git.openstack.org/cgit/openstack/magnum * **Blueprints:** https://blueprints.launchpad.net/magnum * **Bugs:** http://bugs.launchpad.net/magnum * **REST Client:** http://git.openstack.org/cgit/openstack/python-magnumclient Architecture ============ There are several different types of objects in the magnum system: * **Cluster:** A collection of node objects where work is scheduled * **ClusterTemplate:** An object stores template information about the cluster which is used to create new clusters consistently Two binaries work together to compose the magnum system. The first binary (accessed by the python-magnumclient code) is the magnum-api REST server. The REST server may run as one process or multiple processes. When a REST request is sent to the client API, the request is sent via AMQP to the magnum-conductor process. The REST server is horizontally scalable. At this time, the conductor is limited to one process, but we intend to add horizontal scalability to the conductor as well. 
Features ======== * Abstractions for Clusters * Integration with Kubernetes, Swarm, Mesos for backend container technology * Integration with Keystone for multi-tenant security * Integration with Neutron for Kubernetes multi-tenancy network security * Integration with Cinder to provide volume service for containers User Documentation ================== .. toctree:: :maxdepth: 1 user/index Contributor Guide ================= .. toctree:: :maxdepth: 1 contributor/index Admin Guide =========== .. toctree:: :maxdepth: 1 admin/index Installation Guide ================== .. toctree:: :maxdepth: 1 install/index Sample Configurations and Policies ================================== .. toctree:: :maxdepth: 1 configuration/index Work In Progress ================ .. toctree:: :maxdepth: 1 admin/troubleshooting-guide.rst user/index.rst admin/configuring.rst magnum-6.1.0/doc/source/images/0000775000175100017510000000000013244017675016351 5ustar zuulzuul00000000000000magnum-6.1.0/doc/source/images/cluster-template-details.png0000666000175100017510000021534713244017334024002 0ustar zuulzuul00000000000000‰PNG  IHDR…S0Œ­ pHYsttÞfxtIMEà +~ô*tEXtAuthor©®ÌH tEXtDescription !# tEXtCopyright¬Ì:tEXtCreation time5÷ tEXtSoftware]pÿ: tEXtDisclaimer·À´tEXtWarningÀæ‡tEXtSourceõÿƒëtEXtCommentöÌ–¿tEXtTitle¨îÒ' IDATxœìÝ{X”÷ÿÿç 0xTÀ%Á€&P³ÓŠMÄ6¤+i+nñ×›F7]ñ›Ûo%ùVÌ6Ø+•$ÓT{P»iZ¡FH³bR¥Ö`¢¨ xà 0f`f~Ì€œA@Qæõ¸.® 3Ÿûs¿ï{”8/>Cmm­q+ÆÁ.@DDDDDDDD®?…B""""""""nH¡ˆˆˆˆˆˆˆˆR($"""""""↠‰ˆˆˆˆˆˆˆ¸!ÏïÑá‡{S#ŽË—±_¾„½ÑŠÃbÁnµÐXs»¹[CÖªÏh:WKÓ™S4ÕÕb­­Å}/ÓŸý/ßð\¾„Ãn» ‡ÕŠý‚™&s=öKÁnosŒÃÑÉi ¶éw3cÝ+ý-kÈk{ö¤ß¡3ÀqàÀ¹¿½§Ñµ>±y]!×ZA­ÖjþÞ`0àðð ÉjÅf·còô¼Ò¶—ÁСC‡øÃþ€ÍfcÒ¤I<úè£øøøpîÜ9^~ùeî½÷^ÆŒC^^÷ÝwsæÌq­iäTWWÇk¯½Fuu5<öØcL™2€¿ÿýï¼õÖ[,\¸#GŽPZZŠÁ`àK_ú<ð@Kv³™ê5i\þG)>q_ÃÿÏpâþÙ-ë+U?÷, Œˆÿ©?Ä\øgνò3—.á°ÛðûV£—üG‡ëløãvêÿû7ØÌ GŽdü†_uz?>_øMlgÏ0þg¿ÀûÎÈ^ÝÃþ²î™o¬|“Æ^áÅ¿®ûOÏèï_ÎZ V=BQìë¬÷¿ŠÃ XõH±¯¯åj‘œõ¹«VòꇎK'ãû³™0`'ZŸCû¿ÁÀĨ{0z\É—8GŒF5 CÐ$ŒáÓñ˜õe<⾆Ƿáµ(ÏÇžàÒ¼oòöáÃìûŒW^y…K—.µiwàÀvìØÁ-·Ü‚Ãá`÷îÝ”••µ¼^__Ï/~ñ 
jjjÇápðÛßþ–úúzn¹Å9Ýëü#ûöí#$$»ÝΞ={8pà@K?gÿïÓ\þG)Ãþ%ŠáQ÷`=ZÀð¨{ð;Sèí ûâ,L·MÀœû{¼¦„0ìž/aôñ¡þ÷Û8¿õ×mê¯}ùê^y ‡ÍÆð/ÆàuË$Œ#Ft¸uY/b;{ŸÿõºBæS§¯"häô)ó5ªFDDDDDDÜÕ‘Üô–@àda:ÏçŸÄŠnlý_SÈáàÔ%Øšñ4:6âÜYÌîeÂcù÷1ŽƒÁmF5~TFõÅ‹àppðôi>:ušËelÃGÿ½0Æv³ûØþðžzê)† Æ›o¾É¾}û())áÞ{ïmiWWWÇ‚ ˆˆˆà“O>áµ×^ãü#3fÌà½÷ÞÃb±°xñb¦L™ÂÇÌïÿ{ ø·û7<=·éòåË<ùä“ÜrË-|ðÁìØ±ƒÂÂB"""h‡£© G£3*»6“Ú‚¹àÏø~óßqWÜ5îg¿À`ôÀàáåàGœþ?ÿÎ…‚7õøX?9Œ9ÿ Œ#Fôû70xyáhlÄèÝvšõxæ7ßÀë–IŒ~jy?ßÔ«7>ú_‰ õéU[óÑ"N_ãzºåÏÚ·ã³pµÝßñÓæ¡u0€c…úäýÚÿ5…\Óš·“ÿøôi¬6MMŒ‹œÁ­cÆ`0u3Mȵ´0Ú7ÌD¨ï(†x½ãÎ/`ðìXfó;ï¼oooæÌ™Ã¾}û(--m ™L&"""p8Ü~ûíŒ;–³gÏrâÄ &OžÌ¾}ûðõõm™.vÇwàååÅÇÜæœþþþ-£î¸ãòòò¸pÁ™@Úœl6çýðô¼R·Ñˆ£931¯]­ÃæÑ=Mg®ü!®ûÙOXµº¥­Á»cPfþÃ6MMø.zãÈÞ…3)8ö1¼Ÿ“Á³¹?ŸÂƒµ¶›0ûQb‚òÓî:;µ‡—Ÿÿ…‡êhôCôw3X3*¦–×~Ê›‡¬„Æ=Æš2r Å›™6“=?{•­cˆýþzž0ýŽçú&‡¬¡Ä=“Î÷gOh;lV=RHðÁüÝ›²¶;_kµûÉÏÝGg³hóæNîxŒˆˆˆˆˆˆ\'þLö‚’¶sY¦MÓó¡7êçP¬œØ•OáÑÎ>cO`æüyÌèÇt´Ù}ÌѲ«˜SuÕ óðÀäáÁ¥Ÿb¿|ãðŽS:3Ùׇ[Gù`w8{ô9²Ë¶çÎ 00°eaj¼½½[^kÐÒîÖ[oåìÙ³œ?žºº:FŽÉ|€ÃáÀn·c2™hlläòåË-ý4O#k^÷ÈÛÛ›¦¦&¼§ÝqÄH.¾WĹW×;Gëôrm¤¦³ghÈyëÛ÷Œ¾~Ý\4ìÈ¥n}&OO¼n™„ÁäÃjÀÑÔã ›it ;r0xî8óÉ}œ4…âtûºš²9}<ÁÝö2yY¬˜kOpòè ÌàÜÙìD ù‡bùöÚ©øÌx”oGÿ‘–cC™?o†óµÐéÌh?ûÖœ˜ü™ë¿ËôN?¸ÝÀŸC11ý{¿`=ÿÎòÖÁPpéßgv?­ÐPȃ:‹•á®ðÄ`·ã0ÐíbÑ×t+€KMM lö^o>Ö¥Ö;‹u8§kÊ[ëgêÔ©|ík_ÃÖª‡ÃÁ°«ØB~äýñxßÁÙÿ÷,eû9»jã^þE§Óß.ÿ³”ºõ™xŽŸÀ¸—~Žç¸ñœZú8Ö£Gz}NƒÉ„qÔhÌoåãûÍÃë¶N¢Åë`ÂÌGñ7ù@ì£]?&¬»ºëÅÊ‘ÜU¬|õ(þ¡3øB´‰–ÉpNp”1ø·<áÏ„6'ƒWŸfÎ]Åqm‚!W ô¨!‘ÁcfÏÿ”½>ä×éOulrôužÏå:Šs£m 5B°LÒÀlIßz»ù«<ÞwR0~“‚9òMvçúB-t“ M˜à¼úÖ#‚€SÇ€6A€Åbié£y¤QSSþþýßÎ3èÆgý’ª'’h½ÒÏH<ÇÒtú—ÿ¶—‘quz>ƒÉ Ÿ‡çs~ó&.íyÛùsxŒîÅBZlrÜ3L0™0}oÓ»!×ãH!3µœÿÝŸû:2†8+09†oFÿŒ_ÿv3¾7æó»"ð‰À‹è­ 3™?ȋ׋ˆˆˆˆˆP{ŠNVÜéèt-]GG7ÁçPLLŸ? 
=Èô± gÎ8G 9Ú&@‡»å28Η `¯?åó\*ÿ˜ê÷ÿއÍNê.Yøgu-Õ—-Xmvf-&°‹©Waaa-O]]þþþÓ¦­Ãáà/ù ÷ß?ûöí£®®???˜9s&ûöíãW¿ú?þ8à]d2™º†Ö†ÝŽõÓ#xMž‚ÝlÆz¢ƒ—F×Îhƒ,>dø—ïÃ8b$ÞwFréo{¸ü÷=xM Æ\øçÝŽþÞÓT¯þ!5/ü¯ÛBñœ4ì6Œ#Z­ØìpH>þ+æorþW¯â¿2­wuÿЙÔ-„Öõò¯PžZßŨ,ÿÙ<ñT!i©ëÍcÏ<Å%i/þ+Fà«ÁV»oOlþ£±{zb»tÑ9JÆnwŽ²Ù°[-8vÎ[”Ÿ»Àøñcñ=_Ãp“&£ƒ ߯ÿ‰ ],²|äÈþçþpn_[[Kpp0=ö^^^œ;wŽ—_~___ðóó£¾¾ƒÁÀSO=Ÿqã¸xñ"¿þõ¯©©©ÁÛÛ???Ξ=ËÃ?ÌÝwßMUU7n$22’ÄÄD.]ºDff&MMM¤§§c~+ŸÚ̵xMÁV}ûÅ ø=²ˆÑß} .½_ÌÙ´•‡ #àÙçðœ0‘ªï~¯[i¬8†×¤`?;Éä¿ìi¹ÎÚ—^ÀüæxøÂa·1áÕß`=têçW3êñ'õíï`7›ùìñLÜ´uÐÖ¹~¬œØõs~új!‡ê: ä5fqÿ瞈…x†>…Bv»3gÎP__ÏEóÊ×ÿ”úÊÀáÀÛÃ/#ážÂ&M¤©þ\Ë2AÍ'sØí®ÑEvp@ƒÃ“q!Suò0yã†Å}Ÿ_ÙåBÍgÏžeçÎØl6BCC¹÷Þ{[^k…n¿ývfÏžÍ{gÍfcÞ¼yv¸¦ÒÒRr.†Ì¿øE†NCCï¿ÿ>“&M"<<€ÆÆFÞÿ}ìv;÷Þ{/ö‹høÃï°~\&o|¿Å°»¢Úœãâ_ws!ïO8l6F=ö¼¿pŸrîW¿€¦&ü=ŽÝrË?Jýä÷Ú»ç]Ìùk#^aáŒúöwh:sš‹ï2ü‹³ðŽøBK»Æ£ŸàƈÙ÷""""""""Ò™>jhh âXçÏÕqâ—?çÒ¡¸ì†[Ž9ìv¬·…cmhÀhk×!­xŒôÁèeÂcØp¼¼½1\2㨫Åa½ 0лP¨½æuƒàJ(Ê·¿ýí¾\æUŸSDDDDDDDäfÐç5…|}}™4‘úúóWÆ ´|ï0VuÒÙì`·ÍŽ4\Ù%ÌÁ•Dm•>-ƒÎ(‘›M/WQî\`` ¡aaX'Nrî4Ö*i=c¹ —.9G 55Ã~¥­Á€g¨Òüåh½Xµ«ˆˆˆˆˆˆˆˆ ¬~…BÆãîÇsæ Qíö– §yôLëÀ§õ—ëE v»3(²»¶´w88Lѳº\dº7𦯯®7qG}^S¨½’?¤hýK^^xã9}ã'âuûô(GDDDDDDDDº1`¡@Õ©Sxž<†¯åßÑ}|áψ‘¼L0|8F“ FM D ‰ˆˆˆˆˆˆˆÈÍ¡ßk ‰ˆˆˆˆˆˆˆÈÍG¡ˆˆˆˆˆˆˆˆR($"""""""↠‰ˆˆˆˆˆˆˆ¸!…B""""""""nÈóÓO?ìDDDDDDDDä:38Ç_?©#÷ƒÓ9s‘ Û`×$2¤Ìö屘[¸+Øw°Kiax¯¼Ö±ú#ƒ]‡È—ùÈ4C""""""rÃ0攞ìDÜÂÖ=Ÿv """""""-Œ~Ö0Ø5ˆ¸…£g/v """""""-´û˜Èu¢õºDDDDDDäF¢PHDDDDDDDÄ )qC …DDDDDDDDÜB!7¤PHDDDDDDDÄ )qC …DDDDDDDDÜB!7äÙÿ.neóÊ@&·{Ö|¾ž‚·óóŠ«ìîîpÞøÊŽüï?Hý eùù2ûýgûчˆˆˆˆˆˆˆÈ5`#…NØÇW×¹¾¶žäˆ·‰soeÆ@ઓõi<5('¹á]›écgOñB…FµÇ~p˜¯¯ëç(!<ñ€1P2” gfÈðn[„†øzª¹Þ®Yt2ÁÀ\S†S[ãÁä#§}ÌŠ“ãùɽLÐÄ‘eé_ê;L>…çÇTW»‡>ã‡ùg9 ŒŸq[›>œ¯ùÁ2×t¶ˆ™lf‹w^««”›“?X|;ñö+îÚ™yÿ<{×L5Õühó1ö B•"""""""×Ò5)4>x{c=SON˳^L°aɦüè]_ž€ý)þkÓ~þëP#“ﺵw¶ïiÏ>4Ž õ§ø¯MûXòîEü§MæÙ{\¯µô±%ï^ÆÚ-|/ü4©ª9sJ›!éàÞ[˜`<™zW(ï÷kórs ä˜üyìÞA©RDDDDDDäš°‘B“#fòNÄ•ïÍçϳõ­“œnyÆÎ‘c'9ZÜÂTÏFþù÷“ÕùgI™Ìcá“VÞ;†©žÞßåj÷þ1ö͘Áì©“ÀÛ·mïäëïÔÕÈöÞQ~9îž¼Õ Ss0„sÄPë@ìÔûŒ¿7Èõ¶b³ÙhllÄn·v)""""""2HŒF#&“ £±c},êyTŽë×јh¢îpû6í.f¤&¼ùò#3y§õóM^Ýô!Ò+¹9CbÛ`hó¸Füƒ†· „–åœnl.›Í†Åbì2DDDDDDdÙív._¾Ì°aÃú ÎrÌlXñfL8Ð&Ô±wÒÎÂû¯Èê“íú¸wz}ˆôFÇ`hrPó_‡/hllìDDDDDDäbµZ6lXŸ¿6»õä½:Ž4yqǃ‰õ3;o,w kâÈ'g;içÍ]sƒ‰õƒñá·±ùé™ü!1 
]zçm¼¶2Šsš°&oßA¸8¹y8ƒ¡_kÄÚòÜš2&""""""mô÷sâà„BœâÇ»j¨õ›Àÿ[2ƒÿ7Õ‹ÿ<Î ¶¡oÝn&ÛÆà_{†rj:¼¶é¡Qpì3~´ àGkìL¸}Hô¿îW'7“ÖÁЉˆˆˆˆˆˆ 4ÃW~ú¾c°‹h£Ý–ô"ׇ‰!Þœªh¸¦Ð;+göùØ‹/`%""""""2Œ1¢ÏÇΚBÝ?Ê6êŽ v%â^¬ì¯°öÜLDDDDDDdˆ¤éc›ÿÈ¿°-jÖÊ:ò껑¡ë†)”ûú?Èì"DDDDDDDDÜÀ 5RHDDDDDDDD®…B""""""""n膚>&"×J¯ü¼–mž315€gâÆãÓŸ®«³ò÷ùê·¦óP`¿Š‘ëH¡ˆyð+wòÓ\ßÔÎËÙÕ¼üÏÞ=rPë‘ëOÓÇDÜ•ß8î „>×V""""""îH#…DÜUýÞ«6ðÐã®5ôæéeu•ü2¿Ž·Oš=‘'#üÚwÊßr>ãѼú þMK‘kJ¡ˆÙù¿±ó¯|Ë­¬»ÍÃõ]-o¼U3BȽ{ÖOóÔÎs¼7s<©å?Õqø–qüöÛcñùü+ß8Å»“ý¸¯¹³¦ ”þIˆˆˆˆˆÈÍB¡ˆi»¦ÐY¶¼q†§Þ2òßüùúbhº„¹úÇN6q;Ø€Ïϳã’i¬3ì¹åVÖ}ÏÕO5€ƒwÞ>Îa“/¿}DˆˆˆˆˆÈÍ@k ‰¸+¿±$}Á‹sÇê)À¡ƒÌßô)Oý©š·ê¡eùé;gð`LûÙb-œ6y^cæÝS¶ëP¼ˆˆˆˆˆˆô—B!qª;ͯŽ8xxÞ4þû»áü`¦÷•PÈ×È8lÔu¹&µ‘¤¯†òý;aëÛŸsúúT,""""""ý PHÄ]YjyëÃFFßêGd«§/XìÀJÿvc€µ ¸eodÇßÎbçvö?fˉÖz0~Î8’šøyqÃõ»é­)$âFÚ.4m "h4këüvÌx¾;õéo—óÎÿzòÐìQ<\}ŽÃ§-0ÁŸ¯ã2gòÏðØÏÏ€‡‘/}a"É“q­)Ô,€‡¿|Ž7Þ®âo3|ùÒˆëzy""""""r ‡Ã1ØEˆHÏ.^¼8Ø%ˆˆˆˆˆˆÈ fĈ¾ÿ6^ÓÇDDDDDDDDÜB!7¤PHDDDDDDDÄ )qC …DDDDDDDDÜB!7¤PHDDDDDDDÄ )qC …DDDDDDDDÜB!‘›„Ѩ¿®""""""rE?'êS¦ÈMÂËËk°K‘ˆÉdê×ñ …ÄÍ™9q²n°‹è¼½½5bHDDDDDÄÍF† Öïχ‡Ãá š†ˆ&ÌŸ奷ØsÑA#0r„‰„˜Iåh½'?¹o”«Í%ü§ŽgÛ¼Éø{^âĻ嬸ËöŸ½• ´ jÍüó2|ypþ{ê»8yK0Ó„õäQV¼^OêëG{ÖÜõÈLÞº àD»ó¸¹š*~[á õö¼_Á@Áˆˆˆˆˆˆ¸ ÍCiã$[ß¾DÝX~pß8Lž£¸ã¡açÊ™üä¾QWÚøùñÜ7&ãï 0œÉ÷…ñôD8¸ÿsvoÕ¥‡‘XÙw¤ ë3û00Þ»»:<1OæÑ‰@ýežï®­ôYÀí<7o$¡8œÁлՃ\”ˆˆˆˆˆˆÈõ¡P¨µO(°Áô Q®©^8tž<„ÞÐn¤Íp¾|ç0ÀÊ_\ºòt7³]GÎðñ1+xŒ`ö¸kqrµ|¦ÝÁ‹ †DDDDDDÄ iúX+æªF.¡ºnsÆF#0}b'ÓŒ< œ®½ÁÍOŽàÿËž©çVã†1ƒ ävy–&¬'ó»*ðò÷aƨ¾^QêëY´n_›§¦Ï¸ t}ÝC™Ï´;x‘Y‘£ÍÁšJ&""""""C›B¡>²65Ñ»ÛçÅô`Ø™ƒŸÀ Ì ħª¦cÓvaMðD?6|#ÿ«ÚEk uà3íž«ü‹>hÂ9bè$wïž9™ˆˆˆˆˆˆ¸…B­øŒóÀ 8ZU§-mΞÚŽ$±ZlÎ6Þ^mžŸ2Œ‘û/*CB IDAT³ËÆAô1?ú ÉõÙ÷+‘!Mk µ6m p°ò<æÚý¤ºy//3ï²&âïôk{Ìí>ÌÖÛÀc¡}ZOh$Sý‹k‡× ˜º]¸Zºc>Ô–ýoÍ•6øñö N5Ôsä­OøqLŸq s;Œ0Í]®9`^A>ÜѧÚ˜9Õ/‹™­wM=;_EÞ1x`îí}êÔí)w¥écíøDLc‹÷Q^zû8¿œF`ä‰÷߯“w´jsŒWÞ«&ùå³­ÚLåÉ»ÆtÒ«Ÿs]¡Z³§Žísm¾r;lÇøñÞOùê{Ÿ0Æ8kî.hêd¡iNæÊîêsu7¡šOø‘!qS‡Ãáì"D‡™n?Ä+àB""""""âf ‰›3sâd#“ƒ;á%""""""2t)qCZhZDDDDDDDÄ )qC …DDDDDDDDÜB!7¤PHDDDDDDDÄ yv"""""""Ò{çÎãÌ™3\¼x›Í6ØåÈ ðõõeâĉøúúö«mI/""""""r“8wî3zôh<<<»$éFii)QQQÞoMM 'Ož$44´_Á¦‰ˆˆˆˆˆˆÜ$NŸ>MHH „ÜX@@ÁÁÁTVVö«…B"""""""7 ³ÙÌèÑ£» ¹Œ=šK—.õ«…B""""""""7~¯)¥PHä¦RFft4ÑÑÑDGgRÖïþ¨Ø»´¥IÄÇ4÷KbR*9{©h€’ZÃaòÒ· ÀµEýç£'Õ”nɤ 
úšŸHDDDDD®…B"ˆô¤x,‘ÂÒrj¬Í/˜9^¾›ÜµËYŸDæÞå¿…мt’â±&¿j°‹q{Õ¥[Xû K7T`í¹¹ˆˆˆˆˆÜ€ ‰¸£†"Ò¬$¿¼‡óÖr²—/%³Ìr}êêV99kòé©d¹ª)Þ²Ró`×!"""""ý¡PHÄíX(ÍJ£Ð®˜Âæ±úµì))¡ÄõµgÇfVÍ›âjœì´lZ½""""""r-(q7–R r›‡ÛÌaõÆtÂñnÕÄ;(’Äô,VÏr=Qµ‰|-â#"""""2¤(q7 5´¬È3+–(ß®1+®9²RQq£¬-$"""""}R]ʶ´d×&3±$¦n¡ôšü3ßB¿ è®ÎꜵšY v8°Š½ylÚšß¶®¨bç-daBd'ÇtrÎ…›)I8\°‰M›òØ}ÜììkNK–,!>¼õºÚm-¤¸¼+&"b™·ðq’ãÃéìV”eF³8`«wf‘ ‡ Ø´iy»c|¦D;ŸÇ“ã ïÏýl¾7¥EäåäS|ø0åǯ,ä3%Œ¨¨D“âˆéðÆ•‘½˜ì6ϳæÁhÖ°Í%©Dvv¾¾¾""""20*óHY°…€UlÏÇ kHYšIƶT"½{î¢w*Ø–”DÕª½¤vü‡aÿë¼jB4RHdˆiùÌáù¬ÖIl„ëqq¥]n;ß@iQsAld'Û+ H}Åk¶v½ƒÙÜXRs*®k o©È!5vnç;«•²uÍbŒ]ʶ²./¾]‡‡Ù’Ï¢g²]«¯ÝÙ<³(Ô‚ÊŽíÊk\»rY©9PÈÖg‘Z@e/NWYJ¢gÈvBæã¥n}†EýÜÎrxKcç²`é¶–¶ „œç)gwîZ–/˜Kl/ëíö|ý^ˆˆˆˆHX(ݲ–Š%¤'4ÿ¢Ò›øU, ÉaSQÿÕ×ZU}ÞæzÕÙŸ‡…B"CHCi&)Íi«·eÔ¾Uó–Ìw üÙÍš¥™TT· m,Õd.eÍnç÷¦ùK˜×¾K™)ϰ»Ç¨Ìì^›DæÞë YÊ2IZ°¶çºÌ¥¼¸8¹;«UP°r1tõ? 3»×l ¨º’¼nÛy÷3dt~XK³Hyf7]–ߟáo!iÑ‹½Þ5̼ûR¶ô}‰ñ/DDDD¤O,¥äB\Tx»|‰ÏÜKV|kZV:™YIÄDÇYj†2¶¥ÄML| [ZÏ5«,"#ÙùZtL<)Ûc¡š¼çÈòìÅÑd6¯KÚ]?W[gk¦’•‘Ý|^ ‡·¥´LAKÎ( Â\mÝ—T]JNVY|m+¸ö¿\×ô1‘!ÂR–IòÒlŽƒ3ÚžEB‡@ÈÉ;&í묬L˧¼<›gdwÞaó2X—CûѤ•ù™dwµŠxœué ‰ ¹²`µ¥²ŒüMé¬Í?XÉÍ.dILgÓ¼z˜@VIm§.u1M©¡ˆ5Í÷ð‰Zƪqùê²ÐPYNᦵdæ—cå8ÙK×UAlWS²rÖ²ÆjÅ'j«[¦T5Î#3m ÎË+$kéaŽ·b [ÈêŒÇ‰uÝ Kå^6¬\I¶ë7»ó‹¨Žïê>”’é õ¦Ì[Eú’yDy*ËòÙ”¾Öy>Ž“½4“Ø]iDõz˜o›6´¹7«æµyÏÀBuÅvoÉp½op|S!eÉá®{IjI ©T“—ò kŠ¡õ”·¶§»ï…ˆˆˆˆôMC UD9±§†ù”lgoIPINòbŠã^£ +ßÊWž/'—å³£‰Žv~-͆òŠœS³–Oòò,ò*Ìn¨Òs?}­³“ٜGUörÌŽ&69œÃ]LÆê±ÆV÷åZh ¹¡äk¦‰ÜÔ¬•¤&­q 5 `~ÖF’Ã{úÑQMÁÊ$ÖºÆ'vœFÔnZOy6Ë—Âæ6;5Š«+¹ËãÙ=kKæ%FHàõøñÕÎáRŠ\¿šKˆê!ôð%26 v—‚µˆÒéD¶Ÿº `Š£Ã”æ–.hùf±]]·¦€.^jgVâœnk÷ŽœGâÄl6TÅe”[Ûmi ºª‚ò²2ŠŠr(ÜÝã]»Vï…ˆˆˆˆôQ±ó!½°Œe‘maw8'…¥5ì}¼Ý!¾!„1Ÿe{ÒˆiÿïÍê6¼ØÀ’íE$…•y,ÝZØù©»ëg êìF`T[’ÀRÍáü5,^™MD^2þ©ÙíµVôþ„ý”@Öö¶•˜p}!P($rÛKFJ6W6ª¡¸è–¨¨n€XJ7±Æ™f­f[VûQ0W¦õD…%³àÅp<›´ìyä%_ùñž˜Jܦ•®)BVjŠsY[œë|Ñg sâ’ˆKœÕj ™kËRSIó/Ê×. 
zíÕ]Ee:«4*„ÞMiöÆ·ß:‘¨°žV] "$œ[EM–Ý-K%‡‹ŠÉß]HiE-;¥ Œkö^ˆˆˆˆHy•¼Šé¬ Ë 5Á¹Õ{åÞLÒ6…°zG$PÑöY$F¬%;'‘Ȥp|JÉLN¡&¥€Œ€ç¿Ei 4{ ¥2Ϲã ¡²Áõoºîúé0•à*ë !ÄTLáîJƒ¨.È&HŠÒ‰Ï‰dÛºDB¼ šB@€¯kDÓUÔÁõCRO³>˜¦‰Ü´Žsܹˆ&×üÓªì 6u»ƒSÅ9¹® ëiQÍBS˜ßÜwûi=¾±¬Þ¼‚¨Î†Qš³;w-Ï,z˜Ù1ñ¤d6¯ôEYæ•¡™]}e^Å<¢†šÎƘö^UͰ%z=Ž(òmÕ¦”Š«ºl yé$Í}˜EϬ%»°”ò„`¨¼""""CLPY;Ò)J#!:šèèxRr‚HÛ‘E|§¿— "qÝF¢Š—MtB•‰›Yë ±,[BþÒhbâÓ(‹MeÙ”RÊ*,@Q‰aä/ŸMZQC÷ýô»ÎHf̧!ëa¢câyѨË~ú_gPl[ŠÒ®¼žØ\‡/‘I™ä$Ñ ïÞרÙ}Bn’P¨ ó'Gyéíö\tÐŒa"!fOÞÐC›É]X/4vßäÂÅëSÊ@ IdÕ²)®oŠY»¶ “íÊCˆŒm~ìœֽÔ5?Ž$¤9d²ì%£yퟤœöKÒµðŠ$1=ƒÔ°. ¤°(æ5?ÎÍ¡è&]–&7§ˆîJ·”æ‘ÓœÔÅEÑÛ[ÛPq%å‰ é¾qE19ý*4DÞ ‘¡îÆ…>i ÀÓƒFu=ªåÐyòlzk@»‘-ÃùòÃ+=réÊÓFâ w×QN]uAçÙw¤ Fü<_fûŽœ¥“•TÚê×9»Ñä¸úc®E-êèã}º.Ìüsû~~µŒWu^‘ùÐÇ,yõ#’¶Ÿ¸Ajîð…é<îÊ…¬Åkx±Ã§q_¢æÌq=.gKNi·DeÞ65O7›3‡¨æYAÞ‘Dµt³…œÒn·¶Tr¸Âõxb@/¦¬õ‘wñÍ«b³›5i9·n«’¼”×¢ÖÉäT^«Â®Òî dííçÐPʆŒÜ–)`ógÑ—¥ï*+»y׫÷’‘º¶eTQË”¶«1TÞ ‘!ÌU\B'v½p†ùŒF`úÄNæQx8]ÛjÔÇH_þsªêëùeéU~Ú9sмZð ö㮑™;‘–©QÝêÍ9ëëY´n_mõµìíšÎÛ4篇-€‰Äcºn×—Z®Fguôõ>]Ç>ã… ØÈÍ/ï ™}ÌŠü NWœ!ïØà”Ù'Þ‘,Iï ^¬¦e±·Ý‡qßø%4(ªÊ^JBr9¥T·jg©®`ï–¥$­)vSX¶$¾UáKLBœëËØ\”L$@`<«2ŠIY™Ïqs)–/`CwÝNYÈÆÕ±}qs-DDDpàÀJ7,åá® 7ÍbuF"!Wѯſt<>¥­Çk9ùk‘¿¦wÇZ,ÐfU _&6¯d¥xÍÃD¯Lsx~G¦s«Ð!ð^ˆˆˆˆˆ u7üH!ŸqxG«º1ÓÒæì¹¯Y-6go¯¶/Œ¼'gx€å/°÷²šö³&æÞ9ÓTfÒË©Q}:g;~~¼¶ò_xãþጬMëÞ^=ÕâçÇk+gòNóWû°ªÛ:úyŸ®9O&Üw'î1á×ä †^zë@‡@è¹ûnÆ]“|‰]±ŠY®ïª²3ØTÖn¸PPë¶­fÞ”·3…yÏo'«³« "aÝ6VÍéùÏŸ)l!ë7öw;ú@bã:N?3—SÓj$KPl:[6¯ ª-|¢–±±ßu ¬È”u¬Ÿßõc ›ÇóÛ;_D¼[Þ‘¤d=OÏo—QË6òÚóWFw²øTdÜ:Ti-§õê›ý½ênøPˆi£Hð€ƒ•ç»\mŽ~R͉6/˜yÿ0gÇÑ)˜È|o¸`ëåºw…òlˆÆ‹øa~EŸ‚–þ×ÒEpŸ®½ŽÁPþG—†@ ä˜@êª×7ÇÙšžÍávM¼CHÏÙÅöõ«˜?'¬ÕÎb&Âæ0Õz¶ïÊ!=¾›ôÁ;„ÄÌ"vn^ÍãqQ„µÞžÌg as²zóvmK%fn§ol;6.cNØ•µ‰|¦€¥Ýô&ßÈ$6ídóêlj‹ cJ«PÂgJsæ¯`õæmL&êF{›MĤå°sã â"š¯Ó‡)Qq¬X¿‚mét÷–t+(žÌ¼í¬_GT«{ˆÏ¢âx|õF¶ï*bcrá1sh^6ªªð@ÇÅÄÓÙòÚjæGMiYïÍàKƒµmyS¿"""""70›Í†‡‡G¿ú¸ ¦óøçÙµ³–ÞõáÙÖ[ÒÔÄÔ»ocÃWšÛ4ðãí<÷&xÖsä­£ü¸ ¦Ï¸¥‹éUy<æ y»¬ô°peJÔÌ{ÂùI«)P§Þþ€Eû]S£zœŠvuçìÚpîzhó~sšü“ÕüòÀxþ3bøUö1µt¬caU/îÓ`O!Z‚!š§’ÁE’ZRBovrIÜBIbO­| ‰I$-&‘´~T™@Jd)ýè£×çŠJ&s[roZ™BdBª L «$¡ {û~’UBozl9"*‰Œ-IWqD/ëñ !&)ƒ˜žºö'³$¾û&á ¤mLèÅŸ~¼""""Ò†çÎcôèу]Š ²sçÎ1|øÕæmÝø#…Ÿˆilùº/^Žóðº}|õår~xÌHâý·±á+­ÚŒ&´¾šä—÷ñÕu‡YqÌHâýSÙð@×ð}¢&ñ½Ê'š§Dy2·Ý¨£ 
ÓF0žÞOêòœ,4ýÕuøgWœÌ“s‡3ù»Žñqo.£·µ\6u|ÂŽºO×GëC7z $"""""înüøñTTTpîÜ9l6Û`—#ƒ¤¦¦†“'OÔ¿]Z ‡£?ó˜D†ˆ&¬1ì_Ê*Òe™Ñ,Îv>^¸¹„ÔÈÁ­GDDDDnlçÎãôéÓ\ºtIÁ›òññ!((_ßþmÓrL¹<1Ô_¹ñ=ZÓÇd@ÜÓÇDDDDDDDDd`)qC …DDDDDDDDÜšqC)$"""""""↠‰ˆˆˆˆˆˆˆ¸!…B""""""""nH¡ˆˆˆˆˆˆˆˆR($"""""""↠‰ˆˆˆˆˆˆˆ¸!…B""""""""nÈs° ‘Þ©««ìDDDDDDä3f̘>kp8ެEDDDDDDDDnš>&"""""""↠‰ˆˆˆˆˆˆˆ¸!…B""""""""nH¡ˆˆˆˆˆˆˆˆR($"""""""↠‰ˆˆˆˆˆˆˆ¸!…B""""""""nH¡ˆˆˆˆˆˆˆˆR($"""""""↠‰ˆˆˆˆˆˆˆ¸!…B""""""""nH¡ˆˆˆˆˆˆˆˆR($"""""""↠Éà¨.e[Z2ñ1ÑDGÇ’˜º…Òêæ×òH‰N!¯ºÛzÁ‚¥¿]ˆˆˆˆˆˆˆ Q …äú«Ì#åá Êg­bûÞJJ ÈŒ+'}i&e–âT°-i.ʪ?‘¡E¡\gJ·¬¥bIé áøàMHü*–…ä°©¨r€ÎÓ@U¹u€úz Éõe)¥ â¢ÂÛ½àK|æ^²âƒÚ>Ýa*Y™ÑÑd–X8¼-¥e ZrF€jòR“ d/nn 4”±-%ž˜èhbâSØÒ<_­:”èt2³’ˆ‰Ž!³´¾‹~EDDDDDD†…Br}5ÔPE! ¯²M,ΟÅú½%””䱄5dV$dmf!°ps ©‘•ä¤,¦xÖz JJØ»9ŽÒ”•ä´ Lʧ, ƒ½%{I5mí¢_‘¡C¡Ü¼L¾TRXp˜J‹/1i{ÉJì¼mE19æ³0Ñ5e-(äÄäW¸„‘rõýŠˆˆˆˆˆˆÜ¤ ÉõD¥”WuòZCe ½ï+üÿg¢¾ï}ÿ?­f1 Új„StÓ0)\“ìš3s"Þœ°v†õtXgzÁ™Š372¿–uZ—ß9•ôè9Þk:s~ýÍOìLØfŽË´.3Ò\1§²sãnj~Ð$¤Yc~`Ìbª‹‰»jýý±‹.° •×c†)òý|?ßÏ÷CºŸ/ïïûóù”±¿±!×n¶=e¦ ¬ wÿ$ó¼Bhg÷SfÌæèW¹ ®]/CòMÔ+""""""r‡RPHn± Š¡»«wÂvñýn;åîÀŒjK3•R×ÚIϱÃ4[C8+]ô'*˜b$‹b^:æÃç‹ûŠÎ-»ùzEDDDDDDîP É-–„©l/Fw õž~¢y:a{ê¨j1R½c\&͈Ñà¥ëhtñŸ`§ wìP¨»†|»;ºtRéFRSSb;šHM‡p(z2æaËiÇåŽ]3äÇi˧ª{bfÒÔõŠˆ²™ÔÓ IDATˆˆˆˆˆÜ’[/ÃJã¡*ŒÝUXÍfÌf vwU‡±LXº'—’ºbB[1ç[Ø7RDE^ôHJƒæ¼3fs>ÛZÂ8êlD÷/3b²eѱû©Xà'[C3&ïn,f3fkƒ¶ýTL õL]¯ˆˆˆˆˆˆÈÝaÑÕ«W¯Îw#DDDDDDDDäÖR¦ˆˆˆˆˆˆˆÈ¤ ˆˆˆˆˆˆˆÈ¤ ˆˆˆˆˆˆˆÈ¤ ˆˆˆˆˆˆˆÈ¤ ˆˆˆˆˆˆˆÈ¤ ˆˆˆˆˆˆˆÈ¤ ˆˆˆˆˆˆˆÈ¤ ˆˆˆˆˆˆˆÈ¤ ˆˆˆˆˆˆˆÈ¤ ˆˆˆˆˆˆˆÈ¤ ˆˆˆˆˆˆˆÈ¤ ˆˆˆˆˆˆˆÈ¤ ˆˆˆˆˆˆˆÈ¤ ˆˆˆˆˆˆˆÈ´d¾ "7/ØëÁåê »ÇÏÉ€dÖf™È/)c‡5—´ gôâ4ïÄEÕ‡±N,0gBýœµlmvroÝe£‚ì[jñÎ䜼j7Zô¡ˆˆÜ zfvº€œ½lµaœ´d} µ^(ÙïÃ1‡ƒØh›æú:¤×s€VWþ“ŒRÉÊÛ„ÍVJa¾‘”¹¼¼ˆˆÜ6”)$r' úi-/`ËÎZt„F89pWíN¶8ð Îg#ã=Tm¯¥c 2ß-«¯çm3`νpÀ£` ;k] D†8ÚNýîmXJé Ïg+EDäVQ¦È&Ü‹³¼×IH6•à¨(¡ +ƒ”$€0Á€ŸŽú*šüG©ÝfÇp¨ËBOwI³Òè³Nøñ­{#+""·3o}=y `¼ ÷Òâ¨ç般¢½TW’–4z`À«¦’}(¯ÌâP£E³""w9e ‰ÜQBt×FBkKšñ4;°æŽ„’H3æSÖÜFuž"^j[üèeŸˆˆHbYyy¤G¼Ôîë&4ß™ca¿‡'MÕ4×XãB0ú aon¤$"Þ&:úç«¥""r«(($r'éwãìŠÀÚ ê¦)æûg`­ÞKi÷à½ÁSn¯ÓŒÙlÆî &:ŠÓlÆl¶3öp[å¶Ìf3fs>–RNO/ñÅzfÌ×Öóq±ÓlÆlvÒ;î*AUeòÍfÌælåU´ù´'èÁn6cvöÂ`75¥ù±k×Ñ3‹Oówé}„ýÔå›1ç×áÃõ~rÒK˜€§Ž2K>f³™[9užþ)þØÑßéÄ1Ú—ùJN:ûïö?ODDæ_j¡ƒ½Å"]U4öÜÄk” 
Ÿ¶ªrlѱ4ßRŠÃÙÉTá¡þNœ±Ïü²:Ï”å¯_§ K¾ùÚØRÕæg’Q*ñu‡‡È+˜â"É„­, Â?Wûèøk÷$ˆ¿Õ»çèøßšh̾~Ãt:7ÑG¥cî·ÎÝ;£û‘SPHäÒïu3ä”’}£Âi…T:Ì1_ ³¾Zä ûVÊ÷uá¿¶ Q„áØzF[í3xh¤»ÆÆ–ò}tõ ]uh„“þ.ö•o¥´±7q¦S¸›F{el¢Ãd“1‹÷i̳‘xÝGI´ÒD¸¯“ŽŠ,ä$=ðTRZÛNßpìnNúi¯ÝŽÕÑ9±®p?­e¶?ïâèh_F†8êâùíV g ‘ù‘L¾½ŽBC„öú¦X z‚=uض”³/n}¿ÈðG]ϳÝê ÑGx°»†²íÏã:[Ï'2L_{-ÛËœtM$쮉]§ØÐÂÈI?]ûÊÙ:ƒõ’R¢¥·Ë›ple´µáóùhL¸#E¯³œò¦£±{ŽŽÿM“ŒÙáþVÊ,ÛyÞutš}¦¿µ Ëöçqs¿íõ;Ùj÷LÙv™…DîAüC@:9Ó(ŸDZFI7.8ca+õÞ¬-¡ùð1|>>Ÿc‡^¢xíØ”ó\‡ßájò(a¿Ï‡Ï縶Ù §žÊŽ“²Š¨~ùH¬®c~¹š¢µ”ãLôæ¶ý(¡ùHìÚû§Ú9æ&dl–ôyð&xúèî ‚"Kθ>vQ[ë'kG3‡ŽEÛvd99ú<õc2Âø›*iê‹lª ùбk÷¨y9†Ž>_…;0›7&""¤°§:˺–I^FŒ줶²“ȉûÌ?vøeönJ†‘£BöÓTÙD_$SÅõ>ò;DóŽ #Gy¾ÊM ®úpo•M}DX;æ~¼\MÑZˆxkqv*“VDd¶(($rÇbÀ `$=u~["˜ÊJ0Å­G”‘O•Û‡Ïç¡ì†©L½¸ê½`(¦a ÖìÑTŸ$Ò²­Ô4V“G„ö–Žo ”Ô80ÅNIšõèWE›€>ÜÞÀ„vw¹#`(Â2>MH/i¤Ùn"#v(%·ŒæÆÒoKÇõ?‚]´º† g/­Íe˜FO ‰ “=vNÎ6­ %"2×Ò,{Ù›'ÔàšÆZ:ýMx#?ó“Ò²±9[Ù›œlâ@÷õÆ`W+]±slÙצp¥dÛhhŽŽãõºêñb ¸a?5Öëç$¥ec­i¤:"í-tL'}&){cC4¸2ìÅU[ζ-OŦ-×Ñ6Í Q^uc\[’0Zjh¬Î†p¹¼×¦K»Z‰s­4—]ï#’20Ù›i,I‡>'m×Ò³Bx].†€¼êæ1÷›’m¥¦®‚tàhG·¦‘‰ˆÌ…Dî8F2æy+Ñôs}=fú’ñšÞnÜÀf!?QP'ÃD¡ èë¦wÂÓ_&ãM^wšRò Ù ´v}ÓÛï§;L˜:Y”ÙL2´’LEØÒ!/±÷ ¿ /`²å%ÌrJ2PD:zøÒw#""SËÀêØK'iª›½2Q€¾®!&ûÌ#…e…tùG?Áƒôv÷éØŠŒ¹6ʲÆ×ÓKwt Ä’x Ä(éž8P&–Q@û_ÚKñ¦µ$õíè÷Õ–³mËf,öV&_"¨˜’‰Ë6 ºü±1+ˆ¿Ë ˜°åÔ“„© ˆÐÑ3ÚGø»F¯‘àa'» χ¯Ñª]ÑDDf‰¶¤¹ƒ @$À`rçñi(š~ÞÅ“}ؽ@òÚMXËJ±˜0NsmŸàà@t !W9f×T%ý 1n«àTRg}­¤qRò±ÂÑ.7Þþ2²cÙO£k;$úCÀD–1QeF²ò€v?Ã@ øð×nÅ\;E;"óþ;YŒ6öV´±½©§gÓ$kê„ ˜0—H1šÈ¢‹À AL¤1Dôc²Œ_#Y&ó 8È@t ¤|êÿÄr )ómTåÛ¨"Lh0@¯ßOw·›®£'ö6Q¾­—:'V™—KV¢øTŠSt DŸSLi£÷ë§v«™©‡¹A‚ä’ŒãL¹çbþ»ˆˆL L!‘;Fzôa‘CÃóÜ”¤\ì­/S]œ}ÃŒœ<MCßœOiM‚•‰L÷‚‘éU)äÙŠ10„Û;š+Ô×=S`šÉk)ã‹G¦Wósÿ"" OvI %éà­ßG÷4–®1Lv ÙÀøØOôc&¿3'nz H"%#›|k)UN7ÝG^fožFŽR{`ü^¡SIÆ0ߦq Sô«ˆˆÌ*e ‰Ü1ÒÈ.ÈïÝ}ƒ”eßx±éþV+åž /P•?Ëi&)ÙX«Z±V…ø»ééìÆÝu”“#:ž§<%ƒƒŽÜi-tmرŸ{î ΃¤ E†vÚÝ^ú˲Éî÷ 0£˜aB“L³Ë{á0Ó~»+""s*)—нŏw·SÕXÄ‘ª sºÆ˜4î1aü;œ›Îø5ì`/5R=Ø·Ôâ½Q])ÙØÜÛêèöÓïȽñާŒIøÒ*7N?‰ ¢—!""·Š2…Dî £Û¤÷µvqÃ50ýt¸‡99HFê žÄ S¼MåžT F“•Ò*'în‡ŠI†ÜÝ7\'͘…ˆ nß#“LXmj¡£÷FSÇÞL ¶Px9±•DÓs¢{²ùû†æ á""r³’òíÔˆ´×ÓäIP"c@7½“ È¡þØzpÆŒØú7£¿~ì Æý(ÍHVt œdl™´,Lé@¤ ÿ"F³œ†Cƒ3þ6?À?²0¦¤æ&»ßRR£‹mû{ h‡‘[BA!‘;IF!e…j¢Êégò¬öþ¦\C@Þ.ŠnðŠ/Ýhb댯)¶òuazê̘ÍfjlŸfÊŸþ›Ì¬|Š €·•½ 
žþÂ~œV3fsÕ´RøçJnÑ.Ò‰Ðí÷Ä-$?w²4!/î£ ? û;¢F9×Þ§åDÜ-xÍ·ô`7›1[[o‘Y”BÁžjò C¸êœtMÈ€1’S˜ áîH´Cd€.×Q M£™Fi˜ ó€nO‚-äGq{Çÿ0‹üè@IëÞ× ãwZ1›ÍTÝp Ì&/ºã-Mž)§yŽºñ†¢|&äIEÜxŒÿƒÝnºÆœ“FnAÁÝ’øzƒ;f³kkl”KÊ%¿ w¢û û©ËŸÿç‘»‰‚Bóæ2#ïõó/ÿîcKÃqži8ÎÖ‹_ž}êøˆŽóLK?§çµx%ÚÞ„_¯|t [rûöÑÜ}H…“®r,¥5¸{ã¦%… zh-·Rî: †<ª÷Þx‡Ž4£1š±ãª¡®g0öÐ"Ð餼öè¸ÒI˜,Å€Žª*ÜýÁë©¡~<ÎÆq…@RJlKÙÃñqI&l»Ö'q•—Óè¼VWxÐOc¹×¤—”7׋JO%;[: ¹[h 1MÑž¾z5žþXÐ.Ì ¿‘r»‹! ï*âÚÄ¿Ñ _ÄK­½ϵ¾ ì÷Pc¯Å äí*šf꾈ˆÌš4 {÷æÁI/Þ©¯ÙEä`Èe§¼ÑÏ`l ûq;ʨïÖî $nK+,¡Ø‘öJv:{®ê÷P㨧oÂU’0Ùv±–踃ø˱GÊ1×™LvI%k!â­e[imãv ûét–QZßäà(M”¡½2~ÌÑï©Á^ëpNFaÑa®{‡þàµNŠ;']×Þ^¥W²ƒµÀÑÚrœ=qÏÁ~ÜUÚ#`(¶ÍïsˆÈ]Dk Í“‘¾w);|‘GŸ\á§Wa¸|Ž·_ý3{~ÿg>8¿„Ÿ==›W»Ldðc~ùŸ—°}çaVÉÚûÎF^¢A™OèX±‚—weézçÖìöÁ¼J³ÐxjìÏÓ1ÐAýÎê•KÞÄÞÖ:¬7^zrKØ›ç¦Ö{’öÝ[i¿vÀ@^u5%µµÄïy’dÚEõ¦.ž?z”úíG'^m ÍŽ¸É”t²Ò¡k¨ƒÊÍ@ û}rì²F^è-åù£}(ßÊqUr*h¨˜ÞÚDs'›‚²,šê£)ó……&&-¢¤¤Wív:Ælµb kG3Ž1[ §PPÝÌŽ@9:¨ÝÞ1aw–µE ìt÷™KV{ÝÛ¢žñÒ,44÷QºÓEßr¶ŽÀ’7ñB£1‰¥Iù8šwÐW~€×n¶Æ®k+¨(i¢iü&cÙe4¾ÐKéóG_ÇCEC“&°ÆKÊÅÑØ@È^IÇ@ûvw±/Q9C% ØŒ Ž™J(1¸ŒÙÉlz¡nì9)T7ï P~€ŽZ¶wLå(jØKü0—”» çÞJ뽸voe|w²vŒ}Æ‘/E™Bóâ^ý‚Ͼv?ÿôôªèî KVòè·çpåF~öôÊY¾Þ ÿÖö)íçþ:ËõÞIî²>ȰPã>ÂÁ—öPhÊ"õÚ’@R³6QR½ŸÃÝNlÓÞÏ5kã!š÷’«,yí&*šÑh5&(Ÿ†ÅéáåJØ”•zm‡äµ& ÷4sØí÷pšMIc5Å9±½Ê’‡¾¶6B§'z/9ñuEïãPkÙ·Á“ßèzNPHáTiB¤PèheÿÞB¢·k 5§=͇h³'n%åbo;Äþê6­ÝË-vÎKi­)`:q=™ Fl{+X;ÉѤ\îÃÍì)41únHÍ¡pÏKô8±$øOʵÓv¨™=…9±ñ;™œâj¶•ašä:'žƒ/Å$¯eSI5ûµR6“2£ ö ±—âMY×Ú}†0EÛÞÙ†c² * Yìh<ÈK%y×Ú¿vS /¼ìÁ™à†£÷»Ÿê’MׯeH%§p/l¥¦`ü9Imt¾ü%›Ö^ÛåtôãP›}z0™–EW¯^½:ßXpÞ{›­¿½Àš ß éÙñ›•ŽŸ…3þßÃxZþÌ/Îß‹³2‡Ç¸Ìé×ßáGþ‹œº÷,^ÌS¦5ü§ÅÊÖ»˜|÷ 6_èçÅß§ûs¸gñlßzˆçÖ¯¸~ÝeK)Zr‘Žó~ül`óòé´1jäÃIêîëã™Ã_ðPöWÙ8ôÜçeKù¾ÉÀ ¬üŠåìÛþ(.gL[ŠW^Â3t/á¹o?ŒíÁä׿ÌÙïò“#_ðΕh[É^ÍϬË82£>˜¬/•­± õ·bÝÞÄPaGê d õâ4ïÄ—%""r×ݽ,¯šÃ7žš.""we ̓‘¡K\JŸ, t†?ä_߸ÈȺ¯óZåcü,ó¯t¿ñîS©Xw=@Àм\ùÖÅð¯îóOý¿­|Œ–õWùuÇû¸OÅÕ÷ù%Ö=Îk?xt’€Ðdí¸qÝœºÄSÛ7rp³>¿È/z"X¶?ÎkE÷b8½:n±ãÏ/Á“YþÁZž[~™wÈ.NÒ¿ÿräµÊÇiÎ^Ä;ýŸpà½öÁ¤}9ƒ~»Nt×1Ŷ¼)¦Ž‰ˆˆˆˆˆÜ9º[\‰N‹ú¬ÿc*^ù3ä“×*ŸÀ–9±èÈŸF8lþ¦‘dîai9pw_ÜrÍIËØ˜±–Ü3£fL§î{V%óèrHÎ\Ê:€Œ•<¹r Ük€ áKc+]¶ÛÃɰd›×-.rüýO}˜}ò½¤ 
/¾ÜËú¯W‰$ØÔjÊvΠ/eaõ»ij‚ô]ØLÊY‘»ƒšžÉ«sðÁÐ0äÌR¶Ðª¿á{ÙØÓ…wNÇÑÖ7n*Öu#áèܪŽÃÇé8wà|äú÷I‹®­í2Ó©{ÝÊ™¤1æ¿ÒûWÝ\ŽõݸrgÞ£âWáýeKùþÓ™Xïÿˆò?]™y;gЗrwëušÙym…K…›v‘»†‚BóaýJ¬¿û‚öÁsŒzm½™¹ÄȘ ˜dµ>Ááoãýwƒyã3~}6:ëзǞiX¼€¢-ùáøÀ _nKù)ëîK´uÇ4\¾þíÙ3Ñ,¢èÔ» cŠ~+Ä;@ÑÓ¹XsàôÐä÷2u0E_¦ßÜ=È)=gÉe$9‡bG ŽM‘»‡¦Í‹Lv<{/÷}z–Ÿ¿~†Àås¼ý»?²¥á8ÿk8Á9V'"|tøð/tÆ…Þ{›m Ç©x5̺œ‡xîï—ñ°f¹XÆše@ø*#\æþ +ØùS€.súÕ7y¦ÁÇ‹}_|é;›“º?á:q.ŸáÈû—añ½l^7±XrÒbNù .|„ëÝø,¡ôÁ”}) IšÅI·Ï‡¯»•*«ñÛßæâðùði‘i¹¥Yiôùði‘i‘»Š2…æIrÎzZ“>àÅWO²õ“\–/3`û‡oðÜc³``5ÖgÏr¤ã{ÿ¯ã<’ýUžZ¿ÝQëá,š6¿ÃÏ_?É3: ,bƒñküøÙTà2O­_Â7C”7œà¿ÛÌüØv™÷)ÿØð)÷,^LÁ“køaν_þÆRš¼î›LbÙ2ÖzŸ-¿¿I~ðm#-X,ùé¿áÇøŸo¾Ï–·óTæR29þáiÈI›ALÕ—"""""""wmI/·±Ä[Þ‹ˆˆˆˆˆˆÈ—§éc"""""""" ‚B"""""""" ¦‰ˆˆˆˆˆˆˆ,@ÊY€Y€Y€Y€Y€Y€Y€Y€Y€Y€Y€Y€–ÌwDdz>ûì³ùn‚ˆˆÈ¬ºï¾ûæåºSEDänòeÆÓEW¯^½:‹m‘;€¦‰ˆˆˆˆˆˆˆ,@ ‰ˆˆˆˆˆˆˆ,@ ‰ˆˆˆˆˆˆˆ,@ ‰ˆˆˆˆˆˆˆ,@ ‰ˆˆˆˆˆˆˆ,@ ‰ˆˆˆˆˆˆˆ,@ ‰ˆˆˆˆˆˆˆ,@ ‰ˆˆˆˆˆˆˆ,@ ‰ˆˆˆˆˆˆˆ,@ ‰ˆˆˆˆˆˆˆ,@ É7ÂG§>›ïFˆˆˆˆˆˆˆÜrKæ»·x±á:X̾ûÖUÑŸž~õM¶¿u…¢-ùaÎh™V¬àå]Ùœ~å8ŽS“\"3™ÿ~f„ÿ;¼”úÊ\ž¸øgþ¥i˜n`ã“ÙüìéÀeÞxåì=µ˜ï•>-ã2#ï}À‹¯†8öùU.Ë—°æ¯á¹Çî×þ±îY¼„ÍOgòϦ´ëebm] @Oˇüâ<Ü—ù­ßYCò—èÅ;Ë'¾Ëðä“F~útÚ|7HDDDDDDä–QPh‚+üÛ«fóöoLTkõw6ò8s™¯üN…ùà<™ œºÈñع'†þ¬‚|pÀÀC0Ò÷.e‡¿àþuÐV´†û—|ÁG¯°ç÷ïóÖ§Òôl\0cÌõÎóÆ+ì=òëV¥aËßÚN¼ˆ„ÒSiYP!`xˆ_¢A¶coø (0$"""""" †¦%piè,Þ»<5/a]º¸Ê[žà£÷?çppéÌç|pîo…ôe<Ê)¼úŸ­XÁO¿½†û—ÜËš§³øA:¼óÖÿÇ‘s“]oO~s)p…#ïŸ"u‰^}ºÊ=+VдýÜ?w|[K}˜Ÿ-ç!®FC¯ç¹Q"""""""·†‚Bã-^Är®Ò~äNÏAõÉ&ñpâÌ_€ó|0t’RøþºEþ‚·†¡/8<´j†wÏá¹=˜:.3é^þî›Kxÿ‹É/xùjÂôj»ÞºÌ¥¤š&ÉzZ’×?Ê>†DDDDDDdRPh¼å)üpÝ"8ž_úG—9ží Çy&î«âÕáéÕŸ¹” À¥3_púb?œ…{V-ÃjL.s"ð§OE¸Ä"6Ó9s…KÀ#é ¦5-YÀ'g?O|­ËçøC0`ÛpßõŸŸ?ÏOÞºÌ%€ðç?5YQw†DDDDDDd!ÒšB <òìý¼?LwÏGüÝú¦XSèÆîçÉû?¥ëìEÞÿÓ%ŽO­û ry1™«Æ-½ØÀK×ów›É÷ï‡Kçÿ2yVÔ1òîÛüäÍÑŒ©E<õd¦B""""""rWSPh«ŸM§8 .\I¼&Ï—±&= ¸Ê;ç¯BÒ½lHX͆ÌEðùeÞºËW­ˆfî¬_‰u1|ð^0ºõ5#¼ñn0`ùæŠqWXBòcñcã".}~uòY¾”GR—+±üýrîº{>âíY¿Ó;ÃÈ»o³§ãÑ€¶§‘»Ÿ‚B“JgG¾{n\pæ\ÆÆØ·Ë3W²&öýºÌëWÛ˜ùÕØw™ìxö^îû<Ä¿ pú2ÀyÞÿÝ{üë<²áo&L ‹º—ǾµŠ¢$¸p*È/û&YŒúá¿áû÷á üût×Eº‹( $"""""" •‚BSH6}ïO„ M?ÓÐljéV›*ñÁ0—ñ‹y,óÞëíÈYOë?~•‡Î)ûÅqžiègχ_ÁöëhzvŠÆò5<·ù^îã*G>œ$h%›ŸM!xç­ùÃ…éÞÄ]`ø=~¢€ÜáBýêÊm˜Í˜ÍùXʪhógX¦§ÙŒ9Ñ—ÝÜ,»ô`7ÛñÌ´ò›=oV… ÏçåED‚ »ÙŒÙÙ;ß-™Ú„v&Só-”ÕyÌáà1òîÛìj8Î3 
oqäÜLÎÆÓ2ƒMsæÄDf{ß›3ýT4¼‰çÌ\ž7[힃û™-4}Í~X¹†ŽùY*Ö]©X§,3“úF­¦Ä¾šì5É· IDAT’ñ?N}˜¦ÊDå—üðÃüóÃðÏ3¼^rNs®ÿûÑDmÊ\OkÂëÞåRÓù®ñ? À“ É(Üë¤lg¹/8él6’Dˆ~w;Ë·Ò×ÐI]AJ¬Œ›”Šf<͹¤&ÐY£|+Þêƒ4Z3®W˜WÍáF+úÂT´•–2´·Gî|·EDDn[ñcjȳ¬œmå¶Ú0ÎúÅÎs¼ç† òÚT/ŒoKC¸?æôæü0çÆ¥o³Õî;õþån¢L!YÀ’ylÛzZ¾óBrêÇUåâd‘ƒ½#Ñäòmôøz¨+H¹^&o/ e¹¤„Ѳ—ªbðַКÇ[¸#…ˆÌw#DDäN’bÂQWAz_=-s2ð^bä2<”~'>Ï^âô\dP­Ê¦©ò ¬«æ n`öÚ=G÷/2Ê’.™5™óÝ‘›ÐïÅ=y¦¬X@(Þ.Z†À´Ë4.û' “Åí.zz÷R?ç­½ =uå8“ªhu˜H z°oé¢ðp#Ö4ˆ¦âï„ý>éG”5ºè#‡bG k,0ꥭª’&ï0¤æ±«®š2SZ4½‹ãŽÜØ~þø§Ã+ÖÒÛÚN_$•¼Š—h(Íž¢ðØwâØi޶'wŠk¦¿­’ÝM^†#Éäï¥ÆaÁ8é/LDD&ÕëļӅiÏ lòî£É;Œ!§Ç®\z[œ´÷lÚCóK¥dÇ>gƒþVj«ZðGÀ0ös>ÜßFåî&¼ÃrŠËÈ=Ù„Ë_Â~Ÿƒ\ ðଉջv»jj(ÍM¹ùö³ÈÚýê f3ÕtOËŸùÅyàðq:þô¯}g œ;Å/_9û<°x ÖÍF¾ÿØ}ÑS?à_~s–îÏá¯-gCüÔ¥áÿæþOì<Û·æ¹õÉÑiU¿ºÈCÆKt®²ùþ ÿ¼êcþå7g9ö9\É^ÍϬ™$obÜ5Ç–]Æ‘–Oè ÚvˆeËLÙ† lxr)Ç߈.±ñ‰µ|oÉ'ü䋜Š_"VÖòÝX`h\cúc*3i÷¤}>ÂÛž÷øQÿe.™é_å§¶‡Y³tO¢zDn1e ‰ˆÜBC FãäoCÃCD€,cÆÄƒ†èÿ ǽ±ôÖ²eÜ:ö¹^¼'ÂïŒ Më$?®î,êº}øÚl ÕÛiê ƒ¸í;ñæ½D§ÏGÏþBüöJ܃£çuЛZG¯ÇYøé¶ÐØíÃwpì«ÄÕÏõ¤amÜO P2šêš½-ììÈã¥>Ÿ‡]Ôâìš×ÅDDîxþ®!L y©˜H_;µõX»ñíßAŠ•ÑruR_Þñù|¬HÅ;ú9öãܹo–ƒC¾n-Ãôøã.ꦶ´o®“#>m¥öí´Ç'7!)%úÒ!˜åµúR±îú?XløF4 ÄÜ/Ÿæ­ô¯s°r#‡·-ã߈ûÑc¿9Ë'™_ç·•Ó²á¯ÿ|´®3¸_ù”wÒ¿ÎÁÊÇ9¸ù:;>àȵå†"¼•ôuW>Æ??v÷o΂)›Ã•ó»\ÎÙþ`\Ùxg&)›Šu×E[F"7jÃ:‡–±¯òq~û÷N¼y’ŸŸK£©òqþã‰Å{ãt‚u^ãëÜÈám÷òÎï?šÆÚK3l÷d}þn€=$öý`#¯Uf³ƒ¿ðóÿŽýîÆ×#rë)($"r·›îl§¼jû|øâ¾­s™Š>LW}åÃö„ÒÙUa% ÃJ™mw÷¼¸ûŠ)±eGëʰRfëÃí ÄÎËšoŒ«'‹R[ìºYb8Â4ê‰3UYC ©.º:û §_Õ3Çý)"r÷3˜Ld'ARV.&€‚BL)@º#04ôR,8¢ÂॱΣeF?çºi@^Ñ&2€“Ò¬ë×ù»èŠ@AatŒÈØTH“Œ·£Sgqnà¿=›ÍÚÉ|ˆïf^ÁÝwúÚ1ÛÓé$³„äÇþ†+ÆžwýØjJ–Ep¿õY¬À",­îVaûÞFþ‡i #g‚¼¸ÄWáJ¢Í ì4Ú`Ë7F=¸”u,Æò·Ñ²÷¯šdßè1u™ëiªÜÀæ•7êÈ™·;aŸ/þ †ðE<=C|tá^6oßHÓ³ ·™š>&"rJIMÇAÈMh-308¦±ÙBáPt{JÊ|Îe`0µˆ¼.7݃X$4%f$5îYjmV‘ `€vv?Õ>¶xI6¤b“Ó>þß1SÕ3“²¶2ö7Ø×¸›mÏcÈ)ľ·[¶æ‰ˆÜ,“1}zƒØ·>?«„½ÎìÛê½ÑCÆŒÑñ3CܸE?ï];ÍÑ)ã_b= p(ºs¥1cî7t8w™OˆPÝt|ìÏ3#±c‹¹ïZ@ä>2WÆŸl†%À•¿Æþý’¯ a_ð¶çöô_!9i Ò¿rmÚØéWßdû[£‘“{qV>„a’²‰Û>Ý6LÓ„:¯›ÐÖïÆ–&¿ÇÄט¤Ï~˜–x_ô|Ì®7>†evüׇ(Y?im"·”‚B""w¢ÜBv¥ É?@Øš–x]¡Ñ2n/ƒV×c.aüÝ`(¤Ðô%ÖGøÒò(«¨Ád²³­¾“¼FË4”‡‰ŒÀhá“^ †]b$‹b*ŽU‘?¾C‚é7kªzèAYH3•R×Z á ýµì¬t‘ã)#{ú­‘›ôvà@‰Ã5×ÓfÓŒFÀK`pôÅÊ‘øiO±)Ö×§ Ï‚À^ Ð”uâ_ÚÊ%<€ïUlàï–Ž;vê] Ìg瀕çùìpÿèyñÇ®F·J_œ`rÉðÇü{ÿ_±Ùç¹—À`?»Xýì¼öl|Ù÷¨˜¤lâ¶O³ 
Ó5¡Îë&´õLÿ´î1ñ5&és`õcò³Ç€‹Ÿñö‘{~÷Ö?Ê£7W"³FÓÇDDîHÙ”Ô•°¶ÃI}g úö‘ýn;ùæ|ìÁëeúœT5úck„èwWRÙy{+(˜Ï˜PL†Õ#T˾ÑYÒŒ ^ºŽFovºp9c€V·ŸÀ ‡V÷Zvæ‚1[N;.wôXÈÓ–OÕLwz™²©é×3­¦*ê®!ßî&’ÒÈH7’šš2ƒir""r³’R¢Ÿ¶ƒƒA÷âj[4(«€bx;Ž2„ünÚ®NË·Qh€î®èX3è±c6[qöÞä6Q¡~ÜM- ­­`Ç­x3ïǶ,¯_bà\€ó£×ÏAæ×(Yáßá2#ï~̳cÏs¿>=vâ4®ÏcÉ™|Aæ‘ðUàcKDDæJJA EYxŸßBþV'ak19€¿7I&û÷7àdk¾…ªÞ, LcN¦º­š¼^›Íf¶5…(~¡Gî æ-Å©–ÝxÒ÷r°íVeŠ®Âö¯ñÈÐÇlk8Î3¿ rʸš?½HÅúÕXÎ}ʶ†?Rú:lüZ¢óþȶ#—°­K¼­{ê×ù^öbŽtœ`Ë‹p<û~Š“®ðöP‚àÆ”e¿ÊS™‹8v¤½~~fm¸ÉþØòÿü…ˆÉÈsÞà´›n÷Ø>OþÛùYúìiú#Ï4œ ìÿ½Ž5 ‘[oÑÕ«W¯Îw#DDDDDDæG} µ‘j5[¼‘E™B"""""²p;±›Í˜­ÑlØP~/²Œ ‰È‚£L!YP»ë¨ªï o8$“Sì ÆaŨ "EdQPHDDDDDDDdÒô1‘HA!‘HA!‘HA!‘HA!¹k…ú=Ô•Û(0›1›ó±”UÑæÆŽöâ4›1Û=§¬e>ÄÚÿ•o¡¬ÎC <®ÌmÙþé ÷·Q^`Æl®¢;4“3ƒxìfìžù¼ó0áš™ »ÙμޖˆÈ-‘h ÄcÏÇl6csöÎþgìM·ë‚žèööÎÞ±uL9†ß¦ÎôSÑð&ž3óݹ•‘»R¸×IÙöV"ÅN:}>|¾N^²†h*ßJÕÌ"Ó»Þ ŸVgçìhòª9ìóáóùðuÖ‘ë¯e[¹›ÀíÒ¾/%„÷À> ŽCø|u¤Ìw{f"@[éfšzo\RDD¦#L¯ÓN­7ÂÚâ—hvärÇï ?‹c¸ˆÈ\RPHDîBý¸ª\œ,r°×bŒ=X¦mk¤Ç×CݬG ziÚZNS 2ËõÆI1ᨫ ½¯ž–î‹ÃçÃ×h%ívhߌ… …Àh̘ï†Ü„CsЗiV}Xoü ¹« z*)wÄWMcUþ4Ƶ¹2“±u&Œá""·…DäîÓïÅ=y¦¬¼iŸ2¢d6;M쮣ÌMmÏ·”Q×=+·€·–-±é?ဇº²Ìf36m½¡±×)wâ´çc6×Lê”1‹< ËHÐÞ™´o’6„zqW•aÉ7c6PVç¡Nž]ƒxì[¨õ‚kg\Ê}¨—6»…|³™|‹V\^Ó`w¬?ó)­é`LL&è§5î¼¶þX~~ЃÝ\ƒ³±”|s>N8VO´¬9ß‚½­ò) Ë^ïO×N3×f LÙ;m±©rùª:£¿«3æüRjºÇ”½6}l\cúCDä.qÒcg[­—HÖÚ­Ä¿*H<–†è®2cίÃ?úpSj6cmí'~œm­+£Àl&ßR…;nîVÐߊ=6^Ž Æ­_bÌoÌ>ËbÓ¾~ùúÛìj8Î3 ÇùÑÿ:ÃG¯÷RÖpœg|üäõ ðxù8{^?wýܾ>¶´ôszB¥Ÿñ‡—}l{%ÀÈèÎâ—-ÇÙÒpœ-/þ‘;ñÙ ë‘Û‘‚B"r× 0³øž/ØI}e;¡ûñù|4 Ó^Y…{0 kã~J –*Þˆ5©›ÚÒZ¼¹NŽø|´•FØ·ÓŽ{0®>€¬½=ø|5ÓŸ:•” r§Í¤}£Ý2¦ ý´–í¤~°€†N¾# Ô²½¬•þ/Ñm‰¥amÄ]UÉPa3G|=4[‡éñÖ5ˆ»²ì¼Î:#îµqíô¦ÖÑãëÁaÆ]U»Òãóqlÿ†÷5Ñ0Ö28IÙëýY²ß‡#w:mðâî˦¡ÛÇ‘†Mt×n£~ÐFc·ÃÕétT¹™8ml=û ðÛ›nþ‘Û‘¿{­—À@câ%¡ÉÆÒòŠŠ!ÒA÷@´è ßÃéØò²ãêî[3ݾC8²º¨/uFƒH¡NêË› â >Ÿƒ©x÷Uâšj°»™1{¼©ÆðYq…Ρeì«|œßþ½ožäççÒhª|œÿxb1ÇÞ8Í îåÉõKyûý3œõöûY÷`«ã«º|ž7^ù€_,I£õ;F’8ƒûåÓ¼•þuVnäð¶e¼óûqŸšf"rÛRPHDdZ¢©)'›vcwzÀvŸ¯[‚ÙO!]((4‘dl*$>ÜÞÀõBYäÎêÌ©é·/QÂ=nšNBñ®rS€”\JvÃÉ&Ü=·`eÌ€w_1%¶lR2¬”Ùb}ðâî+ÄfK1í`W^üyñÇJ(ËíÂÝ3úÈ…5ßû>[kUùBÁBŒöÜx3(;6”Ú¢ÿ-¤dåbŠ˜(,Œ–MËÈ€H‚ZÇÕI†Æž/ñLjˆÈí(!²¶„—^*!>œqkßM5–&™ (&‚»«âïîƒtñ1!L6 ³“€ 6åA¤=DJ±à&4úQ 4 ;±oÞÊNW£ÕÓžÇ Ý̘=ÞTcø¬ø ÉÓ™3¿ôëX3¯ÐùÖ0œ:DZ%ËØ˜_à*§“îá‘OÏqdðòõŸ»Ì'D¨nŠNO{¦áì=ŸœL£N¹-™ïˆˆÌºÜBv¥ 
É?@Øšv“;˜ŒŽÿwÙ¥Ít—†ôöÐÓÚ¾£û¨t™ð”;5öz}zQ¼/±eU`/PhÊ‚ û—Ì } ¤¤¦…!ÖcÑï ¤gÜ‚ÈDŠ‘,Š©8VEþø_X`€,ü ‡ ™ 1<^ü±H4c`¢`7MûBì:ØM©ôP~  €4k#>k|ÙNì“”MÜöi¶aº&Ô)"r2æ’•‚eW-G›hwº°å—‘:åX 9–" ínܹ%Š1iB\û(¢—3¦ôvà@‰Ã5oÕ& cÆðù´„Ǿy/ï¿þ)àsX—Áš1dzã[ßdã[ÿ/eòÔ®‡£ÓÀV.á |¯b·tb­S×)"·3e ‰È](›’ºÖv8©ï Ä Ñï¶“oÎÇÞ™(ƒ(càdôàˆ;ÜߊÍlÆîa̵PºËF`LMRÉ2Ã!F€´|…èîò=vÌf+ÎÞ/1 +Ô»©…¡µìH4‡híK$ÉdeÇZhoqÑÚÕÒkwa3Ý‚yØrÚq¹û „ü8mùTu‡À¸‰’¼.[¢ýêuÓÚ^nOô¼ßE«?BÓdïaCÑ@ !ü®Vüñ5L»¬ÔôÑ ÙÍ´aºýq½NB=8­6Ú7_¥ˆÈm-Û†½Ð'›hòo8–&åX(2 ÑÞÞ5qꀿ—? r´Ã é%fARJt  B¸W«Ÿ9w£1üVËY‰5â_ßú+O­K´òÏV?›ÁŽËᣠHgÞmY„_¿:}–8àÅÆãühôø ë‘Û•‚B"rWJÊuÐúrW9›ÍfÌf »=)T4¢Ñ’èu#EÕ{0…\”o¶`ïI¥0>£<»ŒÆ—J0´•FÓØËݤW4SmM20’ØÇ6s)mÃT·U“×ë`³Ù̶¦Å/4ãÈapÅ[Ë–Ñ)M–ÝxÒ÷r°­ŒñϽ3n_ a‡aoÝÏÞŒn*-fÌ–Jº³ª'¿Þ¬ËÀÖЌɻ‹ÙŒÙZÇ m?Õ)@ÖºýØUXÌùlk1PP”ø& ܺĚÌûæ»!"""""""·Ô¬NûèwoòLÛ¸Oݨä0ž–ãT¼:|£‚cé§¢áM•è÷™ëîá÷/r x${5?³f’<;7% @òúGÙÇÛìé¸À£!ˆþ÷&""""""r›L¡‹CZ‚å›+t¬ëÓýî‘kÎàþÍY>Éü:¿­|œ– åøçñ\¡shû*ç·oàÄ›'ùù¹4š*ç?žX̱7Ns"á…¯p<œBSåF^ûN ôÂ÷fåŽdI^ÿ(û”1$ƒÝÔ•YÈ7›1ç[(«óÏw£&êuš1›'ùröÎwó¦aw™™ü:?»7D§côX/N³³µ•þñÅú[±šÍ˜íôÿV‘»Mìó?ÁWt˜‹¿Æ€ û˜ñ7AÛo×gŠAv³“y}rz°›íx‚㾑[fV‚Bgß8GwúJ6§Fÿ½&g™Cç82ºŽô©³¸?7`{:d–üØß°cE| ‹°å£Ç\Ê:cùÛhÙûWÝ3Å•GÏ2—ñW‰Ün¶rGH^ÿ(?}b4qî*ÇÞ8Eç ×A—;]€6{%í)ôùð51Ü^‹Ý5!1ïr>|>>ß~Jòª9ì‹ýÌ‘;ßÍ›† K6éèf`ü¡P]GÓÙe3‘@Y)n¼ã~ ý^7¤§ß’ÖŠˆÈ<‰ßb_wÄ0cÛÞYG®¿–månóÝ®QƒÝÔØkñÎw;DdÞÍBPè Gú.ÃÐ0ßj8Î3 Çyæ•§¸ŒëØ‚Óç.ó ‹¹oåè9÷‘¹2¾Ž¯œt3×¾ÙóDÆy÷m~òæèœÆE<õd&–Ôym’ÜjÁ^¼'c¹v<>ž²ìynØÝ)¥ÀFqÄM׸דÁîŽæ”Qx­ÛS±Ùòp‰ èûÿÙ»ÿضóûÎóÏéLÈ41;›HM"^sÑ„šKE$]~“9E͹âH/ »¢œ4@MáP ØŠƒ«©ÃÅÊ5’Ò u ˆÊb¤taåî,rƒ3…ÞPXêæèùêÒ‘Ú\¨8K%jR‡nZ2“úþ eQEI–4²­×†Ò÷ûý|ߟ¯<|õæçóù.8lïN°"""‡aqŽ Ð¶4ÆtªtÒÑPÊFé陯â<Ò¡ˆÈ#àðE¡;o“øÅ{ø 9¸ùõöîüÝÛüàÙgø0¿âgw7ú9?û‡CŸYäHÜ{scM! 
~ +9]Ì,ñ0ý±nÙÊù$‘ Ã0ðÂÄr{Õ†Š÷G‰†ÜÆŸðŽº1Œ‹£ËY"n#K¢í<.nED¤¦˜!äs?˜®Š-S¦L6Ò<SÊ‘ âs†‡`$ÉòƒÔw˜œ¼ÍŽ XÈæ×ÑFjÓ®bó1ú=ÕþÍ(¤"=†»‡‘Tasÿ3†“1Â~ëîmPf9ª»6ó›SàzÎÂÖ)úÕ߈< Yz‡ïeïaúíó»ïݺ僟ýžò=’o¾g“îߨðÕ[yîñ÷Þü7Þ>Ü™EŽ‚ Bò€ÅE÷åsÀ=²½¼àég&[7©½”b´g”´#ÊíL†XO…‰Þ‰ºû²yìWÉdþ”ÿ©+€‰YæÒÕ»ÇòÒŸ!s{ÏÊ(/·­e÷P9y³¥:-:Ÿ?¦uÒ$–ÚOe¸=~žÔè%Æ &Sn ·17”xo-íNÚ÷ÓÜ4½s.^^ÌÉ$éc”èB}ôû=gÄÐôÝd1“áõë—YŸ˜"¥µƒD ‡+ ýÓHüÝÓt¶Á3âßûQ‡döGThÁÿ¹à»ûS.›žoÂg~óPg9¼õ¿å‹*Éf¡7‡;±›€{Y¦ú/JV+"¥ì ðxXëy/.–H¤ó›MØ=86>tstÒ× ÉE„ÑÔØ IDATJ”YšŸ£bêÂç4¼­ƒôÂé¡‹JmN‘lj ÚÔׄpð¶›+ç;]P™%µ²Ï>¥v–Í)dùt‚•®ž7Üœ Ky @6iÂãÔÿ«""O¼ô(êlÞmai‹èë¯2`J3 ž^ÖX¯Ð4—L­BW_7 `qÐÝ׫S$ëÆ±â¸´Lbjšµs\~èE‰Þ}¥ÔîP¢º¸´¹k›–ËCŽò)Q|U"Ÿ![ÿ •ˆœ(…DD‹ç*±á.JÓ—ªëø†È¹†‰ {ª7?ña\¹0/—¦Jt}ùaG³›F n¿€Ž€ 󾃢­0;ñªÃ‘Ú®Ÿæ9t,â1.]ïdxr‡ù]ˆ«! n¿‡{•Nzö¸Q¶9عDDäT³xï´“þÂÜ£”ý]tÙ\~cÆùØì 4s«Öƒ>Ã7HÊ>ÌÍØGÂ4R¿’ï%’mW¦ÝcU$2ˆÖ-[¸Í5z‰ïwy¢i¤ˆÈ'ÇBb­ñÔ1‘-4-"ò$ÉE1zã˜Z\\y¹û¦."""""'IÓÇDDDDDDDDN!M9…T9…T9…T9…Ty$äˆÆ–/pŒ\é€m„’÷µ}¯ýSõÜÉå-?/’ Dsïz@""O¦b’ÐF^J±™Rªï·Õ|"ùî'‚&åÄêW5?œdþÚfãú>H\ bwûF’äËM[9* ‰ˆI$èÁ0 <0±s¡k¹§?J4äÆ0F8Š:Iã¸Ëd#n #ÂâF'ÊY"n#<_JWÊ‘ âsW§m#I–Äs„±Ú츀…lþp9b* ‰ˆ<¢Êù9â³`òzqZp`6K90C*ó*a{š‰Þ(ÙýÔRÊ9&ûû™f€›™ ™Øe˜$4³ü°]Ø›©î‘n–‡&7oØ(ì'ëz™ùL†ùˆDïè‘üñ "rú8qza-»B‘<+é ¸ìl(Tšg¬ n’Éd¸9ÐBzbø2Pœglp–Òåëd2®u®3;8D¢ÐŠò:ÝP›25‰ßœb´g”´#ÊíL†XO…‰Þ‰úO²yìWÉdFð$Ç5²kÜfœþ&f™KW“Gyiž¹ t§™òb‚©UèêëÆa,ºûº`uŠDý'û‰UDä1¥¢ˆÈ£¤~aÊÅyf†üØÌß\«Õ@>¿w!¥˜_¡´´ö#Ûƒ2ãì ãHDˆ/×­GQʳB ›á˜ÀÄÆß%""r@­v'm,˜Z WǶ÷ûâ<¡.ÒÏc󇉆\u{¸âäLet|”Þ‹ÏãéÑhVo¹T•ï­s.Œ’VòuC=[LœÙ+àí Mï¶^³¸iÇ´Ãk ,²ÌÏVhë à4Ci} ³e3Ñn¼^[?`¬{)—ª|ج'¶ŽŸˆH#* ‰ˆ<¶îQYoðãÊfݤXÈ`³í} Új³cò…‰cñpeØÊôØ ¹à-6쬳y_^ë˜éÝODä‰ÐîÄc‚t: mNìÛRC1=Gºp¿ÃŠÉT_…7ÓÞsTæ67¯™+çÏq/;Á`¼ÁãÚût÷õ­OËÓ£ÃšÇ 6W€^cn,Î,m\Õ°––6Ê¥ÍÊVõµ‰6ë@’_! 
xG²²·ˆÈ‘QQHDä±aÁfò+¬–ü"ÉFÓ²3ij% Àksi0uáÙÏ=¨ÃKß9HϽV]\º´HÄmàŽl›ÖuLZ}W¹j™e6[ûÍE cDr™PÊÆ™Éºð:õ«ˆÈñáôT_™¿ä>¯ÿu^…¡GЧ×,‘Êö÷â ¼ï"LŽ©‹ýLå+‡<§ì—Ëea,šä]®ý‰ˆÈ.lÎv`a±n$gy‰Ô`à´XhGÇâ$ miŒéTi—rÿ "òø:ž¢Páûü韽Áï¿AÏŸð½ú©]wïðµé7¸0þ¾òm¾úÚÔÚ”°ÿðç9‚ãÕc’w¸Ç:ÉéŸ0ÌÝzƒ¯,Q7}lû¶ä¯^yƒ+ß¼»y¾¥%.L/óãcé¨<ÖZ>Á—:ßÏo¨0ôèÙV—)ç“D‚žSËb¹Í›¸òrŒÏaxFfˆö×O{Ú9ü»Šô¹1 ·/H$U :d¾—8@z” Fˆd±vl”hÈa|©ÿÅÀpGÈnÜ)çôþ™å‡ìèÆ9&I΄ð¹ ܾ±åÍjw1;Së_uÈ{(¶¼uÊU%ÏkÑ cç±Ûí÷:öDz”Þ…Ú˜-8À@aŒ©ÝnÊ‹YfB>Üû蛈ˆ[Þ6`.ÅÒÆ[îJй ´y;°mìWÊ‘ âsµü›dy·úÊŽ\¼mŠr.ŠaôÇæ‰ÕÞó=ÁÉźœÕ£>4Ëgûë§°ÍoÆ÷ ß*ÕÇ\&qc7b(g‰¸ Œð<¥¦ñ4jû`¡Šˆ§c( ½EâÏßæ'g?Êÿ3ø;LêŸyãuÛ^ù1ßmû(7?íKïãoþËHÜÙØþ+Þ([˜ü ù9 ,ÿ„Û‚¿ïÃt>ÃwÔŸkû¶_ç³Ï½—ïýÝ[¼]Ûã{÷O|ü_¶ò‘£ï¨<Î<÷I&Tz•XL¥Àä%àn…RŠÑžQÒŽ(·3b=&zCÕ©eå,ÑÞ Òö0¯fRLúÖYÌ6iº8ÏØà,¥Ë×Éd2\ë\gvpˆD¡ÿäuº¡6Ä|ÿÆGƒÙ<ö«‹d2_æÿmTæH­T7²IVh#àjßýœéQ.l[Ï ´}±†ì ï$óóãx×ÓLL¥ª7Î¥yÆú§`à&™L†›-¤'‰×× ²YÊR™W ÛÓLôF7‹V[.ëþ¯ãxû‰f×ñ¨˜ÚééfyhróFû‰Á~²®—™Ïd˜ØHôŽêfZDäXµã ´mÉuËÙ•-¹n™™`/cãó2·'ñ¬Œòbp†‡ýˆ »°†s|žÛ/wQYšetlßdŠÌõËX² n$¿fùl¿ÌÌùüæ”ñù~eËÎ8ýLÌ2—®&¡òÒðìÆ>ÀÙgë·Užzcë1g7þºù5Θyþ÷~ÿÙŸò|wÿswyý™÷ñ¿Ÿ=d›òÄ;óÜ'ùRáÛ¼ø­w¨†î0ÿÛ­øTO|÷¸†¹5éÇœì³VjÁÖV}C(—ªŸÊÅ{êô® ùES›u£ÈrS³ß[«—+sä&²¤ã£¤ã£œq^áÚË=´ïöþÓbª«ÌN|]&fSY–ûÚH¥¡mÀE³šÐ¡ç ]üY{7WÃa¢í.¥wݽÕjÒäóEplݶŸëØÒ²ññå×ñH™qö…qø"Ä;'ypÚRžZx&0±cj¡ˆˆ1‡—€)N|a‰¼Ö¶æºÒúfËfâÜx½¶þðÃ9¶¶}í×,Ÿí[¹Tý ÂfÝ\3¨>ßïÐŽ'hgjlÅ‚…ìl…¶N3÷OÓ¶EDNÎÑ…ž}†Sægwg~ÎÏþøàÆ6ÿnàSüî{·÷Öa›Ö{†Oÿö¯ówßü)Å/àãV>vD-Ë“ëÞ›ßã‹ßÚXüê)žÿìY„NˆÅ9ÀÈåEzoL18åàfØY-Ý×3„·:ÈÙ€4ùB­À=*ëÍÎ`¦½ç©žùÜ"‹3ÓL¼6Á`ÜI2¸ŸÍtø:1Í&HL:x6š:œbzŽtºÃaü(4Z»²Y')òØl­Pk´éH¯ã³x¸2œàâØ fªCú-6ìdY/xÐQÓ»—ˆÈ©dÇÓi">›$³°‚‰nçf®³´T‹7åR™Ú;víµ‰6«…ùg‡Cæ˜fùl¿ò+¤¯Óû|™Í ƒ1æÆ*¤ëóÓxr ˆÈ»ãè§ýMº£ÂWoå¹Ç;Ü{óGÜØXàçì ¼¯Âþ‹5îÜÍó•É7ø|ýÂÐ ½ÓÓp¯üûÛÖñ,þr‰ÿÝæùk5!iîÞ›ßãÊÜ?ð} Z²ñ¥ßÓs&NŽGßÝç`->ÂT®L«;€×©…,%  a~¢¹2Ø=t™ =÷ ”Mk0uêå†A(QÂæðÑÓW}ÊŠ­¥hÁîÖKÕ÷¨Ý"ìðÑiZcvvaï©c‡d¶T‡É E(çˆÏ4Xè';C<[ ¼6—SžS¿Žô:ƒVßU®Zf™Ýè¢ÍE cDr™PÊÆ™Éºð:õÿ§ˆÈñ2Óáé–˜Mƒ)€·®Øavú¹|f§ãäJ@)G|zÎõp6vkÁfò+¬–ü"ÉC䘦ùl?JË$¦¦Y;7Àåƒ,ðcõÒ}Òé4؃xÚ(‘t M·àÿÜGðÝý)—Æ¿MÏ7á3¿¹±íC>÷›ü·k?âÒøüþ׊ܱ}„ÿí÷žmÖ ð/xþìS¼~{‰ÏóçûØÖÆ ÿò)~ùôûxáGÚ9y¨ ôˆ2;éækÄGâ,[< džqå¼`\š*Ñõåk„f0; _¿‚k%ÊE·¡œ³IÛíA&_îÆë©®_ÔŸ màÃþVÀгËKK~‚KF±ünñ9ñª ¶ö1u¬ÁBÓ›OGkÎâ`¼ÓNú p_ŒRöwÑdsuÁ9ݴ̇ð‰®x¹ 
Óøž|뻊·vC9;×>0~ gú%|†?R p=Âöå˜DDäè™n¼µ×¦Nö­ Í\çª5Å ÏÀð ’²s3Ü%'Ú辂³§ÿ¡Å¼‡É1ÍòÙnês±ï%’mW›Ä»ë‰qû«W¥#àÚ|ÛÃÄ#"òˆxêþýû÷O:ˆãðö7¿MðíV^ý7ZPHv±þ·\¹þ÷|PAèIR$ºÀhe˜W¯ù9®5—gü¼8¯$ ãH!‘ãr #…En¿ù+|Ïío±:9¥ZÚøÛS¼G¡Ç[qža`ø'«#oJKdÓ`²ÛŽ­ 9¶=‰EDDDDDäqóäzk™¯ÿœJûG˜ðŸÕ*ÿ²‡{üðÎ/ùØÙœt r…T„¡±9–Ö+À:ºÂŒ„ýØŽcÔv.ŠÑÇÔâbàåqzv}d™ˆˆˆˆˆÈ£íÉ+ ‰ˆˆˆˆˆˆˆÈžžÐéc""""""""ÒŒŠB""""""""§ŠB""""""""§ŠB""""""""§ŠB"òÄÊE c—¯hÈ5 ŒP’b³† )"AnÃÀpûF’äË»žµÚfý׎cöyÞ½{¸G;‡9O‘dÈÀ0¢äãnª±&sl½”ÕóFç¤""ï’ýä‚#hÿÐy¤‰ÂArߣìaïÂ5>6Õ\J"òb’â0MˆÈãAE!yb9Â2™ ™Ìuº\ÃÜÊÔ~v쳕<±Ð ³–nf2d®u²>;J(¾Üü°úsÍGpdG¹ÔŸ ¸.=qVoD™ËŸt""Çä±Í{ç¾r!ËLtþñ)˜<¶¿ ‘㥢ˆH3ÅéUÀfÅ à‘ÌdHÛ÷߆ÅI82@ÛÒÓ©à œÉ™ôÓz·axF’,׿Ö½V§<¥Ñ ÉnÃM(Yݺû{}-ÿôG‰†ÜÆ ß~›æ¾"ÉP/q€ô(6¦5íKãó6Ëoõ9)™!Ú¿uJs³|¶/òò–øêsu™lÄaDX|`–ˆÛÀÏS:Šk~X…TíünzFæX©Ïᥱº\:“­Þ},Ïøq×Ï×.¥2‚$ ìœ>¶-o´Ñ¬ý6bó1ú=ÕßñÐ|¡zÏã10Ü=ŒÔÝïlŸ²øàßÃF;É™êqž ‘d~ïm{Æ6Bt²·á&šÕý…œ^* ‰ˆ4cqÑ}ùpìD//xú·Þí—Ù‚ Ÿß9Ô¾8ÏØà,¥Ë×Éd2\ë\gvp¨zSVšg¬ n’Éd¸9ÐBzb-³×²YÊR™W ÛÓLôFixoSJ1Ú3JÚåv&C¬§ÂDo¨zžr–hïi{˜W3)ÆÛ—HdÞÍ3µÓ=ÒÍòÐäæöƒýd]/3ŸÉ0±‘è=ž›i‘wÖ\°ÌL°—±‚‡ñù ™Û“xVFy18Ã2@9Çd?ÓÔ¦pÅ.Ãì ¡™­Ó—ï墄FÓ´u_cÜomþ^¿!›Ç~u‘Lf¥AœMs_+þÉúiÙ“ø[÷èK£óÒ$¿mËI“¾uësÒ~úx ßÅ^×ÅŒÓÀÄ,séj*/Í3W®€ËQ\óC)dÍ{Û™E®ùë¯WD¨—t-—.^÷’ ’(@»+@KbáA¡­”]`á|7^kƒöëòñâuÙÐT-ïÞ~UšÄR;ã© ·ÇÏ“½ÄX!Àd*íá6憵óH AßM3^¿~™õ‰)RÅÍv¦Zˆ¤2dbÖÆBLåÊ{lÛ+¶9r-3‹„æ£ü…ˆWý¶.ºÕmXýµØÚ]ZRd«UHr‹)ºz<ì¨Ymkk€ÉÅZq«YûÕŽÓ¨^‹Ý³âÄë­îÛjµBeã†ÆJ`f‘!·‰R1ÏÊJžwH&}þêÿ V?ÁÀ‰7;»lÛGl~·í¡/»È“♓@DäÑgÆæ!æí#;=DèÆ õ; IDATé±9ˆý.WM¹T-xج;×&hõ2peŽÜD–t|”t|”3Î+\{¹‡öÒ<¡‹_ kïæj8L´½Â¥±ô®§iµÚ€4ù|‘íÁ•KÕO7ã½FuØÿ†|‰¢)@KËÆ­àL-ûíÜa™qö…qø"Ä;'ypÚRžZx&0Q‡("òx©Ë¦õÌ–Í ¯×ÖK×W¨Pÿ¾ÜÀÚ*«ÙÙbkkó÷úZLœÙ3Øýç¾ÒúZÓ¾4î¥Q^nz]ÚñíL-°X°­Ð6Ài†â‘]ó‡´#_¶bµÕo›å¥çg·Ó]]ãдJ/´­“š àj0bfGûÛ·íÒþy€Lûêx™åØKôO,Óbwât™·§œØÛ6¿;gwQYÙcÛ‘Å&òdSQHDd¿ÌVœ¡q®®\`4}ÀÊD~…4àuÚadzNÌ´÷\#ÕS"Ÿ[dqfš‰×&Œ;¹Þ2GºÝá0~-êYÙ¬“ Õ¶m¶V`më~¦êº¯gØñ𵜠H“/ÁÑ Ü£²~°.ŠÅÕáÇfè4SÒo±a'Ëz ªw†µŽšÞŸDDŽR].°˜ªÅ–Kejïzµ×&Ú¬ZMvL[Þ—hëæÚ¤™K£ŒM/âr7¯'·ý{ÛGî³´4ïËŽ|Ó»ç·V›¦9©i÷©i^nÌæ ÐÁscÒ´1àjßG<qÍjG¾,±¾^¿­‹×‡p7¨÷Ø\ZúÓä²}†öÕþöm»´_Ìï¿ÅS%ún¦è±…$ý7êvÈWûTû_au%ÉÔ×|ÛQÅ&ò„Óô1‘&J©܆Ahczi••<˜¼Nlûnd™ÄÔ4kç¸Üh!å†A(QÂæðÑÓÀØZZ0[ªû E(çˆÏ4Xè';C<[ ¼6—SžS¿Zݼ&H-d)Q]˜Ô0üDse°{è2Azî5 @)› Ö` Úqjõ]åªe–Ù.Ú\:H$—)¥lœ™¬ 
¯SB‘Çж\`vú¹|f§ãäJ@)G|zÎõpšÁá¥ïÜæû2¥E"nw¤nª­ÍÎ9›ŸðÕ*³QâË{¼×ï7Ô=s_ v'°^âìÝ—šæ·=rÒ¡û¸W^ÞÕK÷yH§Ó`âi?¢xËvžnדÓÕó—r f6ê)6ŽYâ‰j.¥”%p3´±@ŸÕ…¿%ÁHd•ï.OVÝ–)-õˆå÷Ñþ”ª…'Jdã3dë?õbD"U]ÿ©d&qŽ>¯£ù¶#MäÉ¥¢ˆHz” †Q}úIí+T{܆Ås•Øp¥éKÕm¾!r®abà æÜïÖ¦ï%’mW¹ Òðv«=ÈäËݘb=Õýû´ \cØßŠÅ3Àx§ô.ྥìï¢Èæò›Ç;ݴ̇ð‰®x¹ ÓðÜâa86Œ+æÃàÒT‰®/_#ì0ƒÙI8vïJ”‹n¡œ€«AÇÊŠ`€sußƯáL¿„Ï0ðG ®Gôx\y|4Ëf¡™ë\µ¦ô¾ARöáº\ÑNðÚ5˜æ’a`øFYé&v²ý-ÞÖ¦»m•©©$Åfïõû´wî³âìòÒ’Ÿà’ÑClm¯¾4:G“üfv¾~W-' åìxœ[>x’—w¿2¸ý^:®Í‡ŽàšN+þÈuù!|†›KÓ&<Û¶æRá¸Îðƒb˜•ó«¶® L7nÃ}išrx²:¢gÏö÷ÛWlÌõ¸}C䋽݊yû.ÇGH´G˜ 9±Ö0[Ý E¨L%Èî6ÈâÆãª°ËQÔ"ò(PQHDDDDäQb³ã²ùê÷¥‰¡ >·axF’,×׋ŠYfBõSžR4š¼UH†pnBÉêÖr>I$èÁ0 <0±ÜF£µ©CýQ¢!7†1ÂŽú”­o0—bi£ˆ°’b®mÞlûíûÛ§gmLS‹’ƒS¤úcóÄjýõ#$ëúÑc¹®¨±{0[°ÄÃôÇrìÜs[|;¦lÕÅzÐsJ‘ÂJ…޶¶}ì»L:±Jg§«Ú×zíA’™!œ»U–J‹¤Òçèó:®ˆŸ!s{ÏÊ(/gX(ç˜ìïgšÚ”§Øe˜$´mн\”Ðhš¶îkŒû­PJ1Ú3JÚåv&C¬§ÂDoˆ-Keóد.’ÉŒàÙYAÀhƒÊ©•êO–³)*´pmŒ¬Ù#ö‡”]XÃ9>Ïí—»¨,Í2:¶‚o2Eæúe,Ù 7¦{í§õ,.º/Ÿî‘èåO?3Ù& ó8µ©U¯s­»Ziëö`˜sÊ+YX¹1X-¾y‚DR»œ¨˜'»f§ÝºŸ1Eï­+x½ð¹Àö½‘ÇÇ…~ÈWÆßà÷·|e¸òÿ¾søÖwõÖ2ãß"ùÖ1žCN{üðÎÏN:‘†*‹ ¦V¡«¯‡°8èîë‚Õ)‹eXI_OÀSòdëa&“!¬ûË=?C¸?Ϊó*“af ”]`¡¯ `=ïÅʼnt~ó8»G“¥ÚLTHe—©Ž@Yƒ¶5¡ò^±?$“ÓI»ÌvN§h³aÖÖ+°ß>naÆŠqs¸» ¸—eªÿ⃑U»)禉Ä×ÀÔÅÕçþ¯ïQ)­³V9‡£/Âüb†L²s4ÄÌ®•7 &Óæw¹èÖÑNÑÜæ¶-k Ý~o¶wǺC"òx{æ¨ê¼ðþ¸£öÍ?½Åü7~Hßÿõ+fú>ÁGŽê$"Gêß¹ù&ŸÏÃg?kãK¿÷¨Í‘S©\ª®Óc³bZ_ÀlÙÙ±ñzm½Dq}… ÐÒ²c(ϦµUV²)²ÅÖV(—ªS™â½ñú}óuSœZLœi§ÃKÀ'¾°DÞ kÐ6àb£UZ_kûÃrÚö3MjŸ}ÜÁŒÍ?BÌÛGvzˆÐ%Òc rþ'M-¹Á*&¼‘nóaÎý,"OÝ÷n<ž—I/lß6¬§Õ†³-K._Æ_-ägÈ„¡:=®·ÉyÚ tw11¸@.ؾËõ‘ÇÍ‘…¶xï‡ð]ºÇ_ÿ§·I¼ùôÜñœFäPÖ×øzþ>¿^ÿë<_†DDDääåWH^§‹©Z)—ÊP[F¸úÚD›ÕB«Éމ4ùB»ÜÇ´usmÒÎÌ¥QƦñ¹¡6R¤ûz†ðŽ¿îsÛ° ;žNñÙ$±˜…Lt;7‹––æ±ÃÚíߣ²¾ÏPiÚÇ=˜­8Cã\]¹Àhº²ënùÄS«`r s¥~ŽÝaÎ}heÊe°XM«NûëKr{v®+$"§Îñ­)ôþV~÷ƒ÷yýµy¸wïðµé7¸0þ¾òm¾úŸïð½?Ïpá?Ü<îî÷ùüø·HÜ¡Éq ¬çùjÝ~_{ó^õçµif_û‹%‚ãoðûãßâOÿâÇÜcößZf`ü»|åf† ãoð¾óðC\åÕò ¾Ôù~~ €ûÕÂÐ7›Ì9n¥eSÓ¬à²Ç‚Ùéçò9˜Ž“+¥ñéY8×GÀi‡—¾sž{­º¸ti‘ˆÛÀÉn>ÌfçœÍOøj•Ù(ñehuðš µ¥DujÃðÍäž×L‡§Xbv6 ¦õkïûlv ¿ÂjÈ/’\9ø%ÜpÐ>–R#¸ ƒÐÆÂ?¥UVò`ò:7ήWH[“‹áaß–'’Íõݧ|Œ€?ºùÔ°bй9ÝçFM{w„Àò¡É,…Ú1åâ2É‘1¦l-»œ§œg!>ǹ>¯F ‰þæ¿ü€ÄgøäsïÃtçç|§vÔ½ïÞãZxá,MŽÛ~®·H|ã§üMÛG¹9ø;Ü|á=ÌÏ}ŸÛ>Yø‰¼‡/ýÏŸá/?÷~~òÝñµ¥ÜGû¾kþ(·?Íÿúéý-Æ&—3Ï}’ †DDDä$¥G¹°±¦‹ï%’mW¹ 
V§a™„f®sÕšbÐg`øIÙ‡7·ÓNðÚ5˜æ’a`øFYé&vîx<¹­3LwÛ*SSIŠña\¹0/—¦Jt}ùaÇÁîyÍ7ÞÚkSgm‘åÍ{ľÎá+8Kqú_ðZlÁë:P8[°ÏUbÃ]”¦/Õ~Cä\Ãƨ)fHTÒ|á¶5yŽèúî‹­‡k#-Ì\tמÀ¶@ǵþÝÀ›„¯jI0T;æ…GIY.s}~†@Ý:R[šö…IÙÆ™ 6þí‰Èãé©û÷ïß?\?ä+ã?ú5…j¾ó7óaþÒý z¾Qæß |Šß}/À;üõ7¾Íü³Äþõ}¾ú•ÿ ÿúÓüÑs¿ä¯^Yâösü‰ó×áΛ»ç¼ËÀ×ÿßü+üåê~ø‡Ÿâ…gŠ$þìÜ~îãLu¼ÅÀ×Îo]øþ¸ã™m|ñésë¿k—ó._/ñüç ºÏî É£ïÞ›ßãÊÜ?ð}žây­1$""""""O¸c\ìçgܹ >k‚»?ç'Tžzcë.g+ÀÇð?÷_xó§ü‘í¹½öëø_üõêö»ï49®ÎÝwø Oóg7~ð¦g€_ýsíû§ùø‡6ºú ûàÓüòíý´ÿkœÑ¡SáÌsŸäK…oóâ·Þ¡:bèó¿ÝŠo·á³"""""""¹ã+ ­¹ýó§xþã÷þ=ÆT7"g«uœáƒ7Æwþß_ñÝÿ þdcóÏì~Ü[uB|ö>L™ŸÝž¸OåàéÙqÿÌïx‡¾ý+ÞóôÚ—'Þ½7¿Ç¿õNí»§xþ³gU‘'Úñ¬)twäÜ]¾÷ÁÐý àì ¼¯Âþ‹µêÏwó|eò >ÿÍ»ÕýÏ~¿ùüûÅ_Òí¬›«µ×qÛöK|s{¼Ã½ïü˜ø/žÆ×ñÚ÷™ÿëïóãw€;ßçëwžÂÿ)ëþÛ—'š¦Ž‰ˆˆˆˆˆÈitd#…æn½ÁÜ­êë÷<ýŸ<ÛJì’ð!Ÿû?NüˆKã?â—<ŧÚ?—~oc¾×‡xþ?âÏîl,0̓ŸïzÜ[?Þu?ž~†@çÇñx àižÿ/øü|ƒ;<ç³6þèÏ }yR© $""""""§Õ,4ýˆ{kysAêt0òHYÿ[®\ÿ{¾ ¨ $""""""§Í1>’^ä×ÒÆØžâ=*‰ˆˆˆˆˆÈ)tŒOyÔáÓ—žcúÎ/ùØÙì½»ˆˆˆˆˆˆÈäÉŸ>&"""""""";hú˜ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢ˆˆˆˆˆˆˆÈ)¤¢È㤘$d†›Èb¹ù¾¹(†a`„’b²3ý„“‡oé¸å¢†az b‘“ónä‹b2ô®ä$å>yX* ‰<–*ÌŽM‘Ý£.tTrÑ ôOe©¼;§‘wŠB"«µ8‘éïR]HDDDDDDž0* ‰<–\¸\°zc„øòIÇ"""""""#…DK6‚á«t°ÊÔX‚üC´PÌÆ úp†á!Ð?D,»m-‚ÚF½ñê·éÑ †Á}勆¦qEªœØe[!Ä0 ‚‰BÝOK,ÏG ÷ìO]LF4…#=n ï'Âb©I‡Ë9¢Ã0’*4ÙQDD¤‰bŽd4LÏ]]»Ï0ðúŠe›¯áWÌ2à1 ·àPŒFi®~ÿØPŸûçØÑDŒ¡þÚ9 ·¯‡p4INK‰ˆHŠB")“-ÀÕs°4F4y"GÔH€ ý,,­×Ö ºÇjv‰þ‹ôLî=%í×lºL°–H³³ôSfi~ŽÕ…¥Ū"ÙÔpžn¯µ¶{žDÈÇ‹_ˆóÚÊâ)§˜ 2·R*¬ÓŽÕ²KÀåefúû‰¯šp O2â±îÑC‘ʹ( ½ŒÆ_ce}s¥½{«Y&ú¹Øk•8¡‹ýL½¶Ê=€Ê:K ô_ìa2·3ËR#.ô3±°ÄÆi6Ï1IƒCv¶‘ q±‚…líœ@e}…×â£ô^ 1¯Âˆˆ ¢Èc­½{„Ëç =6¶ï›»BrŒÁ¹ULöN†_¹M&“!“y[¯ Óy®ÂÊ~¢O6kõ3™Ép½»ú­kø™L†Éûßã˜`m¥ü¶”—˜Ÿ«ÀJ‚ìömÅ4 i༷ Ln:ÌXºç¶ÆóêËÝØMÕxFS †ÍÞàÝ\»!“Éðúõ¶Æ=&9ØËÔ¸†o2éWAHDDB9ÇÔPœUÎÑ9ü 
·^ÏlÉ¡Þ6¨¬L‘hðˆtüé¶.^~õõê1¯¿ÊË]砲¡©­EžB’±Á9VMv:‡_áv¦–çn½Âpç9*+7è.6ÿ§œef,M…st_»Åëµ66Ï›fjNóÏEDDE!‘Ç›ÙAßP7m•4£)šÍžªÊKƒ©‹ñë#øÛ7†Ö˜im÷329Œ‹ ³Ósì5öÈÑÙG+$¶U~ÊKóÌUÚhkÛ¹­˜^ t3dŦŽ ˆâÛT˜ùN1eÅÓ 6[̦ⓨð ‚ó©!¦?ÚR ”þoÉó1“izÿ¹®ë¾Ù™›}õz_÷ÞIÒub($}ç‰ifÆBt6gYÚzÇ|òf•rÈ:/DéOIÛÕw¯70˜f<»•ÆÒnõ Ì4ÓiØ-7N•RÛìR#‰nçÇÍ-6ÆòdÎÉk©“Qà Æî[¿ü¦IÄ.àsš¥ÿxöœüCîçbœ›I’tÃE¶êuê¥"Àq»Ía«Iãi™Òì8wËÝÃ:tÞ:5”›äí—è'“`³±{²­Iµû fäü5‰îƒšêeêp¤&5™]Ú¢uè»J%Içûû×€¤&UX óä.‹Ëd¾ ý8Üßíþªº>E°~Y› v`äÆeÇÄHæ†`±Jã0Gÿ €&›å™…)2°^¦ÑÊ‹ñªt,”Ïr’ Ñ>: 1|Q`#ž6ìoüBÝGßEkÅÛLtN~1]v’$]Õ!ÕyfWj½ýÐxûE"=÷Øpÿ q`·µÏ! nî³Û}P3uùƒšÆeêH’ü6ßcû÷_ùùwà‡nfǹK“ˆ]ò•$õg I×A$Ío3I8XçÞe憎àØó¼Ê¡ý™<7©±Y;ù×Êæ&åN’äP„ÈP’$»TÝ)>ÝÒ±(“£Ãoµº0µ ùÀé=N‡ü ÓC°÷{‰'­kG’¤®}*…[L-ŸB? OdÈÿ6ÇÃÇò¯ü4ùCˆ¾76\ùAMçÒCà Vù÷ÜC?œlz¾Ç³õy¦~þ‘Ôí{<õEœ’$œ)$]7²EfÊ?³¸¼H9½Jî’cCw±Ux;œyo‘™›ðjƒvv„Öf™Nr†ä €$™$ÌWjìçnÒØ¬Atšä9K\AÓþÀïÑüCV‹ ";m*¿,³Xªps)Ë¥“Ÿ$Iº@»ºÌ|­cük©øz} Íiôy‡£ó¶‡îðh«ÀÇ=©# fgYÍÎÒn5¨n=¥ZÞäÙÞs:»OøÇT„þKfK’zƒ3…¤k#Fnfš¶Y,UØ?'g¹‹:»->Í›h#¤oz¶ÉÖa·t,ž> ^nHÁv•F³[:64žát&éëN§oÔv/X$»Ån A¼ïÜ.‹t×SÌ3›Bm‘ç½ÅL’¤+h56+¾A›£sÓ®ÆîÁ¹ÛÛ­»@(ë>;oĈwÔ´>á+ã#±ÙÛ³”ÊUêÞg, ”©î¾óTIÒ5g($]'ƒyîußQÏâjëíýñ£! 
¶ÊïÍs¦à7(e‚`–«æ'á¡FCÏØ|°N¹'wjõçþD–85J…5†È&ß\c!<œ"°Y¦zÎ4öã­2+@4Iüƒ_&1Y$ê°9»Ä»Öâ–$é½íW)o^¼»S®œóüi±¹º „MÅO¶ÅIuÔ¬þÞ<çµóÇ4JY‚ `ö’õñÖAÜ;çÕõ7¤>ÁdaIÒõ`($]+a†'gÉG;Ôjµsv'ÈM{¬OM±ÔØõËâñ~ƒ¥©ëÍçIžZƒ2t2£ç¨}Î?ƒ†ä&£<ÛܤÍ0;µ/6D& ÏŸ?‡›ç¼aìd!L¨1_¸Geçå/¸Çìo•˜¸»A‡ÉéQΩ:»ºHšé™$t6(­œ÷K¶$I—‹Æ“l”Jlí¿zz²ßXeêö<ç¥) Ý5ùäÅ‹E‡#Œ…€'³Ì–wxýò±6;•K›@h”WY”$©g¹¦tÝ„LÏŒQþuãÜ•zÇ—øgó6ÿx¶ÍïS·øýÌþÐÐ4÷§‡ßxXl8`÷Á/ 9÷'KÙ׫ó &sD——9J'΄7ƒ$sQ–—¸™Iñö¯¯a† KÌíþÌ|í ó¿I›’¤ž¦¸úˆ™ÌÐÉóðäÙùÏót­@*•î–hW·Ø=;%µo”…?2Ç‘•ÿ“üÍøm$2¿ñðÏ2EW˜–${ñâÅ‹¯=I’$I’$}YÎ’$I’$IêA†B’$I’$I=ÈPH’$I’$© I’$I’$õ C!I’$I’¤d($I’$I(¦W6 IDAT’Ôƒ …$I’$I’z¡$I’$IR2’$I’$IêA†B’$I’$I=ÈPH’$I’$© I’$I’$õ C!I’$I’¤d($I’$I’Ôƒ …$I’$I’z¡$I’$IR2’$I’$IêA†B’$I’$I=ÈPH’$I’$© I’$I’$õ C!I’$I’¤d($I’$I’Ôƒþþµ éjþïÿý¿_{’$}Rÿëý¯¯Ò¯ÏTIÒuò1ÏÓ¿½xñâÅ'‹$I’$I’¾–I’$I’$õ C!I’$I’¤d($I’$I’Ôƒ …$I’$I’z¡$I’$IR2’$I’$IêA†B’$I’$I=ÈPH’$I’$© I’$I’$õ C!I’$I’¤d($I’$I’Ôƒ …$I’$I’z¡$I’$IR2’$I’$IêA†B’$I’$I=ÈPH’$I’$© I’$I’$õ C!I’$I’¤d($I’$I’Ôƒ …$I’$I’z¡¤ïX“R*‡¯·V A@©ùõFö¶“±*^õØÓŸÔã ZÇŸª¯ïxg©t@ÌRÝ«P8ósüb¿bß’$IÒWd($騱¸²Å¥yÉŽ÷¬–ž~›!JrŽ?ëuêõ:õ§ 7æùyªLëkë“hSûý¡âÔë ¤ÿŸ¯=I’$©÷ Iº:%ÖwÞ÷¬&Ë·¦Xnu>Ç>­H‚âÂ4ÑíEVªí ¦X¯S_Êrã‹îCÓnC,Öÿµ"I’$õ,C!Iß¿h”({,/Ÿ_6uܪ°0ž&Ò¹"kÍ6pH¥0Á:@mžŸ‚ÿá‚ì*Ýlé§…€ Ȳz6uËÒnSní&åÙqFRAf|¡ÂΫ¬æ¤ŒkªD©"îq6ÇÙ¯H) •ý«_g,NØl´.èãtùØ1…A°ÀÖË)TÇ RAñ)í ïËÕÆÿq©~b¾ëÁyu~ûUÆGH”ÎÖvº3Á+‚)J« Œ§ßc@»ÉZ¡Ûfj¤Àj㛜&I’$}1†B’¾±IŠÓP[deëLY»ÊüíyjÃ%þª×Y»ÝáÁDòþ ²KÈÃI™Öÿá¿37á`“íp¼K£pÀæv h³]«A^$=gyq“­ýÑé‰0^å¾\ÚöçtÌÎÚ¯L=Ø¡/ž ‘ óæD¥>Bì‚}í»lðëm¼¹=߆›ŸbÌ’$IÒ÷ÇPHÒµq#;Íôj彃×CÝ?òê¼U¥ÄÙµlúIdã°¸Ârë€P:Az8Bœ¥RPšÄ DŽº¡ÓqûNæút¿‡ˆöG€.Íóp)ÎêÏó,®l‘™M]}¶Pk—IÄáŠï ‹%s ±È“Å5¢L'»;Þë¾|a‡U–´™|\åv د0õûæÇµ‰gŒéÿÌ’:{Ã[×¶$I’ô²|LÒ52H¾8ö2ïàF*G&ÕÍmº <A–Róè#žŽÚ–Èg—Ý]H'b"í°·w@(—a'²Ü€•ušm Ýd}e&É%ÞñÄâ IJg†ÞïiíÊË+ Lsç}øéÏ¿ µZ â㤯r_¾mº“®Ú4ÖWiÐ9]?öþbIrC¬—wè6Û ”K1ûiWЖ$I’¾+†B’®•pj’™ä© ‘4sks$›E~ ~^n3öχ‡Ã@?‰± }­üÜf­ÅI$9I¤C@ˆÑtü¤“a «˜é¯rw$ ¹K5>Çãµq¯8ÎØh‘|ôâ7¦'oE ‚€`äW*Ñ™÷êãäÊfÊ%‰]é¾|e7ÒLÿãÉT@jd–fºÈô@ƒfëc«~r÷’¨ýÊHdØÏ=bîÓ® -I’$}WþöâÅ‹_{’$I’$Iú²œ)$I’$I’Ôƒ …$I’$I’z¡$I’$IR2’$I’$IêA†B’$I’$I=ÈPH’$I’$© I’$I’$õ ¿íHÒ×Õ¤L°žœãÏ¥,7N¶V 
ü4_#ÿ¨Nf3`bý‚ÓóxÄÄ¥ûëEº}¼±ãnNrïÞm†#—Œë½Î‘$I’¤«3’¤w.Ö©ᢠ޵¿ÙýãÔöãV…»·ç™˜œ9öŒ9G’$I’®Àò1Iú ± ùQ Vc»ýùΑ$I’¤‹ IÒw«I)¦J” )‚àÕ6ÐnRžg$iÆ*ì¼ ‘vVÉAñ)m`¿R R*»Tg‚Ôã“c[enÙÕ¯sy’$I’>+ËÇ$  6ÏOÁüëî¸õ„õ e2$®¸>Ð…ç4ZÄÿØ¢Þ°Ãjn‚åÈ4ž®2L“Õ¿Œñïò8ƒƒyîÝ©0ñû2+•ç´k„’sÌdãôõÁ檻³$†a¿Qa—(ÓÉÁÏp$I’$}mÎ’$è®ÝS¯S?ùü9—üô}Ôæù)‚€ÿº½B{l޵¹4—fBW9'žf¸¿ûõx«ÌòŒM滋QG†ÉOŽÁÞ2å­c Ìðä,ùèëó‹ÔH23“¥'Ҍѡ¼ÙiT·!šÃLH’$Iºžœ)$I_Ê[ P¢súBüpòµ}t@8~µûå÷ƒ£6†p‚Ñüëö 9J²ÿå FÆBlTìLF©Ö :ÄLH’$Iºžœ)$I×H¤/ ÀqûøÕ¶î÷Ñþ“ùEû–ì …àÙ<ª/ 342Jè Lyéψ’sš$I’tm IÒ5Nd¹3+ë4O^_Ù€Ir‰0°OeqžZ(ÉÜÚCî tØœ]äe.a4tÀÆÆ¦¥c’$IÒ5g($I×Ix˜Âê#fú«Ü FîRÏñxmœA`¿²Èb ’33ŒÄ†™œÍíl2»X¥ NÍ…ˆæ,“$I’®³¿½xñâÅׄ$éÛ±³šå—e˜þw…qS!I’$éÚr¦$é”&›åKÇ$I’¤àÛÇ$I]ÍÁÄ:¡¾$¿ý+oé˜$I’tÍY>&I’$I’Ôƒ,“$I’$IêA†B’$I’$I=ÈPH’$I’$© I’$I’$õ ß>&ékR &XOÎñçR–'[+~š¯‘T'³0±~ÁéùGÞØñ7'¹wï6ÑS›÷«,Ì.òdûˆN¨¡Ñiî³Ä—Œý*íJ’$IÒp¦¤ž6\¬S¯×©×‘HÎñgýd[qøû_9µý?‹ô×01[áðÕ-Ö wÙˆLó¸^§þp”£y ë;—ðíJ’$IÒ‡1’¤O,Ëj5¶Û'›Ôö€X?ýÃ*õ:•ñÁkW’$I’>¡$} á€õ"SkM>M¦Ó¤S%J…Apjh7)ÏŽ3’ ‚4ã v^v¸³J.ŠOiû•A¢PÙ¥:¤hŸÛ*s;È®¾c6“$I’¤ï’¡¤ë¯6ÏOA@pòùi¾öY»;n=a}B™ ‰—kÿD’äï Ïi<˜àÇô«÷+;·]€F‹øÌõú=Ò‘VÇ'XÜOsÿiú_K¤wçùe|•€Á<÷î À³eV*ek„’3Ìdã$GÇ ó„ên·ÙýF…]¢ä’WŸÍ$I’$éûa($éú;½P½ÎŸsÉOßÇ©àé¿n¯Ð›cm.Íëì&ÌpaÇs£ÄCÀóËS·(Tö?²] žf¸¿ûõx«ÌòŒM滋QG†ÉOŽÁÞ2å­ãî8&gÉGXŸ_¤F’™™,ý@8‘fŒåÍ&pH£º ÑfB’$IÒõäÛÇ$éS8ó†³ó…‰eï±–™¤±2Ká÷mj‹ešÙÃr•vûBüpòµ}tÐí)òú•f/¿µ0„ŒæX°ÉQ’ý/L02b£Ú`g2JµÑé$fB’$IÒõäL!IúÒÂý$ ÷™IÎ'm:Òà¸}üj[÷{ˆhÿÉü¢ý Ëö…BðlžÕ— …%tP¦¼ôŒg–ŽI’$Iך¡$}íê=RA@¡|R.ÖÞc·¡L‚Ø'ì'œÈrg6VÖiž,:½¾²“äa`ŸÊâ<µP’¹µ‡Üè°9»ÈË\(<4Âhè€MKÇ$I’¤kÎPH’¾€Hz†µ¹1Ú+?w¼™¥™ý„ŒæNÚ%ÙÏ«í#c!:Õ;íÕDsIÌ„$I’¤ëÉPH’>±Ùi¦ààààÕ¶H_€ãöëYAÝï!¢ý‘³M|°+õ³_aùÁ¡PžÍó ú2” 342Jè LyéÏ,“$I’®5C!IúäÉÇÚNd¹3+ë4Oƒ^_Ù€Ir‰³S>Ü»ûÙ§²8O-”dní!w:lÎ.ò2 0:`ccÓÒ1I’$éš3’¤Ï œšd&yzÃ0…ÕGÌôW¹;ŒÜ¥ŸãñÚø§-ÏzG?û•Ekœ™a$6Ìälžhg“ÙÅ*m€p‚l®gY:&I’$]o{ñâÅ‹¯=IÒ·cg5Ë/Ë0ýï ã¦B’$IÒµåL!IÒ)M6Ë–ŽI’$I=àï_{’¤oD³D0±N¨/ÉoÿÊ[:&I’$]s–I’$I’$õ ËÇ$I’$I’z¡$I’$IR2’$I’$IêA†B’$I’$I=ÈPHÒGjR ‚ó>… ‡_{x—:û…ã|ym*§8¬‚€Ró óJÞu-§V(Á« 8çg˜a|¡Bëø3û}V(œùyœ»ï²ã$I’$†B’>•äÖëÔO–²Ü¸Â©Çû VKO¿á©ÆâÊ’|û×vÊéŸáÓ†óü¼9¦ýJT¢PÙŸ+}S,NØl´>¼‹œ”w­UVOéq*­îÏå­Ò¯îõ¾.×ëÐzVzû¼sÚÕÆaƒÕ© ûßÇj㻘»%I’$}V†B’>Újî¾ß¬ŸØ$Åé¨-²²u&~h_4Þg®íÿðß™›p°Év 8Þ¥Q8`s»´Ù®Õ 
ž#Ûau|‚Åý4÷ŸÖ©ÿµDzwž_Æ_J—Üày³Da¾F4ÿûÙþ÷¸Ð3­Ög*«±¼ÙÇBµN}-ÇÁbåæUâºëÕø{œ·Oùîä¿xZ¯³õ(M£°üV&I’$õC!IŸÆ9k -½ªÝ 3<9K>zÀúü"5’ÌÌd9/®h76Ùì@:“ ôßÌd›r­õú xšá³''3Üì"C$’@m›€È¥ÿüÁt¨ÆÒB‘âÊ.pÀÑ{Vt 勌…:l”ÖÙ~ßñžˆ$Ò$Ù¥Ú<„ÝOˆÂn£Eû¸ÉÖ&D3CD·Ê,ïÁØdžá&?9{Ë”O‡Rç݇Ö*Å©uö3,‡»¡Î7+Dnò促þ,ã¹ÊÕÝ+œerú=ÎkÕ(ogÈe‰ôçXÚz3H“$I’z‘¡¤/#œ`4?Ðýž%yÁ–ãvwúÆúÄÉŒ£Ÿæ©»­SÓ:úBüpÕ~ŸRøñë-bÙ"¥BòÇŸbr& {Ë”VZï7Þ—n$HA­¶ÍV£J'š£8™„Í-šnH”KÒ>:èvyé¼ü~pôŽûp°Ç^hTùè ©ãv·,+Ö¥Ãß_‚xôõßâI:W ëbôõ½Çyí»ôÑg$I’$½ÁPHÒ—±_aùÁ¡PžÍóà¢ÚP÷ü£3o2+P·‡µ'Ô:+É÷ }ø¢Ï7²ÓLÀÁÁÁŽ·ŸD6›+,oJ'H§‰ó„R©J'”&1‘¾nRrÜ~=+¨û=D´ÿÉF4ÏÃÇs$?âi¯´v©™DücZ¹¬ŽŽ^ÿmo·F(t•óŽè<ó"1âqd¹˜$I’ôC!I_À>•Åyj¡$sk¹3ÐasvñdM—>â à¨ÍsàF*G&ÕÍÆ©E©³”®´ÖÌÛ‘nˆ²¿ÇMÖWqƒä‹cœÎ.ï›×K䈳Ëî.¤1ˆ ‘‰vØÛ; ”Ë0 „Yî ÀÆÊ:Í6Ðn²¾²“äï(‹Åˆe)Î }àÓN´w(/¯p00ÍÏVgu@¹|²öÓ~…Õò“™a¸#ª±ù¬»ÔáÓuÊoœ·Ëj¹{¿ß8ï"±$¹¡MÊ•î9í-JÙk­ÏrQ’$IÒwÃPHÒ§qÎBÓAP¢ ìWY¬Arf†‘Ø0“³y¢Mf«´é'1–¡¯õ€ŸƒÛ¬¥™[›#Ù,òcðór›±>¤8üa«ãDÒÓÜSûÇO¤n•8ÎŽ14š­j/œšdætZä²ñž¹¶'!@’äP$‘!FÓ'3rÂÃV1Ó_åîH@0r—j|ŽÇkã ^qœ±Ñ"ùèÅoL;×éŸáȯT¢3ïÕçûKîßân: ¸]&º°Äø À0ù…1ÚK·R#ÊôUIòñ&…·Î»H?¹ûIÔ~e$Hý¼Âqq‰Û±Ïva’$IÒwáo/^¼xñµ!Iê1‡ ?m’ùs‰ìçY°H’$IÒ;8SH’$I’$© I’$I’$õ ËÇ$I’$I’z3…$I’$I’z¡$I’$IR2’$I’$IêA†B’$I’$I=ÈPHÒ5Ѥç|JÍSû ?i¿‡T AP¢yé¸ TNu|X)œÛ·âsÝ#I’$Iߢ¿íHÒ'•œãÏ¥,7¾ö8ÞRcqe‹ÌlŠð{žy¼ß`}ýÑâÈ7x]’$I’¾WÎ’Ô#†)ÖëÔ¿b`ÔÙ(±¾ó¾g5Y¾5År«ó9†$I’$©‡ IêgJ£vVÉAñ)m`¿R R*û·*,Œ§ ‚€t®ÈZ³ýª¥ã5 #)‚ ÍÔZƒöUòšh”({,/Ÿ_šu~‡T ¬Ôæù)øþw1 È®ÒÍ–yZ‚,«'aS·,í6åÐnRžg$iÆ*켺Œ“û1U¢TH÷¨¶ßÓ~¥@êÔ=‘$I’t½ IêMƒyîÝ€gˬTÊ,.Ö%g˜ÉöC»ÊüíyjÃ%þª×Y»ÝáÁDò>pÜ 4ñ€Z¼Èõ*÷·)7®Ð_l’âôÔYÙ:~sß…ýÝ »ôˆ­UŠSëì%fX*¿÷H’$I’¾.4-ézyŸ…¦Ã Fó¬?؃ä(É“pä¸Ý­£ZŸº¥[/µÚ†Zôõ½œVó¡¾+Ž-œbr&ÉÆü2¥•è«Í—õ÷– ÒC°XÛfë¨J'š£8ÙàîüÍÑ0Oˆ2™¤½{Ðí2ò:ÒyùýàèT»}!~8ÛÇÁ{*Ãý®n-I’$]K†B’z×~…å{„B!:ÏæyPM±Ž@¨»;ÿ¨NqøÌ9ÍP£µÃ7€çtŽ®Þåì4Ó«5–÷^o¼¬¿·^tßO"‡Å–[„Ò ÒÃâ”(•úè„Ò$!rÔ ŽÛÇp2×§û=D´?p¡hž‡KqVžÿà7¦I’$IúöY>&©GíSYœ§J2·ö;6g©¶áF*G&ÕÍÆ©E¨³”šÇO3‚Ú“gìíF™µÝ÷éw|qìe¼£?úˆ'€£6ÏOŽ%rÄÙewÒ‰ĆÈD;ìíÊe‰,w`cefh7Y_Ù€Ir‰wD<±8±,Å™¡|cš$I’¤ï¡¤ž´_Yd±É™FbÃLÎæ‰v6™]¬Òޤ™[›#Ù,òcðór›±>¤8†p‚âÚ ™Ý·R#šqrÉ÷ë;œšdæô9—õG?‰± }­üÜf­ÅI$9I¤C@ˆÑôÉGáa «˜é¯rw$ ¹K5>Çãµq¯8ÎØh‘|ôâ7¦I’$Iú¾ýíÅ‹/¾ö $I’$I’ôe9SH’$I’$© I’$I’$õ C!I’$I’¤d($I’$I’Ôƒ …$I’$I’z¡$I’$IR2’$I’$IêA†B’®¹&¥ 
8çSjžÚ_¨pøIû=¤R‚ÍËÛ¯²0>B*R#Œ/Th_µÏ5vI’$I½àï_{’ôE$çøs)˯=Ž7´X+Üe£Ž?êYú›Kd'æ)ôÇ©Œ~íÁq¼ß`}ýÑâÈ7vß$I’$} †B’zÜ0Åzâ×èú°ImHõÓ0\ R/|‘œ£Éò­)Ö“sŒ~í¡H’$Iú,,“ÔãΔ`í¬’ ‚âSÚÀ~¥@¤(Tö8nUXOé\‘µfûUKÇ;kFRAš©µíÎ;ºGˆ¬™ZkÒ~뀳åa”¤uZ<+“R#ÖvŽß<j‰Êj‘ÔÙý]Ï!•Âëµy~ T¬O“$I’®¥Õ‹µ IDATC!I:m0Ͻ;ðl™•J™ÅÅ¡ä 3Ù~hW™¿=Om¸Ä_õ:k·;<˜(PÞŽ”&P‹ù£^åþà6åÆ;úŠ$ÉßžÓx0Áé)V¾4çV©Öÿ ¯ñ`¢DãôºD]:™%ž>½Oæ¨Æƒåj7dºðzn]zDºewõ%²ÖI’$I׎¡¤ÞP›ç§Ó M_¸8s˜áÉYòÑÖç©‘df&K?Ðnl²Ùt&Aè¿™!É6åZ v«lt “ËÐD9nÇß5¨0Ã…5ÏÏ,OÝz5+éÊ’921€~nŽ&¡³Au÷ôþ 7ûȉ$PÛæà]×#I’$éÚsM!I½á}š'Ͱþ`’£$û»›Ûݯõ‰ [ZõR«Ía¨@__ädã„ú®Ô±ì=Ö2“4Vf)ü¾Mm±L3[`ø*§Ÿq£?ÔhµyW—]$I’¤ëÏPH’ÎÚ¯°ü`P(DçÙ<ª)ÒuwçÕ)ž \š1 Fkÿ†oÏé½GŸá~…ûÌìþÄ|í¢Åˆ.h³/Ï8Üo‹Ý.ïó²ëysÕ"I’$I×åc’ô†}*‹óÔBIæÖrg Ãæì"Õ6ÜHåÈ„ ºÙ8µu–RóâiÆBP{òŒ} Ý(³¶{yOíê=RA@¡|R.ÖÞc·¡L‚bq µËÞ1ÐÚ¢r^›UÖm`ŸgOj#ýÎÒµw\}ÄÀQ›çW¹m’$I’¾;†B’tÊ~e‘Å$gf‰ 39›'ÚÙdv±J;’fnmŽd³ÈAÀÏËmÆþùâp Šk3dvKÜJPhÆÉ%/ï+’žamnŒöÊÏÝuŽFfi&çX›KwßJFŒÑ¹ßH´×™úq„ÂV™óÚL¤è{Z Ü¢´›af­H"|…‹½ìzè'1–¡¯õ€ŸƒÛ¬µÞóFJ’$IúæýíÅ‹/¾ö $I’$I’ôe9SH’$I’$© I’$I’$õ C!I’$I’¤d($I’$I’Ôƒ …$I’$I’z¡$I’$IR2’$I’$IêA†B’ôÝjR ‚ÓŸÔã ZÇg)P9|}æa¥@”š§šÛ¯²0>BêÜv>ƒÃ-J'ý¥F ¬í\ÒÙ·p¬$I’tÍ IÒ÷.9ÇŸõ:õzúÓ†óü±Hµý­+I’$]?†B’tD¦‰n/²r&Ýèl”¸0ã9lRÛbýô ¨ÔëTÆ?Ó@hmC,Þ2ì éέoõXI’$éú1’¤ë&' lžN7¢Q¢ì±¼\áð¼sÂ"ëE¦Öš|þÉ21™­Ý}ÚM¶Bc¤ãßê±’$IÒõc($I×M8B ÕzÅ&)N@m‘•­sŠÈ"Iòw€ç4LðczŠÕƹñÑ'!ýÛ±ò-‚ àÇٷ׊$Âßê±’$IÒõc($I=b(_d,Ôa£´Îö[{à Öx<7J<?ÜO¢pŸ™$Ðé|¶aJ’$Iúº …$é:iïP^^á``š;éÈ9 ’/Ž:{Zõ©  P>©jï±Û‚P&AìsŒ3žáÎ@•JõdÝ¢ýMÖŸ Kžó¶³oáXI’$é2’¤ï]mžŸ‚€ F~¥áñÚ8EáÔdwÐ)‘ô ksc´W~>ig–fr޵¹4çEK-pH¥Pj~Íc%I’¤ëïo/^¼xñµ!I’$I’¤/Ë™B’$I’$I=ÈPH’$I’$© I’$I’$õ C!I’$I’¤d($I’$I’Ôƒ …$I’$I’z¡$I’$IR2’ôéV(AÌVi¿ÞA¥p²=(P9üŠc|K“R*œ?¬“ýg>©‘qªû—·yÞçÂ~>Ø?¨¯—?«Í湺cË-59>§ßÒçéT’$IÒ)†B’>¯Í-š/ÿ¯¿½M­öéš>Þo°Zzú •÷œãÏzz½N½þK™#6îXk]õœ“ÏR–_jÌß°½ßK†B’>Ÿtš4ªvO š!“xó°ÃÆ*…‘T·¬*5BamçUIÑ~uñ“}¯Kµ©&X¨ÍóÓI)Úq«ÂÂxš H犬5_5'eTS%J…Apê'ÉpÚ46«ÊID>°—c[¢²Z`$)°¶óº¨ê²û@§Å³Ò8éàísϺøÁñÎÚI?i¦Ö´¿@ÞŸfz‘å‹~ ‡ V #¤®pm’$I’Þ¡¤Ï(A"]i±[ë@2Î…ÚOYœZ†éÇÔëuO÷Q{p—õàð)‹w7hßyD½^çáèwg)ïß »ôˆ<œ”f-‘ W™¿=Om¸Ä_õ:k·;<˜(P>=¥Ñ">³E½~ô‡f8µy~zµ6ÐL­0”Ï1tY{oœÓýÎ.¨ÔØ¥“YâéÓûdŽjÐx<—d÷÷)n-]²2ò9k -eϬ(”Ìp³ˆ ‘Hµmà÷çå¹921€~nŽ&¡³Au÷ía\zv«lt “ËÐD9n’2¿w 
O2›«±¸ÜxsT«Fy;C.;Hˆ$òŒoRÞú¦V(—$I’¾[†B’>«ñQ6)/oR#Iò씚ç~¼ÅÄz‹X¶H©ÏÄ­ÿ"=µÆyDÇíîô‘õ‰“Ù8?ÍSvO¯õÓâ‡OzuabÙqr@g·õy¼¾ìþœãF €VëíÑ\v[-úú^þ|~ Ô÷i.áÝÂ$&‹ —Xß9•vµ[ìÒÇ«!‚/³BI’$Iéï_{’®¹Áé¬×j&~ãÍÿ§?¬=¡Ö|±HvößX8:Ìàí‡To·i5·ØZ]áÁ³Ü]OP?ÓO¨ûGþQâðÙA|÷›G"„?C³—ߟ×÷ôp¿@,vºs^»ì5c@Öþ! ßžÓ9úTWq‘4¿Í•¹µ¸Êh˜î½ŒÄˆÓਠÝée'ú‚ã’$I’®1g IúÌb$ÒÝo¡t‚Á3{Ñî4ýýC8n²¾zj!›UrA@¡Ü&6<ÂíÉq Ö×ôOGmž7R92!¨n6hû•A¥Ôü¼ >-S&Dæ¤$ëS»ôþ¼ÔXe½Ñöyö¤¡1Òç”~]zâiÆBP{òŒ} Ý(³vN Úçtcd†™È//1–$7´I¹²Ch7ÖYm$É$n\ÖŒ$I’¤+2’ô™Eºë £É·“ŠHzšû£qjÿø‰Ô­ÇÙ1†€F³ƒã,ý+Ohív·ÜiªLtú!sÙ@?‰± }­üÜfí(ÍÜÚÉf‘ƒ€Ÿ—ÛŒýó!Åá÷œ¿ó®E¡Ïìÿiq‡äo˜»låêsÚ ‚Ò•æ/]z^J¤è{Z Ü¢´›af­Hâ¼ËŽ\r Šk3dvKÜJPhÆÉ]^©öô“žfàÔßs÷’¨ýÊH]Ø'÷h³Ë1I’$Iú0{ñâÅ‹¯=I’$I’$}YÎ’$I’$IêA†B’$I’$I=ÈPH’$I’$© I’$I’$õ C!I’$I’¤d($I’$I’Ôƒ …$I’$I’z¡¤Ð¤ç|JÍSû ?Wߟ¥mà°B!ºÂùך&W\£Ù~ÇÏû|’q¿ë|Ì=:¤R‚ÍwüžŽi”²¤f«¼uëZeÆSž~–ª$I’¤Óþþµ éHÎñçR–Ÿ¹›ãýë뇌G>{_:u­Ç­ woÏ31¹üú¿Ðýù~„ILÏ0úã]–¶þb6>Ù~H¥´HdæF¼Y’$IÒgçL!I߉&Ë·¦XnuNþ>L±^§þÖp,C~¨Õؾp¶ÎN19—àIi…æqwS»ú€Åö Ålÿ×›$I’Ô# …$}Yí&åÙqFRÝò«ñ… ;§•ÃÆ*…‘T·Ä*5Bam‡c©&X¨ÍÿÿíÝ?L[Ù¾÷ÿ÷‘®LK¿+hBK?É4Py7ö)âiÌ!NC¦i‚ O1NFúŠ!MœGÒ$MH1ÐÄ)bŠfœbpc7×n®S(ŽÑ•ŽS£m–§oro'Éýõ2å?–HÖøaz™m€æ:÷fÂì3Êå2Ïf)=¸ÍêöKO˜‚ÎV¬ò§-ªÔh§–X_¿Oj¯Äƒ‡ÅNhÔª»ù€R4Ë‹r‘¥ñ=6+íiZõ5VŸC(•"îqciï)”);4ç´ºO“£m+´ÒËË/ÈFK<¸™£Ò-$iY¸¾@i,Çå2+×Û<¸™!¿sr~îl‘ÿ‹óó~ÃLdïÀ½»dç)&ï25rxm‡|æ&¥ø¯¬—Ël>IQÉÜîÔZ}Ì͵8¿n–)— Üb܆‡I’$Içe($é¯;zœr°qk3ÏÃW0ykб0cêÖ$¼zH~³áqr¾`6Tbi1Köq Øeï<+Vâ).áQbq ´Å.@­Èó6į\fÇÒ\þµgýûõÇ4'çY™OÒ+êYeÊGþ,OµN«û,sO“Š sùJÚÏ)ÖN–Ѭl°Ñ†d*F¾œ"ÎùRýÍü¤Ò©¿6?çIsgv—¥wfÇ8<]ˆz‰üÖ$Sé‘ÎÜO0>¨5f°¾ÁÆú6;­0‰¹Í“ó)I’$é½J‰z€Èða• ~@GŸûÐèÆú¹ædh8”¨×0öîµV³³UlõfÐÙŠw¨Þ¤ª08xo}àü|€ÁÁÄÆˆùb³Nçüø÷çïÞ<Õ„ô4O–Bg5¿M Y!—N0WlÒ,Þ%‘Éw—bøb„ÁÁpï-|’$I’N0’ôù Œ‘Y~Âá"·Ç‚ñÛ£ó<[é¬ð'g¹%JéçïI\ÍÑš˜d¨TëÀ0±Éƒõ\ ®³R?ïØ1²O~"^Ëq51Î\5J2ö±ð]š>ë[ÏzÏÉX‚Áõ Éà*¹ZŠ;+YbÝvR…“̯̯fù.¸ö°Éä/ÈŽ tæg婃ùÉT£¤ãçñ?Ì0éûˆ•~d<&ÙI?a>&œÌò(¾Iv< \{Ü"»˜ÆÙK’$Içó·ýýýý/]„$}~ ™ïYhÏóâÑ„‚$I’¤¾ãJ!Iý¡±N&&–:+tš[TJŠF „$I’$õ%W Iê;ÅEæî­±µ×.0:™ånv‚ˆ/­’$I’Ô‡ …$I’$I’úÛÇ$I’$I’ú¡$I’$IR2’$I’$IêC†B’$I’$I}ÈPH’$I’$© I’$I’$õ!C!I’$I’¤>d($I’$I’Ô‡ …$I’$I’ú¡$I’$IR2’$I’$IêC†B’$I’$I}ÈPH’$I’$© I’$I’$õ!C!I’$I’¤>d($I’$I’Ô‡ …$I’$I’ú¡$I’$IR2’$I’$IêC†B’$I’$I}ÈPH’$I’$© 
I’$I’$õ!C!I’$I’¤>d($I’$I’Ô‡ …$I’$I’ú¡$I’$IR2’$I’$IêC†B’$I’$I}ÈPH’$I’$© I’$I’$õ!C!I’$I’¤>d($I’$I’Ô‡ …$I’$I’ú¡$I’$IR2’$I’$IêC†B’$I’$I}ÈPH’$I’$© I’$I’$õ!C!I’$I’¤>d($I’$I’Ô‡þãK é2ß/PêuOhèXŒ+7n1‘ˆþ\µ})ÕÁÍU˜zB9;v†Ûn®ö¸áÂ%¢±SÓ7˜úxu~ÕT–feðroŸ¹QÈðýBéÌs+I’$éß‹+…¤oM{Zeƒ?^c:W¥õ¥ëùwóúµ—«,ÜüžtŸÌ_5÷=3+´¿t!’$I’>+W Iÿ–¦xRÎÒuíF«ÁöÆCæÖxµ:Çê•Ó#Ÿ»¾¯_|þw–&º­j±³™#óãs^­Îñ0õŒìØÀg¯ïk04±DyâKW!I’$éSq¥ô­bdâ.¹;£À.7ª_º¢3 'æXš»¬*}±ZH’$IRÿ1’¾Q‘hgQ»Ùìj4*+ÌM“‚ IzfŽ•J£GM¶×sd¯m³H¾zJ›F…•¹ÒÉ€ HŒ_'›[g»yòÖj. 2-¶WfHA2ÍL¾~dømÖsÙƒþŒO/RèÖÙG2KØÝãp”3ÕÙužN™ÛFLd 4hPY>ò|׳,÷ú~¼3½ç·Qȹ*ìïr=$ƹþÿýoþWðöŒ¥ÒÂ÷A@¦Ðx§]ë,žc|8ø¼Í¤;söæþ§}|$I’$}z†BÒ7ª^ëü" ‡ywóÓÅ»i¾ŸyÀÆÖÞÁ92¯yUÙàÁÌU®/u9G§µÍòô8?ü¼ÊËÚÑ6Ϲwó*™ÂÎ;·76I?à ¯^w¾ÖÞ«ñrõg~˜È²¾s|€ŽÚêmn>¨ðàõ+.]¼xÐa‘»Ó?ðóê˃þÚìm=gá‡irŸ(j·9­çSëlÕÉgºÍS¹ I)7ÃÌÃ#ÏW{ÉÃSÚ´¶—™?:g›ßVq‰Ìí5jm ½ÿïÿóA{ˆÏ;þN!ÃÕ™lT^qpûÁý ܼšaÝ`H’$Iú" …¤oL«¹Cu%Ëô½-à·Rïž<´S¸ÇíµW„¢W˜ÿíÊå2åòŸüþÛ˜cí„âwøíNå?~ãN|—ÕÕµO2õJŠDx÷ä¡ÓêlQ}œå^éø<ýÉ‹_§ˆ†:s»Pì5•°° Wæãƒ6Ï~éÌmíé«G¾­ o?d«}ØìÛù-ÿù‚G7F ½~ÉÏsyê]žéùÓ§0õèÍOnÜd©\æÉTçz|þwÊåò)g-}àø­ Ë÷J´¹ÄÔ£ß;svpÿ¯“— ]âáÚÉOƒ$I’¤OÏPHú·´Ê̓m8Çÿüý»«Ü|ð’ׄ]dêC¦«¬Þ+Ah’ûOî21røÂú†F&¸»4Oœ6ϯñf±G³Äêê.gþÑÑ6aF&î²8{xÉZ±³Üc{í!¥6\œZâQ&ÆðÁ2¥¡Ò¹eò´[8¿ÃüD¤³²i` ó÷ÎËm¸8ÅÒý4o‡!}ÿS?Ê„¾ÑjîPÍß%{o %›îrœw·:›%VŸ¾âÌ/½;·Ã‰,OîO¢ÍF.ß%ƒøüw'F´‰Œß}{®Ñjéͪ¥ÆÆ2«»0zg™GÓoç—ab™G,M]„­+•.k’BSÜÍÆÞŒñ!ÇgŸ{üæ»m 6ÍTlèí˜Ã$æò”Ëe ž„.I’$}†BÒ·&4Èhê¿ü¶ÎòôÈ»¿øW‹äÛ@zœD·D`8F*lßžõR«°09EªË’‘é‘Õ%u¶6v(ÓéX—Ð!Bj:ÀF¥vâêÅX”ãC4ªE¶€‹é+ÄŽw80Fz:ÚåAÞïðüœ®¡Ú½5^">¿H:r²m·:[ÕÍ·ó4|²Í@"Í­‹Àn‰Ú‰í]“Lui4œL“بЙ­•#ïRÄ’I ÍÚæÉù%£[«³û€ñªr¹¥Mê í–$I’¾¾’^ú·tü•ô *K·É<Ý"46Eæ§ib]œÆN­sÎÍê Áj¯þ+Ôva|;ubc‘3¬,iR¯ĈDºßŽÄˆ²A­¾CƒØ;áJdpðÄý»µJçÚÅ“×"ÑÐ%ù@.E‰ÅÒ¤¯§HDÂ]ïéVgsoè5O¢qày…Úp4ŠíÖ(!…ZĆvéLG……« =ž£]Û¡ÁØ»áÕà ÝŸè¬>`üpœ©—ØxúŠ­§?rí)pá—'¦¹žN;eŽ%I’$}z†BÒ7aˆXæÚ×¹¹ú™™=ž¬d;4´ÏÚ_ûÄ­!BçªèÔ»/„èïœVJ§’Èpsn>@|þ÷Þgç| Óçi€ð¹÷k] tl²Úçø~ 瀱Ì2¿E–¸—{ÎÖkàõ+^®.ðru¡s¶Õý»ŒwY]%I’$éÓ2’¾ŒesÜ©^ãÞÖ*3 1Ö“]W†„n¶Óç©EóÜ;§^Óî:Yq~ù}‰ñ/6ç?ÌÈÄËs4늛ëó¼|õšvmŸg ?ëbJ’$Iú¤½7óTªò*û:µ@Œèñ¥R•]Ÿ®Y§RBQ"CT8e:>±¿>~8câú¹|‘òï÷™¼ìæ)~¼€’$I’ÎÈPHúÖŒLqwê"Ðæù½‡}›9ÑWB@i™§Õ.O«Bn" æ8|9ØÀX¢sØñó<Ý^Öª,’‚¹"M"Œ¦.»ä×*]B¤:«/HÅÎv@ôP,Ehç 
lžè°ÎË|éLý|joæi#OñÄAÒÐÚÌóx¸'z|«T;OáäñS̳„®$èÌÖcÉQ Mþq.ðSÈËŸâ5ï翵¹Ø9ÄûîæÉÏÃPŒÄù¬I’$Iúˆ …¤oÎc·²¤BÀî*¹µú‘K1Ò·.¯X™a©²óæõÖN…¥™ «»pqjŠø›W¿w †—,ÌäØÜyÓ‚Ævž¹ìsÚ„˜LÇ #Wf‰‡`w5ÃÌR…ÃÛ[mòÙiîm—n0?ãÃC)¦&CÐ~Îí›GÆonS¸›íô÷5x3O%2w)l&h-v6sܼݙ§øìN¾€½ÍóÛG¿M¶ wÉ,”€Q²×ß¾Ém85M*íÒ™»¶o&øH›8·®œý5UN{Í÷oì;ïø±q&CÀÚsùmÞ¾|¬Év!ÇR'õ"ña/‘“$I’ôx¦ô- 'ùéNœ…[÷r./qx¦òÈô¿T¯óóË-žÎ\å鱦¡ÑYîÏŽyƒÖc·rÜ©]ç^i•¯mYˆèGdß?4ÎýG[\¿¹ÊÖÓ®àÂe~YÊœãü˜ÙGÜØšáiíøø—˜âáÞ¯RûLË,1_»ÆBi…ÖŽ½«3O÷»Ä›b*”ïòý¸Àå_IGŽ|)œdþÑ ê3O©­-ðÃÚñw€]âÊý;œç íáȰKíÁÞs÷yLjqkþ2?¿äå½²úH.RIDATxyïx‡—˜z”%æyB’$IÒgçJ!é541Ëì%€÷œs3Ìx®À³_"5:øæ]Y.]fjþ /–§9þ ú@„ôÒ:¿ý2ÅåK[p)–â§G/XÉŒ½óö±,ùßñS*Æáí¡ÁQR?ýʳBîüoš#³ò‚G?¥ìT|at’ùg+LÇÎÙ×'5ÌÄá[ænò|NþÄ£ÙñCÔöztÞñ‡Æs~û…©Ëу1€ —ˆ¥~âÑïy²ž0-I’$}ÛßßßÿÒEHR_jÈ|¿@)>ÏïKg:x[’$I’>W I’$I’$õ!C!I’$I’¤>d($I’$I’Ô‡ …$I’$I’úMK’$I’$õ!W I’$I’$õ!C!I’$I’¤>d($I’$I’Ô‡ …$I’$I’ú¡$I’$IR2’$I’$IêC†B’$I’$I}ÈPH’$I’$© I’$I’$õ!C!I’$I’¤>d($I’$I’Ô‡ …$I’$I’ú¡$I’$IR2’$I’$IêC†B’$I’$I}ÈPH’$I’$© I’$I’$õ¡ÿøÒH:›ÿùŸÿùÒ%H’ôQýçþç—.A’¤¾ö·ýýýý/]„$I’$I’>/·I’$I’$õ!C!I’$I’¤>d($I’$I’Ô‡ …$I’$I’ú¡$I’$IR2’$I’$IêC†B’$I’$I}ÈPH’$I’$© I’$I’$õ!C!I’$I’¤>d($I’$I’Ô‡ …$I’$I’ú¡$I’$IR2’$I’$IêC†B’$I’$I}ÈPH’$I’$© I’$I’$õ!C!I’$I’¤>d($I’$I’Ô‡ …$I’$I’ú¡$}fÍí‹3i’A@$Ÿžc¥ÒøÒeÓ  ‚ÕO5ÄÎ:‹ÓI‚ 1>ÍbqçËÍâ×óõcm dNÔ´Cqqº3ŸÉ4sù:­OÐO÷gèÖO¯ÑÚ^!›î<rz‘wÿüµìä§ ‚iò;ǯTÉÁÄ2ÛÇ/m/3™_Û'P’$IŸž¡$}F­jŽé–iOæX/—)—×ùu¢ÉÙ«Ì›¼ ˹õ¯ïþV…ÅkO g ”Ëe6Ÿ¤Ù›;h´¨nVIFÞ¶Ù)r7³@éÝŽ¨.eXÎR(—)æ\ΰºý±ûé¢k?=¾~T³È½›%¹uÊå2+»ÜάPÿÐZ¨Sʇ™œlS(H…€(ÑpžÒ±>¶Ky¸x±WÇ’$Iú† IÒg³ÍêÜ*¯®d¹3a€0#é%6Ë›,&Ãy¼*¯Îð°ÞþÈý~µ"ÏÛ1’cÏ<œbêÊÅêa|U£´‘$6Òù_³’ãúõÇ„c—ßí§U¡ðt„éôa€pŒl¡Àô¹ŸwÖÏ©ýNrws‰t¤ó)¾œ"þªÄvãüµ°]dy0Å­tнå“+‚$Ž“'ª³µ1F:é]«$I’¾Y†B’ô¹l—ÈïB<=„º9Øê3“#—Iw)6¡U/¼Ùj•LgY©¾]UÔ¨,“OAbœÌÊ6-27Y(-ð}¡ÐèÝOk{å Ÿ$3+š_ Kªïîuþ±]¡˜Š=¼p!É|a…ìÄð±5J£\<-OûXýwZ?§}ý`;^®ë~²&ÛTâWˆ }@-´¨ä½’dh$NšÇä+'7›…FŒåKo£z…üÅËÄÏ:Ž$I’¾5†B’ô™4wkì‘ÈÐûo®Ô‰ÞÙ¤\¾K’" ×(åø£\fåz›73­VÍuîÍ<„Ùg”ËežÍRzp›Õí!&–ž0Ÿç÷ò=úiUÈÝ|@)šåE¹Èý‘-ò•O8‘Q.S¥ºÝ /Zõ ò°»×I¢µ c‰±7áYx$ÆH·¤½Çn$Ä^a‘éäÑPŒÚÏq§õsjÿ 1±T&;vìËÍ"wǯqóÁW¦b }@-´*¬?“J„RÓQž¯WNÞ?ãrôí²z%Ï`2†™$IRÿ2’¤¯Q4ÉØÁb“feƒ6$S1Âl5b‹|©áqr¾`6Tbi1Köq 
Øe¯Ë*ŸžýÔŠ™æp÷áp|‚ÑçyNQ&–Œ°±Uv¨B$cg(%I’ôÍú/]€$õ‹ðàEB@½Þ€±÷ü2>âÂÁ?[ÍÎo÷«7ƒÎv°Cõ&4ÖÉ\ý™JtŠ;Ù,¹‘6×îu?â¸W?P½3ìàa€rÐ'^B2œœc¹8wð¿… ÄS¡Ue³'}|Ö©ÒÜ:Ü®50F"Ùænµ£{§ŸX¯Ã|>’á ¦Ó dŠ52ÉóÔ²ÃÆêKØzÉߟ¿{euc‡ñc?O1øc…z í îŸrE˜$I’¾j†B’ô¹Œ¥¸uñ)+5ZC=Î:&ÔùkêÉÉíGÂcJm˜Êf™ƒ^‡J÷è‡j(Qß9 ¬^ÓÞ;kA«Fµ"rkjOÙH¦˜{+ˆÄHQáøS‡ÃP+}œ~>‹&Í=ˆÄÂç«¥^"_›äÑŸsÄŽ\n®gùîi‰z:MäèýC1’¡9V–ôÓY"ðõ½™N’$IŸÛÇ$é³ajqŠKk9î­×Î|i²ÏdÖ»ÿz>”H“ Aq£BØ)d‚ rÕáÎÊž´ª¬.]ö1H4ì5yýž~ˆ&™ Aií%;@³’g¥ö §¢Ua11ÍJ E}c•µÑ,é1Ø®IÅϸw-#•*?œ»f…bñ©ÑÈGëç“ØÉ3Xdó`‹W³šg¹çÆåȹjÙ..ÊN¿„“×™¬/S<ñ²abmž?¯ºg“$IÒ¿ C!IúŒƲ,ÿ6Mhu†ï‚€ çÇB˜ÙG/X?eKY8ÉüÊ<ñj–ï‚€k›LþòˆìØáä,÷¯D)ýü=‰«9Z“Œ•j&6™b°þ€kÁuVöNï‡Ù•;¤j9®&ÆÉT£¤ãŸr"bdŸ¤(Í$‚ï˜Ùˆ±t?Mäà5鉱³®Ð “œ_&^¹M2H\[fðî#¦G>V?ø|'{ûØpšûKÃäÞ7‘«1ñä>ÀkiUÈ?Ž2•ê²?n Fú<Ο/W I’$I’$õ!C!I’$I’¤>d($I’$I’Ô‡ …$I’$I’ú¡$I’$IR2’$I’$IêC†B’$I’$I}ÈPH’>™*¹ Èhùj£!rÕé³A!9>¨ùW7Ž$I’¤/ÅPH’¾a­ ˹õwB)I’$IC!Iú†Uyxu†‡õö—.D’$IÒWÈPH’¾¨ƒ-f39–§I‰ñ9òõÖ›;ZÛ+dÆA’™• ÍcO£²|p= HŒ“YÙ¦EƒBæ&«¥¾2ЪXœNÉt–•jóÌãH’$Iú¶ IÒ× R…ô#Šåd£Ü»ž£ÒZr7PŠfyQ.rd‹|åH»æ:÷fÂì3Êå2Ïf)=¸ÍêöKO˜ˆÏó{y‰‰" ×(åø£\fåz›73äwÎ0Ž$I’¤oŽ¡$} biR#À0—¯Ä¡ýœb ¨yÞ†T:Å0Ž¥¹=Ò.®»ìuYåÓ¬l°Ñ†d*F¾œ"ÎùRýýãH’$IúæüÇ—.@’„ tðÏ¡áP¢^oРÀà`øàêBƒGÚ5ÖÉ\ý™JtŠ;Ù,¹‘6×Ñjv¶Š­Þ :ÛÊÕ›4BïG’$IÒ7ÇPH’>™‹DãÀ^›×Àбkƒ¡#ÿmÃáâžÆN€Hdˆ!"@‰úNƆ€×´÷Þ6k”Ö(µa*›eb vz*}0ÞÔ“2Ù±cת½Ç‘$I’ôíqû˜$}2CÄ¯Ä ÕVXÙÏïKǶ˜I’$IÒ§ãJ!I’$I’¤>d($I’$I’Ô‡Ü>&I’$I’Ô‡\)$I’$I’Ô‡ …$I’$I’ú¡$I’$IR2’$I’$IêC†B’Ôo2A@«|¡J.ŽþIŒ3½X Þúte´¶WÈŒ'‚€äôùí#ƒUsïÖd(4ZÔ ‹L¶KgY9Ò®µ½ÂL2 æ(6{ñþ~N­íchȼy¦ó¶Ý$7=N"HŒgÞ©ût-6G¾çŸbŒ.vÖYœN‰ñi‹;o+Ú^!›NÌñ"G./†B& Shtù\tþ$–º?WÏ1>¤¶÷ZŸïk×k¾O­³×g¸ÇµžŸ½Þ?’$é´/Iê/ÿ|±?‹íÇîÿ×Áþkÿ~,¶›}±ÿÏÃ{þUÞ¿?ÛÝx¶ÿߟ¤ˆíÿþSlöÙ?ö÷÷÷÷ÿñlv?¿¿XÑ?_ÌîÇ~ùsÿÿoöžì_¹rÿÏÜ÷Ç/ûWâóûü«Óçwbû³/þñþ1ÞÓO¯Ú>Š¾ØŸÍî¿øçûo}×?öŸÝx[Û¿Ê÷Ô}ºÿû_¿îO¾ó=ÿøcœ´¼ÿK|jÿ×ÿ:høû³ñûÏþ±¿¿ÿ¯?öçã³ûÏþ»óþdzÙýØäo§|Öþ¹ÿb6¶?Ûm²þûÙþøîµõãcÕvtü^}ö¬»Ç|÷ê³×g¸×µ^Ÿ½ž?’$é[äJ!IÒIáÙÅY.nÝãq±ùñûoU©¼Œ‘Š0Okש¬^Ø­WˆŽ 3p¼ÝÈ4…B–D¸óß¡äÉö•:@‹f"‘á÷Ñ«Ÿ÷ÔöeíQß‚H´S[8–:òü§©³öp—‘ËŸrŒ.jEž·c$Ç&y8ÅÔ•-ŠÕ„“ÜÝ\"é|‡‡/§ˆ¿*±}®9nPÈÝ#|ç'’á.—{ñQj;6~¯>{ÖÝc¾{õÙë3Üó礇m'I’þm I’º‹D‰Ÿâ7Âæ»„…þ zs±ÁN­ÍèÅ‹gè¨ „Ñ ùž…¬Þ<Ø×sŒÓúy_mÀN‘Ń­>AbœÌÊ6§n°iVYɼÝ´\9´©¿Ì1 ’Ó,êoûiTXîÚ.B,¢^ël!jV6Ø 
M’Œž>CõÅ+?15|ì©Ïñž1N­ílê»{Ç'‰í *ñ+ĆŽÖ–$\¿»F­Ý¥£í5×g™êrñ¸.c|hmçÿDŸ'Úÿ{z²Nxç3üÞk=>{gîS’$} …$IÝ „;+uêu>Í"™Ç¿ yóõ]j¨=½Íxâà—ÖSœim®“¿t‹ÔØK¿3‡øücïã´~ÞWÛù¹9¸õŒÍr™?ŸÜ`ïÁCŠ]'h‡|æ&¥ø¯¬—Ël>IQÉÜ&ÿæQ*¬£,Ë”WÒìÞËð°Úê´»=CåM»$•ÌC: ¶Â$Z"’¿J|7WçúJ–؉%UoŒÇOc]‚‹^ÏÑkŒ^µå2UªgÒ´êä7`wïHÂÓ,rwü7ìpe*ÆÐ›Ún³›zÄåMMì±Y9ñ`læ28bä”Gï9Æ×Öcü³ôÙµîó}¦>z>ñîuí´ÏÞÙû”$IßC!IÒ×¥¹Çnûc·Yß,S.Üb —ayûØ};nß®0uwêýÁ@/çêg˜ôò&s‰ÍFZ­N“ÎzŠê%ò[“L¥G O0Þ"_ªÜp‘[³ ¿¹¶K¾X;h—"=qØ.ÍÒæÝÎV£V•¥™ {7~§\.óç£ÅéÛN9¤y{õ1ÙnÏÕã9zÑ«¶ãÂãdïGÙ˜ù;AbœÛ›&&Žß“äîú:›/¦Ù½=ÃRµubŒpì·âÇÚ5K¬=“¾||ùS·:ºŒñ¡µõÿ,}vk×k¾ÏÒ'ôþ w½vÊgï¬}J’¤o†¡$©»V³³¥$2ÌY6èœ_Ã6ê‡_'Y,çÉÆ’†p‚dòùÒ‘T¨U%—¹w–ÈŒ¶L¦Çïíç´v-¶WfHãLÿ¸D¡þšÃ<¤QÈy³TŽj³Nçüø÷·o›šY…ZýpYM„ÁÁ·#^ŠÆi·fƒ v Zj<}•fj¼óˆ¤˜J•x¼±}rüzžµinu]Ftúsô£Wm'Ɔ“s,Ë”7×Yº~‘fâÑ.Û‡'˜N¿bµXë2ÆÑcÕW7Ùˆ§ˆù`vÿÔ1>´¶㟥ϮízÍ÷Yêìõ³pêµS>{géS’$}SþãK IúJÕk”€T¬Çá&j(B4Ô~û‹h»M›‹ÝƒZ´ZürÚØ$÷ã;ÓÏXšè±Rä}cœÖO¯v"4¹õ¬Èõ°S`æéF§ÙÄ士8êy¢L2ûç‰ã¿W7êÀí×p˜¸½ª•…nA8B” {M ë|œò¸ÇÆo2T6J|·qô®›õy~Ÿú=õ¨íÄóתQ-…ˆÜê16iîA$î2F“½cGèÔ*k\Œ¥ß +ß;þÑ1>´¶㟥Ï3µ;OŸ½~zþœœòÙ{o;I’ô­q¥$餿6ù‡Ù½4Ë®{ƒþª(‰+U6J}O;¥ *—cŒ õÒ9*‡»uEÖÖâL]ŽtÎy¹v›úÔrïý…µÇ=ûéÑ€f'° Ieu™ íîûÇ"qÒ£ÏYÍoÓ¹½B.`îÍ<5–ó•εËùKÜJ´Û _8l·In"ÍJˆ¦¸q©Háð£ V×.‘ŽŸÜà34±D¹\~óçÉ0õ„òÒÄApÊsô£Wmǵ*,&¦®µ¨o¬²6š%=ìä™N,²y0Íjžåbœ—#¹ÌT|ƒ¥Ç¹iVó,¿“Wu"G#]=¢×Z[¯ñ{õÙ«]¯ùîÕg¯Ïð{NNùìëçK’$} \)$Iê(-ð}°ÐùwhÑ+wxvâÔÙÿšbÓóäo^#¸×&4ç§_“…!‘ë<º»ÌÂÕ3{m.\ºÌ­G‹L A=ÿ˜µ×mX¸Êa©Ð9\zéÄ*‘ÓǨoôêgèôÚ†’Ìþ´Æ3«á8·³ÌnþHµÞbbøør aÒ÷Ñ\ø‘ñ{´/\âò­',&ÃtNîŽ3­’IΰÅ(“‹KLœlÇà(Wî,uVô0Ff)ËâÜ$nïÁ`œ[KÚCÏçè5F¯ÚŽOŒì“·g<؃Áø-–î§;Ÿ§á4÷—–¹7äÇW¯¹0š"óä>TCL,>¡9w›ñ I8>ËÔx{Öô»u¿g[SÏ1>´¶ã÷zÞžízÍ÷é}öú gÛ=>ßq8í³×ûçëÓl"•$I_Ößö÷÷÷¿t’$I’$Iú¼Ü>&I’$I’Ô‡ …$I’$I’ú¡$I’$IR2’$I’$IêC†B’$I’$I}ÈPH’$I’$© I’$I’$õ!C!I’$I’¤>d($I’$I’Ô‡ …$I’$I’ú¡$I’$IR2’$I’$IêC†B’$I’$I}ÈPH’$I’$© I’$I’$õ!C!I’$I’¤>d($I’$I’Ô‡ …$I’$I’ú¡$I’$IR2’$I’$IêC†B’$I’$I}ÈPH’$I’$© I’$I’$õ!C!I’$I’¤>d($I’$I’Ô‡ …$I’$I’ú¡$I’$IR2’$I’$IêC†B’$I’$I}ÈPH’$I’$© I’$I’$õ!C!I’$I’¤>d($I’$I’Ô‡ …$I’$I’ú¡$I’$IR2’$I’$IêC†B’$I’$I}ÈPH’$I’$© I’$I’$õ!C!I’$I’¤>d($I’$I’Ô‡ …$I’$I’ú¡$I’$IR2’$I’$IêC†B’$I’$I}ÈPH’$I’$© 
I’$I’$õ!C!I’$I’¤>d($I’$I’Ô‡þ®œa79}IEND®B`‚magnum-6.1.0/doc/source/images/cluster-create.png0000666000175100017510000004320713244017334022001 0ustar zuulzuul00000000000000‰PNG  IHDR¿"1‡e. pHYsttÞfxtIMEà ö¯’tEXtAuthor©®ÌH tEXtDescription !# tEXtCopyright¬Ì:tEXtCreation time5÷ tEXtSoftware]pÿ: tEXtDisclaimer·À´tEXtWarningÀæ‡tEXtSourceõÿƒëtEXtCommentöÌ–¿tEXtTitle¨îÒ' IDATxœìÝ{TU÷ÿÿp8\ì‚·¢`¬ÄÔs’ÒFøŽ³–d¾#®´â¯]ÉHç[I'ÁßüªéT6Øéxì$ðýMtÚŸ8³ßô+Î48mȘѤø&á$1ØDˆÚã ¼D= p¸ýþ‘ûíàáùXËÙû³?ûýÙ×yñùì½}^|ñÅÎÎÎNݸqCÍÍÍjmmUGG‡$©³³óŽÿ0™øøøÜñ____ùûû+00PÁÁÁòññ‘ mmmºzõªæÎ«Õ«W+..N!!!ž¬€ihhЩS§´ÿ~>}Z_ùÊWäçç'Ÿï~÷»iiiZ»v­§k`̼þúë*--Uxx¸|çÌ™Cðxµk×jΜ9jll”ïÓO?íézO?ý´šššäçéZqqqr»ÝòåáVo¢k×®É×Ó…0ž¦OŸNøx·ÎÎNÂ/À»utt~Þð ðz,{x=Â/Àë~^𠘿¯GøL˜ŽŽ9sFUUUjiiésÿùóçuüøqݸqcLÎɲgÀ„éèèйsçtùòe544èÔ©Sjnn¾cmm­.\¸ ÆÆF>}zÌ0á0!\.—._¾|ëë7nèü£š››ï¾=÷;NuvvŽú܆Q÷ÀL›6MáááºråÊ­m7nÜÐÉ“'¤úúú;ÚÈd2ÉÇÇgÔ磙_—e…Ú’•¡«UV«U)9²•VÉ56'º‡J¶mS©s,;uª²Ä¦œÌ4%[­²Z“•¶n‹ í½NRi“Õš­’1=·ÆiL0± ƒfÏž­ððð;¶777÷|çΫÐÐÐ19÷„ß•æ¤+3¿ZI/îViE…***T’›"מµJË.QÍèO2tÕÅÚ~°Nî±ê¯¥Ry™O)û`„2w¾©²Š UTÖÞì½½B¶JÝ}‹öë1€‡ô€{ëà+:ü¶¨2/[/•/×ν۔ž©€î=¡ éÚfÛ¬øòíÚR<¡ñw 9Uº)KûŒ9*ؽN–Ø›£ P¬%[»w®RmQ–löq¿à5 ƒfΜ©iÓ¦õ¹Ö¬Yc|¥Ñ†_W¹ŠöV|N¦’úØo^®Í¯¾ªÍIÝ*e³Ze+)Õ–´dY­Ve:ºvµ8TjËVZòíeÅÅ•½M·ÈQjSNFЬV«¬ÖedåªÄÑ>%Ù²®/’T®í+¬²Ú*oé´j˺îãR2”c+|Fºê òËÝZµa¥Ì}ìH^§Ý/ïVf|_ƒï®ÇjSå[»®ÁµÜ9¦›JÇbL•6Y­6•”né¾®™ºy¹ÀS:::tùòe566ö¹ÿÂ… }¾i4F~«íz[’%ÞÜOƒP%$'+!öÎpX´ã ’ö–©âý7µ3Ýܽ´xµ¶;Rôji…**Êôú†®_'[åí;г´zO‹2òJUQQ¡ŠŠmKªÖöL›ì-Rdzž*ö®‘”¤­oU¨"'Q’ä,ÍÖSYÅ Ù°¿ë¸’\%;6iõ K²kªËU«$%ön¥X%¦%Ê<š_H8 ••U¢Ø-%·Æ”kqè¥1S‘vLÒÞ² ½ÿæN¥›GQ+ŒR_Ouî­±±Q‡cLð¨Â¯³Æ!)Iñ1Ã<0cÒc%Ä*6Tª9hÓ¾ÓKµ57C ÝA22y‹r7ºU´­XIR¥ŠmÇ´tC¶’o…éP%®ÌP’û Êªû9W‹]{¶—+bc®¶$Gv– ŒÜ­J*ß¡=Gú$W]µ]’YæÈaŽoœ•å:­d¥Xn&èP%dæ©¢l‹,ýeîaŽ)c]ºº.w¬Ævá ]Á7 à®%Ð c€=òžß¤ø¹=¾rªòÈ1))E–^É,!1E:]®J§$%*§¬B¶”9*í*-ÎS^nŽ2×îP¹Üý? 
ªºLÝRŠ%áÎí¡‰J±¸uÐî£QLdÒJ%‹”¹Ey%v9œCøÆkLIºãr€‡¸\®>ƒïܹs5oÞ¼»‚ÕÐР .xþ=¿‘f³¤"U×JÅì¨[’Ê·k…u{{“ný­¦4G™/½+͵(9y©’R–+#3Y;Vï°w·¤¢õVõµ;¾ÿ#câ“$9äpJ‰ã5û™¦¼7cU²¯@¶,íkŒ‹´rãfe§'ô3S;ò1€§ë+_ùŠ®^½*éî§:Ïž=[’n½Øh4*<<|LÞó;ªð«ø$­R‘ìÕ)ÑÜg“J[²²Þ^£Ýof+±Ÿe¼FIZõªÞß’¬þVúÊY¢/½+ó‹ûUÙã\Žcª“ú| ÕÍÞŠWÎþBeôߨO‘‰)Š×UV·(=²¯Êœ*É^!›^Va^šb‡×}Ï)=Ǧô©Åé½h›6m_«:ãaÙÒúŠ¿#xŠ¿¿¿æÎíZšÚÔÔt×ëŒn¾Iêšõ<ïù HVÆÆ¹ª¶ª¬¯Õº5%ÚSìVÌšåý_)R‰)‹¤ƒGt¬W5ÅëdMΕ½ERmµÊ%%ö Ù®ª2õw»¯$)>Y+Õ*±÷z´U‹]¹ÉV­è5Læåڰܨ{vßwÜ«‹Ê}*(7*9#¹Ïà*©E®žãrÕ ø­€H³’³s”!©¼º¯³ŽrLàAƒAqqqZ¸paŸÁÖ`0Èl6káÂ… ³óŽúžß„56m¶Ôó™ÛTRåTWÎk‘³ªDÛ²·Ë¿Q¹kì#veŽÖÄPîŽUu?«ÉUU¬¶j%m^×õà'³E+Ò‘’#rÞ:G±¶l÷ÎÎBÍŠ—Cuu’ZZ¤‹ÖmNRµm‹òì5]õµÔ¨Ì–«1k”³r ùÚP¥lÎÓ*×e®Ë“½æfŠmQ=OYYEÒÊ\mLéû7¡ñ-ÒWÉ%u¿ÎiŸÊ·Û¸ŽlQrr¶Š«n?¤ÊYV¢2ÍÕš”øqx–|}û£>>>òóóÓsŽnÙ³$˜•‘WªE¥{´gûZí¨®“[RÈÜ¥J_·Wo¦'~;p@¢r ^W|Þ.mIß®Ó ÝÇïܯœäî š¢Í…[eÛ¶M+¬ ’1B‹RžÑ†ÂWeÎ|þöÒks’6¬,Ö–µVå'mÕ[yéŠMÏÓ›±Úa[¯Ôê:¹Z”²A{ 2˜‘îjÑ–â·”V¸K{²Ód?Ý É¨ˆE)zfç~¥'›û‚rl†vîviGn–Rw5(dîR­Û²Y›ëÖëæÝÍ¡)[U¸5_y[Òµ£gß»w+ófqc=&˜b|:Çâ±YLR+W®ôÌ«Ž˜H„_€×#ü¼áàõ¿¯Gøx=Â/Àë~^ð ðz„_€×#ü¼áàõ¿¯Gøx=Â/Àë~^ð ðz„_€×#ü¼áàõ¿¯g¨©©ñt Œ+Cll¬§k`\±ìàõ¿¯Gøx=Â/Àë~^ð ðz„_€×#ü¼áàõ ž.àý.^¼(§Ó©æææ 9_`` "##-‰ð g/^”ËåR\\œ‚‚‚&äœMMMª©©‘$–=Æ™ÓéÔÌ™3',øJRPPfΜ©Ë—/K"üÆYssó„ß›o-³&ü¼Þ¨ïùý¯/ëuàã‹:qé†[ÚÇ¢&À«-žª?Ož©‡g‡zº`ÊÕÌï}Y¯­¿9¡£g]_`ˆŽžu)çãúô¬ËÓ¥SƨÂo±ýâXÕL9ûÞ?ïé€)cTá÷³sÌ\#uòò O— WU‰r³2”bµÊjMVÚº-*´;»÷VÊfµÊš]"瀽xBwmÖl•ô(ÎY’-«Õ*[¥ç*Àäįñô­-•6­[[ ÷*›J+*TQQªWÓ]ÊÏzJ[ŽŒý/¶Zjì*°•Žq.׎=ejÓ>àQ.‡ÊŠs•“™¦d«UÖ›R2”µ¥PeŽ‘}V%üSR•жéôÊmN3+@’ª„Œ<•U”)7e¬ÆU©ü§²”ïpq¿’û€MEUcÞ-&œSö‚,¥¤®Öó;èÝê:Ýñé±á´ìoïÒó«S•’U û0gU¿ÀTTU®âZ)Éß|‡¢÷2h§J²­²Zmº¹Ê¸æH®Ö¥%Ëjµ*9mrÔt·[¯"I*ß®ÝK•[%Ê]—"«Õª”ŒVºî¥.˜®‡ïŸ®´„YúëÔûíéºOJ\® 1R¹½zOJn»®ç×JÈÜ­#‡µïËzqé\5ô\:ÜS÷j•5{+TQÑãONâˆ*‰Lߨs¥ÚÚÚ[ÛœåUî–2rr”ž+£qÛãzÐ¥êí‚;Wï)BæØÛ÷¼¹* •“™©[™nßqfÖÒŒ;'6ŽÙŠ5ØÛ.½>ü.·ÌPÚÃfýÅlƒ¤ ¥¦Ç(ÍÝuÿ!0e%hMîÍ=hÓŽRGwv©ª8[ÉÖde—ö5#*s¼$GµN·Hr”©¤ºÇîªeX­Ê.vÉœ˜¦Ì Š—dŽˆ¡x‹¤:—$E&gh¹Q:ò¶].I5%Ù²ZÓ‡t¿F¿ãÉY¥w€( ´ëÍš§ÔR©¢ûûz 9tìíÚ^ÛjU”•¦u•ª±Û´ný.½[]­w‹6©¸GºŒ5ßy˜ûˆìƒ¼ÄëÃïÛŧTZ×ÑcK«>}ï¤þÁc“C@bŽ ^_'cQ–R­VY­iz¾$Tw¿©¼´¾–™˜µr닲¸Š”•š¦ì²-ïù ·„uÊ{uŒ…™]Kƒ³Š³q·¶¦GJŠ•eÕrE8viµ5S…u)ÚZ¸UI•9JµZµ:ߥU/ïVNâП=}×x’7hszBS6jçÊx•¿´BÉOÙÔ’¾J‹$Ù+Ãïâ¬1`œDÆÊ, 
0ókQ¢ùöW5ÅÙZ»ãX¿­c"BûÝ'M¦ð»$A¿ùoÁj¸Ô®ð¨%5\º¤Ÿþs³¾³qŽ”´è½ãÔŸìý£¢ÇégߌМ@IjÓ‰OO*ëÐõ~»¿xô”rŽNÐXƒˆ—e¹Tôvû‹µ)Ó®ˆî¯ê}Þ Ü-I‰ñ?e’=ðÊ p¿ýÝž´á½)*JÏ¥^TNþ}Ú,5œ=£?ÙûGI3ô£Ô…_¿ Ÿì9ªŸoÕœ‡ã´ã«ž®04¡²,]:ÀþåÚº·P……]rWÐtQŠ,ý½¸Û$ ¿:q┎\—N~ج+’Œ}…÷oš4ÏЪÏ?8«#×Ý:rð²>oö×ÂE÷MtÁ€ MY§gæö»W¡=ò`ŸÙ°kVe¯Tì çštá×Ý8„fÓüdT›êïºww’ п€DmضFýæß!0®Ú©Ë௠œ<÷üGc»Ü )AÒ¸£Ÿ“Q@bŽò¶:´z{¹î|ˆs™ lF™’ä–£ìîc‹6joN²¾÷jøý}½NXfkác³•R{Qzâ>- lÓç_^öte€aŠMÏÓ~ã6e¿tP§om=­w‹öéÝ~Ž YºY¹2%ùêž]'|A?=\§+a3ô7ëoæùȩ̈§õó=]`$bÓ¶©ø­Ýzqy¼Œ´3F$iãî·tÄ6ôà+I>#-îOv~4ÒCHzgÓ#;·Ûí–ÓéT}}½š››=V¦‹Å2ôÆ-5ª:V©ceÕªéÞa¶Èb±(!v‰W’Ýn×Ö­[ïÑeÏFÅívëäÉ“ VTT”†÷0_~ùåðˆU‚%V –´1«ð LAN§óVð5 òñññtIÀ¸"üSP}}½fΜ©€€ùúÞ£·þCÔôeõ½úÀ+£ÑÜܬiÓ¦|0%øú¿ÀTÅRgL%„_€×#üÆU`` ššš&ü¼ÍÍÍ ”DøŒ³ÈÈH?~BpSS“Ο?¯ûî»OO{Œ³èèhIÒ©S§ÔÜÜI¢¢‚=]ˆ¾º\ý¹œ÷DþmS[‡©¾ÕàVkÛ—4&ƲîÉ~ &{}à^ànÝ=¿–h}v®¯u„óÌã3=]ˆΉӴSÇu¼Ú¬oÜ=•ªvçúèX­Ú:$ùʯG¾6KAõ•zÏ^¯Ð˜P¹jëä–¯c)Þ÷”>?ß 6Clû5‡>=zJõÝm#âÑ×f5«ò=».º%8¤C—æiÙ£æGÕ«ò½º®®ýj™ÞµÇ}ή÷ìõr÷¬”5õÙ¶ûš˜,O(Ñ$u--?!Í[¦GÍM:÷ÉGª®s«C’!$F ¾öUÍè£nóµÕWê=»Kan]¿Þ¦yKõu³ÿ×ÀÜßõ êóç¡ßqöqÞGî«í÷úõýs3M'†3Ö>+è2ª™ßoÌ7iûŸÎÓC³BïÙ%œÀD{hV¨lß^ ‡gß«+'fiѼ0¹ÏWªê®'Õ_Ò±cçÕó˜R—¥ê› Lê¨;¥S·îou«¾Ñ$Kjª’fª¹ö3ož£GRSõHŒA7ΟÒù›ý|T-Wè%¥.ÓÒE&5žøDÇ.õUÏ%ûä„C()5UIqª?n×.“Ÿ°(Ú(£-½‚¯$™”øÄ<…I ›×ªnÖx­)B–ÔeZºÐ¤Žºj}~n45-ÓÒEÓÕxâ:1œßž©Ôñk¡Z°t™–¥&)¦£VÕÕõýÔ=Xm7tÃ?^KS—Ê2Û¿ÇIúé«Ïë9’qöèÊ×O¾m×tîÄ9ÌÑJH^¦„þÚTÛœ®1™bbå?”ßQx=M·½kœ=Î;Èõøçfˆcíý}èW ™¿£sœÒçç’ñÖöûä¯sËרààÞËK|å0„î›[Ô¦:ûûC:{Çá}¼®¹Em2*èÖDzwÂë39 E?5Žª¦˜õ¾ÖtL_œ¯ÖgçK†`Ý7ïkZÜ×òãAkâuï³ö®ç ãìqÞkìç¦÷9‡ø}èð `dBÔWc.Ë~¢ZÁ¾Ýÿ˜ÔŸÐ©ºE,LÕ×bý$g¥Þûtïô AÁŠùf² m2È¥&—¤PIê~×X?Ë~T5„¿Ló¿¦äùR{Ë5Õ?ªãÕŸëÜ,‹f §¶úÊáv8×s8ã°Æ? 
ýçf8߀xÕ€3-X¨hߺáî¹µC­mR{£§.Ë-ÝÊOC6#F&à ]ü¼FíR{£Cþç!ýW_7žÎˆ‘ÉЬË'jÔØÞ®FÇ)]veŠÊíùùJm--ƒ7qMR«ëKýŸÿüO}RÓ«]X¨åVý™KjU«êÿp®ëáN’\Ue:ôÞQ]hi—_@°¦åëkèžeïU÷pjì çzuœCª±¿Ÿ›±+˜Ê˜ù0r~‘ZgÒåãݳt¦yŠ‹¨Suõ»:tÂ(SÜL™ÎÊU×"E §ã(-úÚ<}zô¸Ê.ÉW déãéÒwµõ TD¼E ‡ô(‚ûtßôª>û{ýWÇcúÆ€·ŒŽ¦&ƒBbÒC±ºóž_¿ÙŠ›yAÇΦwù*𾙊0žU«¤Ðy‹ã}þûÃ:&É×¢˜E‹º/c¯º ¶A'ÞékÀë9Äqzýø¹‰ÆXU¯Ê÷ìjšÓóf]|:;;;=]€‰e·Ûµxñb üþ “[[[›Ž=*‹Å2â>V|ãq–=¼áàÕÚ¯Ô~Þð ðz„_€×#ü¼áàõ¿À¨ææfO— ª¹¹Y£î‡ð LA&“IW¯^õtÀ ®]»&“É4ê~ cPËÈ5^PÉ¿ÕèŸjÛÕ(ÉßÏ ôT³¾ÿp÷À.Uiã?7*íÏ—(=Ê“…Þ%22R§NRMML&“‚‚‚<]p‡¦¦&Õ××Ëår)..nÔýy0ü^Óá7Îj_@„ölœ£’»æ´þî“úEàWõÂ>ŒãÅh4*..NN§S§Nb 4&ÀÀ@™L&ÅÅÅÉh4Žº?ŸÎÎÎÎ1¨kÎè;/J+Ñ ‹no½ðkí•H½óí9ž) àU¾µp¡'g~5'X*:zRé³çhÞtIÒŒo-Ñ;7›ôXöüÈѵöhû]„…éõ š!I×ÎêŸÞ¸ âë’z/ŸLYímž ¿QÊøï.}þoW”õOW4-ب´ù&e|3V3ï.kÆ·–èouÑxF¿øÿ.IÉ3»‚¯.©øõ ::{–öoˆQÈÙ*m|ã*Ž0)cö 0éÍ÷ðÓžcÐßü¯éwëgëGóýtöË‹Z—ÿ©~q¬a€ƒœ*yý¢N.0ë…E!]›Î^Qñ £¾ó­…HÒìôç³ÛU|ì 0ÙyöiÏÝ%#fèÑoÍУßjÓ…ÿøLkÿãŒÒ-Ô»Ú6èÓ7Ú7=Zߊ¼½ùZ›.Ê­­ùÝÙ|¶{œkÜ <~?­ÔŸüÞ_{³èö£­ šc”ŽvèO§äy IDATîØÚ¦ ÿñ¥~x-Tæ(¤ç®éE˨ÿ±q±¾1úw¼Œç–=/˜®•réçÿvFšÛº¶]«UQY“Ló"ôp¯æ ÇŽkãqýlm÷®zš®Œ`·þ×ÔªA’®9ô‹¼ôÃ÷®û0“Ÿçf~çè…?÷WÉÁZmÌ¿¨zIþ~~z|Ñ,|+¦Wã:.kR}‹”ó=—6ɶi‘V”2¾}CŠÏiõÎsj•'ÌÐß>1}˜¬<øž_ÆßÊ•+=ü´g&áàõ¿¯Gøx=Â/Àë~à^QçPÉ{ºÞg€añÜ{~CÓxIŸÖø)ÜqEÿpÔ¨èž2^ò×ÃóMž® àžAø€IîŠý¼r>l“¿$©I?þ§ÓjUò6™´ÐõÜ+Xö “\ø ²™}ÔÚýu«ŸA•™@ðÂ/0…¹ªJ”›•¡«UVk²ÒÖmQ¡ÝÙ½·R6«UÖì9ìeâUÚ¬²Zûùc«œÈJ&ä]y¯J9ŽÎî™_É¿½MÿPX¥ÏÇñœÞ†ð LQ-•6­[[ ÷*›J+*TQQªWÓ]ÊÏzJ[ޏÆþ|5vØJÇ$$&æT¨¢¢B{µF’’¶ê­Šîm9‰cp†ñ1Òkn™)ÛŸÆiÏb?IAúÛ¿˜+ÛŸÎdæ`¸ç˜’ªT´¥H§W¾ªÂ4³$I¡JÈÈSYÆxœ¯RùOe©(i«VŽG÷÷„Q\ƒiQzx¾¤p—þ* P §G)dú8”àŘù¦¢ªr×JI–øîà;½—ø:U’m•ÕjÓÍ…Æ5Grµ.-YV«UÉië”{¤¦»ÝzIRùv­°f«Ä)µ8J”».EV«U)9*¬tÝyž,›lÙɲZ·i¸Ñýö]i“ÕjUVa© ³Ó”lµ*e]®JÊz´Ï*TUK¯1gÙT»N)V«’Ó¶¨ØÑÒß©å´(»ûX“Ó”]X¥–a_ƒ~D˜•þÄ … ïr@„_`JrÕV«V’Ù9v:KµcÓ¹žÙ«ŠŠ í^Y§›¶¨¸&Réy=—'ç)=àˆ¶gnWy¢M‡+*T˜éÖ®õÙ*®éÑŸÝ¡øÍeª¨Ø¦”Ðá nð¾ío×ʲ³T‡_]%÷±Ú¾£ZiyGT±÷…ÚwiSQÕ}Ú+¥ŒÝ:Rñ¦râßÖŽL›ì}å_W©vdåK÷«¢¢Bû7F¨|×&Uð`Ì~Œ·$étþóʶ•HûUQQ ŒØ»[ºìoëm·”²Ü¢PI±K—+IÇT\î¸Ý(>E‰};˜¡ôm´X” Ä'Ê"I)Ëe •c–YRmûÎN-Zž )VKW&Iî:RÝÇÉCÓd{ÿMm4–+/7G9{ª%ÕªwwC¾3Üó LA¡12Jr8œRâÍþF.ׯªr—]åEÛU^´]!–µûÕL%ôZ[ÝâêZÞ[´ÞÚµø&Ge¿Æ-ï°ïø®¿ZÌ1ÃëÔ(»ÿk–TÞ}ízµs–*û©—d_£Í99²%¸µzGùðëÀ˜#üSQârmˆÙ§|{µZÒ#‡qßoO r×õü:@ ™»u$Ó%Ge™Ê 
öh×»»´©È¢’u½íN’köVèî‡3òUEõ]ùöÈútߜזœ5I7—Œ×ÞÑÌY~PåniMNŽÒ¥GS¾C©cŽeÏÀ”” 5¹k4÷ M;Jêº}Õ¥ªâl%[“•]Ú×ËxBeŽ—ä¨ÖéIŽ2•ô\ú[U  «UÙÅ.™Ó”¹!Cñ’Ì’"o‘TçRƒ¤Èä -7JGÞ¶Ë%©¦$[Vkºl•ý?Hj¨Æ¥o{Šì.I5z÷`¹³FËãïnÚusrMSj©TQ½ÇÞ‰»¸á˜¢sTðú:‹²”jµÊjMÓó%¡Ú¸ûMå¥õµÚ¬•[_”ÅU¤¬Ô4e—EhyRÝ ë”÷ê 3»žtœU¬˜»µ5=RR¬,«–+±K«­™*¬KÑÖ­JªÌQªÕªÕù.­zy·rG6}‡ÐqèÛ’¡EU›”b}J¶º•Úš·Q}uš²Q;WÆ«ü¥J~ʦ–ôUZ$É^éЄ^Üŧ³³³ÓÓEÀäT)›u½Š’¶ê­¼tá³±0V®\ÉÌ/Àû~^eϯƲgÀ”@øx=Â/Àë~^ð ðz„_€×3xºžáv»åt:U__¯ææfO—Ü!00P&“I‘‘‘2£îσá÷Œ~±ó¢úMSÞ µ°ç®/?×S¿iTcX˜^ß —ª´ñŸ•öçK”å©zïáv»uòäI+**Jž. ¸CKK‹nܸ¡“'Oêuöð̯¢ 7tøx›.¸]Êç¸!ùùx°.À»9Î[Á×`0ÈLJÿß0¹)$$D—/_–ÓéTllì¨úópøõUú{U%Ó‚õ£i.¸ÙèŽeÏ ú¼äKý°ªM’fÇ|E›1_s»ÛÖ9ô?‹/«äº$?ƒÒSÍúþæ‰0‰577kÚ´i_Lj¾¾¾š6mÚ˜<£ÆóŸ|§G*uú þ²ëË3Ç>;\áýµ?îЋŽíú«GôΦ=£«úùïëºw^Rñ—õEÌ,íßôˆÞZ¤/Ñák0àÃRgÜ Æês«çï¦kñl_½₤Kúè¾J]Ñs?_[šURV«3AJ]ûˆò¿ÕÝþìß0*㉅HÒìÊß´X©Ó'`€Ik„_iÆâi ?qUgÎ^QI{°™=@ãùóµgY .;§ ÿø©Vü¿GUt¼¡kßµ6]”ŸL„]@¿çW’eRªß9í{OrÏ‹ÕIúmì¯/ÔÏ–Ô\¯Ï;ôâïÎhñ‚…Z8Ý hµ¨þš$0 Û¤˜ù•¢ôÈý:RÛ¡ÇçͰeÃGµbÏqin“C4#Ê c€o÷2çpe»Uü^­$éÚ)ýÝ/*ôOœ€!&­É1ó+i΢EïPêüÛ…Os$IQÊøö ](>§Õ;ÏI~~zÜbÖ_Ü?þõSVÝÇzc_‰><}Em2(hæ=ýìZ-‰tñ^Îý@myIË¢=]蘱¶ªµÕ_þþ£+õ³_þ@¿ªìggâ³z幇Fw‚Á ëzŒÍ˜âÁð;G/lšsûËÙ TøÂí/g|k‰Þ¹ùÅïù ÓÃé_Óþô~º0ëûÌúþ˜× à.é繇”þ¬¶fÏW˜«ÎÊSþßÿR†?§qŽWkBÆêÐýBWž~E£Í¦=÷Š^éþûg¿ü~¥ ¼#2vcÈ$Yö àÞÓª”èÊ£Ï({Ù|…ùK’¿f-{N+Â+õVÙEO8†&j¬×uÅ5F]Ý3¦â˜€'LšeÏî1­ëƒãÒ‚ï=ØkG˜Rþú¥HÒÅJIm:÷ÁëzùÝu©Í ¨äçô·T°$Õ}¬×_{]^j“ QJ~î}ûÁà®núÝwC_¼ñ ý²ì’ÚdPè‚'õÜsËd¾¹döúz#ï—*ë>îѵßÓÚ%ý¼>íâûúÇ×èø•6I½jÑXoöÛ{Éïgúå~%=ûŠž{¨¿ú/êÐË¿R¥$ýêúå³Ý3¡ýçâ!½œû…¢¿vQ•Ÿ¸”øÌN=·dë†ì÷Íû³y:QR¦KmÍMAO•èµÇåj Õ¼§³•ýxôí¶éfUþîC¹ªO>§g–™ï¾†}^ëp• gÌÀk¼úê«:yòäÛxà=ÿüóãr¾ÆÆFf~ŒÐ—®(J³Âm¨ã—T¶íí|áQµ•íÓ¿~)Içt Ÿó¾§Ÿ¼òŠ~²6ZŸ¼öšÞ¿>Ⱦ/~­×*çé{;_Ñ+¯lUºJôú»7g^ÏéÀ/^Ó‰îãv¾¨óûòuà\_uÓ×~-­ØªW^yE;7>ª¦²•Õf¬CÐoýÑZöÒ³J””x3:ž:ôœv¾²SÏ 'øÚï%}rb¾²m¯è'™ótºäïUxn™þÚöŠ~œ¦¿þ¾Ðí¶e‡ê{?ëúþºJ^Ság­wŸ¯Ïk=’1oðä“OiÛX"üg¡ztÙ…Iò7?¨™jRS“$LJúøÊ\¥<9_a’–¤+9ü´Þÿ°nà}¹Žëý÷¿Ô¹Azì/_ÑK7Ÿ¨äøP_Y e«ºŽóŸõ”–%^ÑÇ:ú¨k–Vm}E¹Ä ëuøâŠÚÔ*µóå¨þÞO¨3Ë_þÖ³¢†ÐïÍïY˜y¾¢¥%)]m#¢Â¥®+uK⊧4Ëÿf?MªüøD¯ãZë{îUóçÏ×}ÄRýÙÒôÚk›ôC”]ûgzôD¡Î;Z¥è>¦‡5Ö‡”þôåø{mzÇ ¹KŸVrÔi¹$…=>Pýf-I 
Õk…›´«í'zññÆ3ª·+e¿ášö™~ñÃ_w=íùé´v~¯ë1àµƘ€×™ˆß›|:;;;'ìl&»Ý®Å‹Ë`à÷_…»^é0öÚÚÚtôèQY,–÷ñä“O²ìàý¿¯ÇšGÀÈD/ÓK¯,ótCÂÌ/Àë~^ð ðj „_€w›>}:ᘊÕÜÜìé2€A577+00pÔý~)Èd2éêÕ«ž.Ôµk×d2™FÝᘂ"##ÕÐРšš555yºà.MMMª©©‘ËåRddä¨ûã=¿Àd4'§Ó©S§N±“N`` L&“âââd4GÝᘢŒF£bccëéR€qDzg€×#ü¼áàõ¿¯Gøx=Â/À«5¶~^îÓ/ë¿ïê¬'ü¼Ûu5~ÞíßX$ŸÎÎÎNOÀx1E-fæàÝæ-Š%ü¼[t°áàý¿¯Gøx=Â/Àë~^ð ðz„_€×#ü¼áàõ¿¯víÚ5Â/À»…„„~Þð ðz„_€×#ü¼áàõ 555ž®€qeˆõt Œ+–=¼áàõ¿¯GøxµææfÂ/À»~Þð ðz„_€×#ü¼áàõ¿¯Gøx=Â/Àë~^Ï0ÚÜn·œN§êëëÕÜÜ<5y­ÀÀ@™L&EFFÊh4zº C{{»\.—ÔÚÚêérÆ¿¿¿BBB*??¿!7òÁH?×»šÛtä¸S_œ¿&WSÛ8Vè¦ôÕYÓ•² R¡£Ž®òéìììéÁn·[§NRXX˜L&“‚‚‚F]7kjjR}}½®_¿®¸¸80À=¢½½]/^TPPBBBäïïïé’Æ]kk«ÔÔÔ¤èèè!à©’Fò¹ÞÕÔ¦üC'ån—|ýüäëë3•ÞÛ:::ÕÑÞ®ƒôýeŒ*?ùä“£[öìt:¦ØØX¯ýÁKAAAŠUXX˜œN§§ËÀ¹\.Éd2M‰à+uÍüšL&ëúõëC:fªäƒ‘|®?rü²Üí’Áß@ð"__ü ji÷Ñá/.¾¿Ñ\__/“É4ê"¦“ɤúúzO—€!jhhPHHˆ§ËðˆiÓ¦©±±qHm§Z>Îçú?œ¿.ßa,Çm¾¾¾:^ã}?£9¸¹¹Ù«£3^‚‚‚¼öþoÔÚÚ:ef|{ó÷÷ò=ÎS- çs}cs3¾#äëë#WÓèï³çiϯGøx=Â/Àë~^ð ðz#K°§8í*Ü•§}GŽ©Î¢¹K×iËæu²Dz¢–e¯x[ËßÊSº'΀{Zcc£þå_þEçλµ-$$DO<ñ„{ì1ùâÕ8—.]Rqq±2225厓JÙ¬ëUöÌ^f'*àÖv§J²W¨zC…rGÓ‹ZУߩé±û¿"«yº¦$©¡¹]œºªþxuLÏb”¯tÉåÓ~Ç‚Gf~[j*UYÓrçF—C•Uƒ¼ º¦DÙOåª:i³ö—U¨¢¢T¶åÕÚ–eSeËÀ‡“Õ³Ï>«Ÿþô§úéOª 6èøƒªªª<]Ö„:½Ï¦ƒŽ±îÕ¡ÂÌTåWŽu¿“CdˆQßy$F«-3dôë?Ú¥$DhiB¸®ÞhÕëÿ§F{Þ=+gƒ[ÏÿŠ–Ì ³zf„èÿúz¬¹ú˜õ9–&<ü¶TæiýêõZ¿:KU݉Õe—mÝj­_û”²Kjú;Rö‚rlÈÕ¶ô…J’dNÛ¬æbí9Òßq€g]¾|Y}ô‘ÚÛÛmk2™4sæLÕÔL­Ï·II¡Úa+ÑØŽÚ¥Úêþf *-(–ãD‹ 1êÉÄHÅE+~Æ4=õpß³ûáÓŒZ0cšêÝúßtúJ“œ n½[uE-­Z2f5ý|d „{Ú„/{®­<¢®Ÿ¿cÊ_Ÿ%åmP]îó*:-InÙTÊ™«»V·ØUz@Z¾7¡×ŽP¥ÙÊ”vóËš#ÊݲCÕÉmŒPÒÆWµ33AÎe¯8(óÆ¹ª,8 cîû$ÉiWÁö-ÚS^'E$iCîV­»¹–ÚU©Â-›”ß×>`—/_Ö¿ÿû¿+%%eH˘ëëëuñâE-[¶ì®>N:%£Ñ¨ÔÔT}ýë_—ŸŸŸ®^½ªßþö·:~ü¸î»ï>­X±BóæÍ»£ÏÖÖV½óÎ;2 JII‘Á0ùî~4¯Û(KîzåIQnJèÝ úùLÞR–«T[€væ(1@j©´)s‹´eÿ3ªÙ´^E’´Þ*íí½|:R‰1eZŸåÒ«»×)áZ}3øÎ’¤ÎN©¥­£Ï¶q‘A 2èØ9—Üí·Ûœ¿Ú¬ÿyø´$ii|¸,æéjiíÐô ƒŽÕ4èÜ•f}c¾I!~jkïTÕ…Fý®ò²Üíúæ|“¹ºýý¤Î®¾>9s])  0øèáÙ]³ÉÿþÙe-™3½ß~&Ú„Çrsfv¯™Ûõ…û˜ò³n_ɘ´Y…¹iw_IrÕ©V™cê½FÅ[¶Hö«¬¢Bïï}Fu»òuäÖjj»ÊêÒ”w¤Bû7H»6©¨ªû¸MY²'½ªÒŠ •íM‘=;_G\Ýû²×«üÖ¾å²goRñÔúEF gð5›Íý¶ûÕ¯~¥ýèGúÑ~¤]»véPLL×ß––½óÎ;zôÑGµ}ûv}÷»ßÕ§Ÿ~ªúúzµ´´è·¿ý­î¿ÿ~ýøÇ?ÖêÕ«õÞ{ïéêÕÛ÷q¶··ëÝwßÔÁW’dLКmkTµ%OewÍÆöÿ™< 9G;-ÅÚ–_©––2ٲʔœ»Q–€H¥çíÕIkî ¾]bÓlÚ»¦RÏg¨jÎ?p_°²ÿÛ\ýÕ2³Ïêú…@_Á÷5 
*=Ö÷-¤ÓüÔÑ)5º^u`ôóQCK›~ùû³:í¼¡'âMºÖÔª}ïŸW…ãšæGOÓãóMzà¾`-™;]§ÍÊ;tZŸœ¹®è°M2èðñ:µ´uêÓ³×õïŸ]Ö÷÷Û'xà'?T–œíV¦²ŠjooNzQ…;3dÕo\b•QP&©E.§CŽj‡\’n/tˆWf†¥kÉtl¼ÌªU[’£\ÅÇ–kC^÷rêØ å•etâ(Wñ±UÚ¸ûæ¾t­ËØ.[¹CKGS+¼Y]]~ó›ß(55uÀà+uÝó{ÿý÷K’®_¿®ƒª¬¬Lßüæ7 ï|ç;joo×µk×téÒ%µ¶¶ª££Cuuuº~ýºd0£gŸ}VRׯÚÚÚô»ßýN&“I+W®œ¼Á·[@âmÉX­mùvíÏ™{{Ç@ŸÉ3ÌJÎÙ©#©Û´Þ^«–ŒÝ*Lz¨ˆM³i¯;[™YFíÞ9©f€ŸˆWXP×÷,uA„üýþÿöî.¦­óŽãø×Û‡›;²:šÂ’ÖINÐh7Á$–ˆªä¢T“Ö]”Ü„š‹r¤©©&ÁMˆ457‰”B*µ¹XUMU­4]¥KU©nµ n!iR’4n3^L1Æ66ö.t$@Í[œšßç Îñyøë\3÷øB ð+"""" p:lÚ´‰[·náõzÓÞ¹Ùétâóù¸xñ";vìÀf³ñÉ'Ÿðá‡RZZJiiél[±Øôp¥Í6:âr¹ddd„’’’Õ¹¹5c÷¯ªÝœ|ö5\3‡ë“Øžâ…Ö.ž?\Ç¡£UKÜÙ9€¿·ªÆ¶Ä­¾Þ£Øî°˜±ÛrøÝv&“iIÁwt"€Ãx0úý¢ÜAA^.f“‰d ɆՌÍbfw¥›Ý•߇¯¡ñÅv+On ¬Ð`2‘db‘åÅÚÉ„‡~C~ºš[¾Ÿêlµ2999=zo t] WR÷ìí£µjîÝú%Z†;¸üb€#‡Cì;u¼@à,-'z¸&‡— ü ‡€û°ÃKÏÑúvžº¿¬¡ôîYDDDDÖ‹ÅB]]çÎãâÅ‹ÔÖÖ.ëÕEÁ`Ï?ÿœ½{÷R^^ν{÷¸sçð}èÅbØíö®ÍÏϧ¾¾žÛ·oóÑG±gÏ,ËÊnl­9êxù•ÓìéìáYÓýþÅúä³tqP[ÛO×1?5m¾4p€³/½@o]‡š¼Üë>»=LúóìèêRF|ïcŒG<^`£Øne$<=/ÖšcfçærL&®ß›˜sMt2I,žäBÿŸÞúnιßl)¢ÄiãÓïèý×µ?+¦zsá¼{±v2ᡯùíëyiîß÷ߟ»¸óÝvx³ák>€÷ôA:Ïö3ý?žË´óòÊ‹3“øCÓ!–þ“=ø™üÿyÏóóÖÐTÙËé™vC—éjlâ­™sg8yz期®¦§h¿Zþ— """"ë‚Íf£¾¾žH$Â¥K—ÒÚíybb‚+W®°uëVòòò€éu»ñxœD"A__ããã¤R)\.N§“þþ~‰Ín‘·:IDAT###twws÷îÝ9mVVV‹Å~4¯Orï>ÀÇÎøÿw`Ñ>y€³p ƒ®ŽƒÔ]>ÈÿÌ^+®Ç!šoAïï?ÂÁwÆg·Ç8u˜h|:ä.uªóHx’«ß†)²[h¨tóDqî|+{~¹—ÝÊÕoÂ$SsGb†#$’)¶—åSVh°Ó[ÀË»6óû¥³Ÿ±ä˜(+4¨Øh'ǘ 1•"™Jξv)v¦‡>ò[µ¯›Vÿ^Ž9Úf×øzÛz8J3-§=¼ÒÑ„g¡‹=¼öއžÎv_½Å8ù ¼¼œÂÂùGê[[éùøÈìï õÉû{šéä§=€‡Öö:ž?x„ºSmøl^|MÛÿ4ã‡Îß·‹´_Û[Ôy=lð13üë-Å GøÛ×ø^è&–H²Ó[Àk¦ÓÖxtŠK׃||#HmEñœÏßœàã£üê§…¼øt©TŠÀhŒ¿_!™‚ÍîÇxr““íen OàÈË¡ø1 }_ó]$Á6O>¹9&þúé· ¶“ ¦T*µì ×~¿ŸÏ·jÅÄb°Àr…¬³Ú߈ˆˆˆ¬¯¾újvCªõ(Ýû_}ÜtïùO§¾ÀÈËÌFOÙ ‰òçç·/ûú†††‡?íy1ë%øŠˆˆˆˆˆÈÃõH…_‘µ ð+""""""YOáWDDDDDD²žÂ¯ˆˆˆˆˆˆd=…_Éz ¿""""""’õV~ à ‰¬V-ëF$Á0ôŽ/‘ ‹ÅB<ÏtDZX,i}v½åƒ¥ôëíF.Édj+ÊNÉd G^zÏàbV~‹ŠŠƒ+.b½ ƒeº IS~~>ããã™.##Âá0v»=­Ï®·|°”~ýö2'É©©5®(;%“I¶z+ngEá×ív366F XWÿáY®H$B `ll ·ÛérDDDD$M‡ƒH$Âèè躎Ç㌎Ž211ÓéLëšõ’–Ó¯¯Ûºk$SNS2™"‘˜Â–“â·Û6¬¸=S*•ZÑ7?99ÉÐÐÁ`h4ºâ‚²™aáv»±Z­™.GDDDD–`jjб±1ÂáðºÀ‹»ÝŽÓé$'''íëÖC>Xn¿>Mpþ߃\ „E²ÿZ)»‘ËÏËœüvÛFîŠÚjhhXyøy”544h·gÉ~ ¿""""""’õ~EDDDDD$ë)üŠˆˆˆˆˆHÖSø‘¬§ð+""""""YÍd2)üŠˆˆˆˆˆHöSø‘¬§ð+""""""Yχ3]ƒˆˆˆˆˆˆÈš‡Ã˜ÍfÌ_~ùe¦kYׯ_'77óo¼‘éZDDDDDDDÖĉ'0 
óÍ›7yýõ×3]ˆˆˆˆˆˆÈª:~ü87nÜÀ0 ro¿ý6W®\¡¹¹™-[¶`·Û3]£ˆˆˆˆˆˆÈ’…Ãa®]»Fww77oÞ¤°°³ÙÌBW&Ùá5ØRIEND®B`‚magnum-6.1.0/doc/source/images/cluster-template.png0000666000175100017510000014233213244017334022350 0ustar zuulzuul00000000000000‰PNG  IHDR…ð²^‘ pHYsttÞfxtIMEà * Ì>2tEXtAuthor©®ÌH tEXtDescription !# tEXtCopyright¬Ì:tEXtCreation time5÷ tEXtSoftware]pÿ: tEXtDisclaimer·À´tEXtWarningÀæ‡tEXtSourceõÿƒëtEXtCommentöÌ–¿tEXtTitle¨îÒ' IDATxœìÝ{|Tõÿñ×Ì$3 ¹@.Ü"h0L ÐEê‰í×%® nÛ kü¹†v v t+ì£%j®•^n…ZMÚBl%ê´-$*& „[¹A&!™Ifæ÷Ç$!÷û…÷óñàá\¾ç{>çÌ íyóý~¡¬¬Ì…ˆˆˆˆˆˆˆˆ +ÆÁ.@DDDDDDDDžB!‘aH¡ˆˆˆˆˆˆˆÈ0¤PHDDDDDDDdR($"""""""2 yôy.¸\8ëjqÕÔ଩ÆYkÇe³á´Û¨--ÆYYÃjÅ~ásê.—Qw©ˆºò2ìee¸fÎgê÷ÿ³¹ÏK7C_Ü’¾¶ä_lû1ÔTãr:ÁéÀe·ã¬ª¤®²gõUp:›mãrµ±[ƒ—Ë…cêLßü³Þ–uë={šÊ¿¤á=o>^ÓïìrDDDDDDDdé“‘B¹Û^¢îÐA,&&“ƒÁè‡ ÷È¡vFÀ€Ñ`Àêÿë‘—Cο=Eôæm`2õE‰7¤ÚsXSѤB!é–> ….;\”VÚ õ5㘠Nª»t êjñ¹©OúkˆÛ 7Èù‘Ó7k 9ê(µÛ ÒÅôq£ ?FÀ…ÓåÄà2àÄECŒÑ8ŭ†\8 €ÑfÃår5{w¨*eÖ7vô½ôY($"""""""ÒS½…\N'µ55x TÔÕ<‡Ã8êCƒ;24v ƒ;#ªÙ€û©ËåjÕvHs¹Ük%ÕTƒÑˆÁËk°+êöÊJì]nmÆ×W ‰‹ˆˆˆˆˆHÿp_£êÚ³3½…ÜŽ îûÛ{ë×26¬+T¿VP“µ…ž \&uv;§³‡Çµ¶] †Ž?Îo¼Ãá`„ <úè£øúúpùòe^zé%æÏŸO@@{÷îà®»îâî»ï®_ÓÈ­¼¼œ×^{’’L&?þ8“&MàoûùË_X¼x1§N";;ƒÁÀ—¿üeî»ï¾Æ>œ••”lXKÍGÙøÆ}Àï>ǹ{ç5®¯TòÃïcÀÀˆø¯”ô=*ÓÿÌåŸýWu5.§ÿ‡—0jù¿¶:NëÞ¤â~£ÒŠÑLJ±Û~Ùæùøbñ?â(¾ÄØŸþËmÑ]:‡½e?ú®~‹Ú.oáÉßoþ#ÏLïí³Œ}k!#öu6Åvc³}¬y$ƒØ×7ÑÍDDDDDDä:g?EêšÕ¼üI¡qëÙøyŒë³ÜXס½_ŒÆ``|ÌlŒ¦kù’ ÷"ƒÑ#Gb™€1r*¦9_Á÷5L?†çcËðxü›T'ü#ïœ8ÁÛÇSTQ— ‡Ó ¯§Ž}úé§üîw¿Ã××—ððp>ÿüs~ö³ŸQ]]ݬ]nn.{öìᦛnÂårqàÀrrr߯¨¨àç?ÿ9¥¥¥DFFâr¹øÍo~CEE7Ýäžîõ‡?ü#GކÓéäàÁƒäææ6öSüïÏPóQ6^wÄà3ûé<¼cfã1z æð[ñúÒÌ·L 2õ÷xN Ãkö—1úúRñû]\Ùù«fõ—½ôcÊö".‡ï/ÍÅó¦ GŒhu>Ê·¾€£ø¾÷ÿý€B•E»Ôr±¨²Ÿª‘áêTêúÆ@à|úzžO+ÄŠ®o½_SÈå¢èÃ,uµxÝ‹GqßYÌéiÆôôw0ƒÁÍFÕ~šCÉÕ«àrqìâE>-ºˆÕVƒÃ{ñߎ`tw{ã7xê©§ðòòâ­·ÞâÈ‘#dee1þüÆvååå<ôÐCDEEqòäI^{í5þð‡?0}út>øàl6O<ñ“&Mâ³Ï>ã÷¿ÿ=ûöíãŸþéŸððpŸ¦ššž|òInºé&>üðCöìÙCzz:QQQÔž/Àvâ#bï!øû?ÄUW‡«Ö•ŒÞ”LÙ ÿEå¾?ã÷ÿĈ{ã0ÔÇ]c~ús F“ Û±O¹øÿþ…ª}o1ré7°ŸWÆé£­¯6Ÿ+‡>+Ô#×éuhïתŸÖp;ùÏ.^ÄîpP[WǘèéÜ€ÁÜÁ4¡ú…¤]€ÑQË/3á~#ñ21e¼ƒë¶Û1x´.³a„Îm·Ý†Åbàî»ïæÈ‘#dgg7 …Ìf3QQQ¸\.n½õVFMqq1çÎcâĉ9r??¿ÆébÓ¦MÃÓÓ“Ï>û¬Ù>GM›6½{÷RUåN V«»‘Ãá>×ê6q5aF#ãµ «i¸Ó0º§îÒµ/qùO@Кum –ÖAYå»pÕÕá÷ØRŒ>] 
gúRhìã<:ñ(çCçás4ôcem¶7ïQ憞æ'uVt—žÿ)éÇË©õ `æ·6²aÑdÌïý„·ŽÛ {œÛ2ªŠ7kí,þôe>±û-|Óü;žÿÉ[·‡÷Üz¾3o\óa{ìcÍ#é„~3”c¿{‹ãöûkªì(i©Gh+c #aÁÄÖÛˆˆˆˆˆˆÈ db¨'d5ŸË2eb@ç›^¯×¡Ø9·?ôÓm]ccÖ¢¦÷b:ZŸÜ}ÌÕxW1'Eå%x™L˜M&ªÏÁYSƒÑ»õT§¶Lôóåæ‘¾8]î=}|Úm{ùòe‚ƒƒ¦öõõÅb±4¾× $$ ±ÝÍ7ßLqq1W®\¡¼¼>üðC\.N§³ÙLmm-555ý4L#kX÷Èb±PWW€eÊ4Œ#|¸úA—_Þâ­Óŵ‘êŠ/aMy{î'Í^wVUb?™‡ÑÛKÔôv·w”Sùö[øü]¾q_ëtýÆ'ÀάYáí´é¬“"Òžÿ=žå‹ýT*O?õ+ÆnbA`iϯçâ‚—ùÃÖÉpô¿yz5ŒkØ6‹´c²åïÀ‘—xxÝ7¨üÖ6ýñìû×ñÈói$üù[LmµÏOÈ*{œ-|ߢ4Ö|}=©·ÿ–G'·hf/â`êë-ÿ~<™òT, šˆˆˆˆˆˆô«©'óYOó‡óîçžS¾É·c;%t_‡UÇÒx½á€š cê¢Þ]‰öÍ-éë1òwÆáš8½½0Õv}µ'..' ·$suж´´ ÕÊFŒÍfÃét6¾fj1­áù¥K—ƒž¢¢"öìÙÓØÆÓÓ³ÕvmŒXjd4ðtå/þ˜Š”×±ågôÿ £ŸGÖ=©”oIÆàáçM0˜-¸ì6\uu`0` L£³îIÀ`Üq*•çpÞNàé#ioÊæÔ±„vØË8¶þ°SYvŽó§ÏQ î;›Ë"íx,_ß4_€éòõ™ ½qÛp%Lw¿>•鵉­o;n,Ô¶w´&Û '”‹”µõµϦ_Úoln y2å›[Hn3Ñ‘Q´Ÿuÿ²‘CÜÎ7¿ó2[|˱›}±ŸNã§ßǹ‰¼å[LmóÂí:¾ÅÌÔoÿœ-ü O7 †BãX¿ñ;Ìëå¢Õ} ¹pQn³ã]žœN\:\,ÚU?Ý  º®£Á€ÃÙ囵«éÅZí³~Ê[ÓgòäÉ|ík_ÃѤ—Ë…W7n!ïso<–iQÿÇw±å¥xͳŒyéçmN¨ù8›ò-ÉxŒǘÿ1c)Z±ûéS]Þ'€ÁlÆ8r•IÃïÿ Ï[ÚˆÀ¸Yhö…ØGÛ~̾Ø÷wÔ‹S©kXýòiçsûL3“áªÎqš_d\³àÙ£™sÝØ®Y0T=ª@HDDDDDdðTrð~¡*€OøÕú§Z79ý:ϧÆòÛ¶†â\ïס-ƒ¡†@¨–Iê›[Ò7½Ý|7·÷›Šÿ„P®œ?GÓ½¾Pc'$CãÆ¹¾éˆ  ÕÔ1 YÐ`³ÙûhiTWWG``ïï çrc·þ‚ ß\BíÙ³ÔæŸÁ<9âZƒ&ÇTñúk}ïxŒ €Ëqíx àrá(¾ˆ«®ýWþü3xxpeÇv*~÷Ak~ÐûT­Î¥ÿ„OnÿcSWó«ãm·ñIXËsžtRv_½\Åã¿ú#‹&EûHz=£~㉄s”²JpÿB+)+ïË#è¢qñlúu ©GHˆW $"""""2¸Ns4½óYJÓ²8õèdZÅBCá:´>z9<ò鋘ÕGëf÷~¤Á@Ȭ/qþÐ_¡E@ãr¹:žØklT~F£³Ñ„. 
pÖO£jK@€{¡¨sçÎár¹0 ”––âr¹;vl³¶õ òóó÷zD ý4¼ÖŒ#F`žx35Ÿ|Ôl$и5€ýø1wûQ×½ªÍ?s­_LÁ£©»XDÍÿÂ'î«mîÏ`öÄ÷E\Ù±êƒïã¸rÓ¨.,¤ÕÇ&Æ=Ç8³ó·73µ½rލ¤¬Êýߣ©¯ó ÄÙ‰sùÇ™?åW¿I`ú·§Ã±4~—¾3ûð ºjÜ, òâõ""""""”ÑÆŠ;­],£ýèh\‡bfrü¢>í±O¦U]ºä-äjž\Nœ¶p¹ÜoÀYqÛç¨ÎûŒ’ÃÃäpR‡‹òj—”QRcÃîp2gÅL‚Û™zÑð”——Ⱦ}û˜;wn³¶.—‹wß}—{ï½—#GŽP^^Ž¿¿?AAAÌš5‹#GŽðË_þ’¥K—îÑEf³¹ÃihÍ8ØÏœÂsâ$œ••ØÏåcðôÄXg4ƒÁ.¶ÜOðþÊ]Gø`¹-šêÿ;HÍßâ9!”Êô?·êvÔ·Ÿ¡dÝ÷(ýñð¼% ÁéÀ8¢ÉŠÍ.w€ä{ÿßSùö[\ùåË®^Ûµºû@`ø,ÊŸ~„û6wqÏpžÚÒΨ¬Ày|ó©tÖ&ÝGªïLî)¾™µ–cçíÄ $þ¹-T=¿Ž‡ï«Äwæ·XGûìHDDDDDDdÈ1ûÒ¥™XžíÌóÆ×¡†²²²NÆò´ÍårQWçÀåtðÞwžá‹Ì¿bî ¸°;]X|§ÓÉüùóq^­ÂúÆï°–f ~‰ã5#¦Ù>®þõU{ÿˆËá`äãßÀrû jóÏpù—?‡º:ü[ŠÓVƒí£lF=ùíæÛ|ŸÊ´?‚½ψHF~ýÔ]ºÈÕ÷ÒñþÒ,Q·7¶«=}ÏðFÌ›ˆˆˆˆˆˆˆH[z)t.÷÷l®ÿ³ó<§,þ$.¸™é}µƒn eë7¦ðLÌ ì\DDDDDDDäº×?ÓÇŠ‹øq¾ ‚|ˆïî¶žà6÷r”øöÁ(¹‘y3+Ì»Ãáaþ„P5"""""""­ß¢“q&•P?%Ì›²RƒŒÿŒgÏå¿æ1Ñ  ŽSŸfÅ»­¦œÄïÃäúvçŽÎ÷ÒŠ¹Œ~K³>ÜïùîÊúélQ³ØÁžx»¿ŽR†&¾ûÄ­Ä9¯}ïZ˜uïm|ÆÌ¥%ü`ÇYŽ B•"""""""ý©_F Àã¡ì—*Hi|Õ“qŽK,ßžËÞ÷ãû ‚¬(â?·å?×2qÆ-lº­eOãøþWÇ0®¢ˆÿÜ~„åï_%pÊD¾?»þ½Æ>ްüý§ÜÄ·#/’´­„s¸§´)’VæßÄ‚ #àÁäá¼r¯³·!_ÀÈãó¥J‘~Õg#…&FÍ⽨kÏ+¯\aç_Îs±ñ'§Îžçt0?ŒÉµ|ü·ódTiÅ, ›È´¨Ñp²I§ó˜ìaãðþúv‡ÏrdútæMž¿æ}>Æ?î«£‘Ú§ùŘi*lj½ªþ¡ 3u”ŸhÙ¦ÅÀ%f,|å‘Y¼×ôõ:ÏúéŒÔ”Ï ±y0´cL-!ÞÍ¡•)››"""""""7ŽÁY޹Ê ‘@³PÇÙF;‡_ÿ„uç[ô1j;}ˆtEë`hbHÃÏAˆˆˆˆˆˆÜøúçîcù œSužLûR(±þfbF3Í«ŽS'‹ÛhgaÆ‚Pbýalä-ìxfo$µèÂo»…×VÇðÊ€:ì€Ùâ7'C‡;úÅÙZì¯)‘áapB!ŠøÑþRÊüÇñ˧ó“=9÷q?nuú¦íf±kae—øqJi«÷¶u$œýœì¸ÌéR'ãn‰~t2”4 †‰ˆˆˆˆˆÈðaø»Ÿv vÍ´¸%½ÈÀ03=ÌBQ¾µ_¡÷VÏê·¾­ùÙdÚGFZ6ÙyTÖ¿î;)‚˜˜Dâï&>2¸“^rHžù»XÌŽ¬$¢û­â¾a=±—äì0Ö/¹Þ+u+Ù»Šû7döAOsX÷öVvö‘Þ¨r’™ù„û›Êâd%õïç?Ô¾g"""""]1H#…Ú7v¤f”ŸìJdx±s´Ÿ¡þb=±—ËbYðÐ 6¼Ê&@eAR7ñÜc÷3wÉzöæÛ­Ö>eËgïú%Ä?¶´ ƒ]ŒÜ°ô=‘Øu -zävÅŒÀ^XÎÞŠÁ®Fäzgãīˈl©¹•7ìyilxh ÉÙÖ~®m䥰!-¯ÉzP"ý@ß3¹ ÎÝÇÚ‘úúG¤v"C‚œä%<±»àÚK¾QÄ-_ÌÒØ¹„…øaihi-$/c7Û’w“] PÀîËz-…e‘ƒPú0¼p+Y Û?'y&×fEeÑϳ¢DDDDDd»®F ‰Hר%³¢I 4)áyöd¼ÊÆ%ñD6 „,~!D/Lâ•}o²f޹þÕ¶­}•Zµˆˆˆˆˆˆ\O ‰ 9'ؽ)µq:‹yÎ:¶®'¤³Í,a$nÞÊâñõÏ ¶³;ã˜F&""""""=r]M‘ÎY3v²­aÁ[s7.ì<j`‰aùÊ»ÙýÜÀNÚÞC$ÅÆã×ÅÍ›Þ9kκ·ÙÚÁ­¯ºÔ¶$‡½»w“–™MN^©;èòDDL ‰‰Kˆ›Öª¶6ïÞµû fÖO¹jw_¶BrÒwóêîC×îÎf"bÎÝíî«Q“;]¹§tr(yR2)µ› Š˜CÜòU¬Œ k6JkÀôâØšžÏÆéjÖìÛ¾í{PP 
øNâî…ËY¾<žÈ¦Õ·Û™žI^©ÌADÅ&°xé2â#ÛÚc“»ÛÍYÇÛ[Œ•û¶³}û^¸wƤ˜¹Ä.ZʲøÈ.7;:7'22IIO!77ß]'f‚"¢‰™“ÀâÄ8¢Cšrƒò=kÜy÷"""""=¡PHdH±’žÞølüò¥ÄvóêÐ/v9ϯ‰#$&†ˆ°àÁ 1€Â}I,yî­–È®, ï@›¤²É÷nÖ¼º‘İÞUYr(™§Wï&¯åjÁöRò¤²é@*É ¬Û¼žøN6+kbuzCgvJó]ºjPÎeߨN¼ÊŠ'¶‘Û´¿Êì~Ž{Óy~W2ñ!í´³—’›¾“çÒSH~Éî°}IKxî@³ûåQÎÎìtvï\Ìæ-IÌm?{ì¸÷Œõ¬^›Öúܸ‹¥4/›ô¼lÒwn"bé+ìXݫϰ/>‹ü]ˆˆˆˆˆhú˜È’Gvc&d&6¦+E["‰OŒ'z![N2«Úºðm©ò›–$sÈÖó}î]ÅO·q¡Þ‚=/çZÅÞÂNúÛ½‰µé-;‹"qNXϋ졾>6ò÷±ºeÐÓTå6lË ¤poÇí¨äÀsÉìëpv¢ì­«ZB-ëÞÍÓ+’ÉéÁçoÍXËC«Û „Z×’·sz1²/>‹ü]ˆˆˆˆˆ€F ‰ -……ä5>‰%&lðJé¹BÒ’wã^&ÛLÔÒͬ_CXpãýÒ(ÌIcûúM¤öTv§/gnýTÆ»w5™ÒÅâdµu›®¯²jCfýúKf"’HZv7Q˜’ülÒ6­e[v%Ø3Ù°êU"R–Ñ^Üv =ÌQ,ݺ‘å1!X›Õ†e çóôñ¥lÚ€ÝîKÌʬIœK˜€•{“Y»!Àž¾•' (°›‰X¼ŽKcë?;…‡¶±ºq¤ÌÒ2JˆooŠav22íÀ$Ö¬gyB4!ÜÓ¯Ò¶³~“{ìfEr,ûׯt#ÄùY"ê_ÉÌÌ¥ûc8¬ìÛ¾­ñ"{κ7Ùµ>‘˜f#¤,‡ÍeÙ+ûرx’û¥‚mlïxˆ w¯Ûʪú@Àâ7Ðc®úçØìv˜³n¯,k„üˆ\¸†¤E×î\WPQÏîbWR|“ÏÎBÈÜ$¶¬™ÓØ_f^í²Û±3‰Å;v±>±>°„¸ž];3©¡iê.º5ˆ''íõën™mfÇúD¢[Ü•‹!‘ñ¬ze+‡–™Ëº«>‹û]ˆˆˆˆˆ\£PHd¨Š c|ç­®?6Ûµ Ùü¶wU¶„]YYdee‘µ1¶û ë–dvÀýмh3›v´¾…褬¬?¡Ò2(i·í"»»S_ë¯c¿œ•möe!znìµ§æE¬J k³‹àèØÆÐ‚üÂÎ#D­I&)ºí@ͽ’õ·Ês:ê²è$Õw­Ûñ#²À IDATË$¢cºÞu+}õY ÔïBDDDD¤ …B"2°‚Èi¸Ö·§òt|<«6¦°/'Ÿ’>\#Åš›Iý£b»2õ(’èØú‡™™ä¶wQÍ`¯ïÛ_ÇfŽ‹iwj™_Pе' ±Dµ·S_3Aí¼ÕÜïëà} Ñ ‰ÁgfN}õõ°Y­žÈf_J2k—=ƦÌηiOŸ}ô»iJk ‰ )fÌà^»$3 ÀÐ[Q$’Ĥ8¶¯®_óÅ^Jfê&2SÝïúNº›¸%q$Ή%2¤çéKii~ããÔ§ç‘Ú­­ó)-…6‡aL êbèÑúëØbº8öÌâ×õ}Ú1>†ˆÎ¾¼!aDƒ{J×…R¬ÐýýZóÉÎ8ľŒ4ró É+ètçné»Ïb`~"""""M)JƇõ#J)µÒvpqó‹]ÇŽgKYñBv«;-U uÓRsПMbe|X·Ãk~^çÚ•G~{#…ú"é¥~;¶Ö…pͯI›ìün† %d¿úIÛZÇúR_~ñ»iJÓÇD†’àˆkSLÈ ;¿gÝXsö²÷Ð ­ƒ5/ÅBä’WÈx{ë–ÆåÛv+{i&»Ÿ{ˆ%É9}6uH†¨ ?Ì·ªg#{ãC¬h3òeRD q‹—²æù¼ùö›¬›ÓFƒB¿ Xï­ž5Ø5ˆH—…=Ç ©î[hgdŸ )º½U`ÚcåЫØP¿8î¢-Y;wÆG³pU4 WmÄVx‚ŒÌ4¤âPvA³‹ù‚Ý+HŽÝÏÚ˜®×iOýÄ#ïÈ¢­;‰U7ı•Ú©¤“‘?ÖRJwedQ=[v2«R¾A“¸ûÙå,KH­oWH޽ËU·Ò/ŸE?þ.DDDDDšÒH!‘!ÅBT|B㈉ ÛwvïVÝùé쮄0/"¾”ÁaaKíOÊ)ÈëÞê½–Hâ“ØøJ YÙ³c  Gj'5£{Ót‚Æ_«5;/¿[Û^ïnˆcË;AagÃ\ò³ÉhxÒÅ©c6²÷¥º×åÁÌ¢-»H^OdH[Àò²»Vr[úû³èëß…ˆˆˆˆHS …D†KL"Ë'Õ?±§³ví^ »¼u!{““É­6~y"=d—_ÚÁ»'ÈÉhûÛ¡Ìœ9“™3g²$%¿í-„D'²~cÒµÛ›wSpäµ[£ç½šÁ‰ös=º1Ž-•”MÙ{ShÄÓÕo‚•Ò cˆŽèø n;´”.öÜ–¾ú,êw!""""Ò”B!‘!'’Åk7ÞªÛž¹Uë÷r¢³C¶|R’–°!³þ2ÛGRb7§ž™ƒ÷KÊ>µ3Ò£0eÛ.´ýž%:†»ë罚Bv£El…'ȯ<>¨ë+Ê6‡Ä¨úǶ±vã!J:joÍ&9Ñ}Q>sîÆët7ȱض•CínÍÞÆÆÔ†ïê"çôdEõú»{µÃ–ŸÂêÕ©\›=f£ÛËlõÑg1`¿ ‘& ‰ 
A–˜•l\<©ñyAÚ‹_ÂÚ]û8Qhm¶ø¬­$Ÿì”,‹ˆM¦|™‰Û¸†Øî^g‡E3§qæJ*«ŸHf_“Û'Ù sHY¿„‡6eb6·s±ê7—…qõï]ØÍŠ%IìÊ.l~1n+$'e=O4^°'qN‹«i@u(ìV¡X IKi8K©OóÀ’õ¤ä4ß—ÍZHÎÞdV,\Áî÷k“z1‚j`Ü Çv!•§ZÁ«Ù…×¾³¶B²_]Á»©/™¨¤%ݨ9˜ˆÆÕØ/°m…»ÿkçņµ0‡½É+ˆh™Íֺྣ_Sõ=ë«ß…ˆˆˆˆH7è–ô"C’…è¤WØb[ÁÓ©õW˜ö<Ò_xŽô:ÛÖ—»ŸßÅÆn'B€%†Äå“HÝVP¿ËÝ<÷ÐnžkÙnÒbÖ%æ³á…¶Öò#våædlp_à…è¨ìI‹7²¸åµoÐx¨_â·`7+ìvÝÊd,s¯ök‰^ÎÆ•Ù<±-;`ÏKcÓilê`_æ9ëØºìú¿ÐúÇETT.¹¹Ùl[ñÛÚiež³Ž‰aÝê92a%s¶?çþ~UvÜs¥Ø[®~=`ß³>ú]ˆˆˆˆˆtƒF ‰ YÁÌ]›Âžçµ{ëê–Ì ¬{m/Éñ!=Þkä²­<w;œ”Àæ­It8Û'd!›w­¡£nÜÌD,ÞÂ+IÑ´(<—ĸ֣‘*óJ¹6˜ÃBä²Wyóù":ec&"a»6/¤çgg õc‹fÕæ-,šÔÞûf"žçÍ­=¨98žÍ¯,íü¼˜#HX÷¯¬l ”Gv~‹¡@ù=ë‹ß…ˆˆˆˆH7h¤È¿–WãW‘ŸÁ¡}¤åæ’ŸWÚ¸NŠï¤bbˆ]K\tH\D†ŸœALv Ûw¥p 3R;øNŠanârž]C0PÒÉ,a‰$gÜMÎÞÝìNË$;ÇÝOcÍs³lqÑ!íUìGìÆ=¼±‰äÝ™ä•Ú_&aà 4ͤBâ׳kî2¥ï"%%—Üü†}™ Šˆ&&&Ž„Ä8æ†õdÝšÁ5”Í<—µ)o¿ë¶îÌ ×ýE"fn"K—/ìUÍ–èUìÚËÞ¯²ûP6yî©“æ ¢¢HŒKäîøH÷  ‰Œß¶ @ú¡ÖÅÎmò;ØïYï"""""]gp¹\®Á.BDD†ƒ’g>{Öbvd%=ȉˆˆˆˆ gš>&"""""""2 )† ‰ˆˆˆˆˆˆˆ C …d˜«äÜùòÁ.BDDDDDDdÀéîc­ÔQyò4/¾cåàUµ€Ï3 çNàÉA´™È“3ÜM.`åo*8ÈŸ¾Žû.Ã¥ìÝ~†—*L<óø,sŽ7_$­E%>|?1‚#=€ú6þþ¼¶<’qõmŠÞùÇŽ:H¸ >=BÒùv+t,ï=B›û E¿ÃC%¿yœïåÃìÙaüð®àÁ.HDDDDDDdÀ(j¡2÷8ËÞ®aÚì‰ì¹k æº+|öΞ}÷ §+<ø¯»FÖ·©&pòXv%L$Уšsïçñì»§8Z|3Ûîk.”Uòq |Å ¸r™ƒíì¼1˜©Ã~þ4Ͼ^AÒë§»ÖÌxdïíHp®Å~†¹Ò ü&ßê<œÏ@Áˆˆˆˆˆˆ š>ÖÌyv¾SMùè@¾{×Ì#™öÕ;x{õ,þ뮑×ÚøûóÃ'èàÍÄ»"xf<;úû¯4éÒdÀ;GNÕ`?UÉ ŒµtT‡æÐ‰<:¨¨áØ•ŽÚJÝÊ|Àå†Þ/ä¢DndÑ$ee‘••E–nG/""""2è 5uÒÊ>L Y?Õ« ǯ°×á7µiãÍWnóìüõTõµ—C,ÌöŸ*à³³v0`Þ˜þ8é.ß)ÓxAÁˆˆˆˆˆˆ Cš>ÖDå…Zª€ðñAí·¹ä ˜:¾iF.–]…ІG0;°†ôKaàØ`ŒÓ©"µÝ½Ôa?_Àï.€g /ÓGöôˆÚQQÁc›4{iêô[Øv_ûÇ}#ó2øŒgÓª8Ý ¡©d""""""rcS(ÔCöº:ºvú<™j‚£5; m0+4ß ¥­›¶kBÇû³íÁpû¬êzZS¨ß)ÓøaáG<öaîCçÙw[0ñÃ3'‘a@¡P¾cLx§/”BTÛi@c›âË@ó‘$v›ÃÝÆâÙìõqa^ø­"e¿ƒc˜xfŠ?\h£s…5ƒ¦òøgüàúúgæÍU $""""""74­)ÔÔ”‘,4Á±Â+TvÒæôÉ’†{yÕ«äðq;`&þ6ÿæÛÜêËàX…L^„÷h=!&û6öVï0w¸pµt¤òxÃÔ1pBº=½ˆˆˆˆˆˆÜø 5ÊÒû¼ (.ãÇï_r‡/uWøì/qÿæ#¬üßÒkm®ZùÑ›ùÕTpê/'ùј:ý&´a2ŠõsÀä±£îŸÅ‚Ot¾2BÇòÞ#+ìûu?.ùÓ7Âñmhsé+SÁ±Æ¶ïóߢÚ´ÁߟזG2޶Ûx𮥣÷Û®£ƒó$ÝvùòeΞ=Kpp0#FŒÀhü~gΜaòäÉ †Á.EDDDZp¹\\½z•‹/Þf0d·Û9sæ þþþàíí=•ŠôLuu5åååTTTpË-·´ 9­VжÿW]FO0š¡R‘>ætଭÃ`ödÜ·Vô*#…*s³ìí¦ÍžÈž»Æ`®»ÂgïœáÙwÏpºÂƒÿºkd}›j'eWÂD=ª9÷~Ͼ{Š£Å7³í¾¦a…ƒŸ½s†ÝÒ*¤˜ñÈ,Þ”ÆP¦‰ÒKì+«\VÉ‘+°`dgGÑþ>µµ¯ÛTpøõ<Öì?Çä1Á$†vVCÖÒ•:ztž¤=EEEŒ3†#F v)Íxyya2é\EDD®7.— 
³ÙŒÁ` °°ÈÈÈVmJJJð÷÷'$$d*éooïÆ ³¤¤¤Íï±õàî@ÈbèòDúÑ„ÑbÂe·qåƒ÷ øÚß÷¸«! gç;Õ”â»wÁ à1’i_½ƒ·¿Ú¢¿?ÛœH ÞL¼+‚gÎç²îèìŸÜ,¨½PÆÎ“ù×[» Ê>­ä(âB=ÙÞξ£,¸Ë¿Óíz³Ï¶ù3û6/8_Íþ“å$†t¾I¿ÔÒv==O£{˜}<ÛoRu|®Ÿ¦ªªŠ±cÇv­˜L&<<†À_%"""ÃÉdÂßߟ¢¢¢6ß///ç–[nàªDúV@@gΜi3ºzì3÷!‘ÁÃ[Þ èE(4øóO:sÒÊ>L Ùþ¨–ãWØë€ð›ƒZŒlñæ+·yvþzªúÚË&>¸HÝš¶ÿç±#W8rªð">ÁyÀ‘SÅTv¶Y¯öÙºÌþëZZÕÑÃó4 *ùøÍ£<ðr?;ÞvE•Ç?cùËŸ²äÍs×IÍ""""Ýg4ñôôlw-ššM“!ÏÛÛ»Ýõ°••š2&7.£‰º^®“u݇B•j©ÂÇµßæ’ƒZ`êø6ÖÕñp¯ur±ìêµ×|üø·É¨¨àÙݼä¿TÄÞ2ð õg†ÏxŒ§qjT‡º²ÏŠ Û|„{šüYùNiû}Ö]á¯'l€™Äé]%Ô«ãïj==Oáìçü8ßE-RÓòZC•Ç?ãÙ´*Nó/±÷ìà”)"""Ò´öŸˆˆ´ç†Gg¯«£«‡4õ¾@bO•’qè_™Òõ}­â;y4àÍì)f¸Ðµ©Qî³+k ÕG |ü½ønâÍ,h?3ëY--öÓª¶NêèÍyêw7Oæ…ÙŸ²ò°òú`"ø×)¾Í!00ov‹oÜrEDDDDDDúÃu?RÈwŒ Oàô…öGÌ4¶)¾Üê=»Í=TÖ×ÒbíŸ[xrº lU¼˜ëìb5¥9ëÌ,¸Í=ÌÖ<Ù—YtqjTöÙ‚¿?¯­¾ƒ?ÝëM`¯3:¦÷ôê¬^[=‹÷þ´ «:¬£—ç©ßy0î®ÛØ6ÛŒ{\“;zñ/¹­¡ÞÕÆè3‘Àu 1e$ Mp¬ðJûaB}›Ó'K8×ìJ·fâok=:eÜ}ãYd*G×å¹T¾ ;ë¶ÕOñúEG ËS£º½Ï6yà;#œï‡¨½ZÅ÷Òò{´ô¾–vêèƒóÔÿZCiŸV+‘aãú…eé}Þ—ñã÷/a¨»Âgùˆû7aåÿ–^ksÕÊÞ̧¨ ‚S9É.ÀÔé7µ3½jof|u ¨:_Â/r«;ߤ_ji]Gßœ§Ð2B""""Ýd³bµZ±Ú»‘¾ç„wä¼§NÅ;2“ofiˆ\džĚB¾QSxÕršß)àÃÔ>#Ì$Þ{ OÎjÒæ,?û „e/7i3™'g´¿³oÌþ5û /ušS4L‰ò`A‹QG㦌`ìQ«{jTÖ'jwŸ-×ñÀ›äÕQÌh«#Ÿ‰<¹ ‚ƒoW“¶ÿ,ñQÓ˜Öùî»VKw4«ã$>–.œ§»üÛ¿›Ü€ª†ø”•‡k™vCBuÔTÖP‡^¾^òãw”ãàáã\²ÖâŒ#FsÛœ»˜d‚K™ìÉ(ftìÌ¡Éã1tx©‹íºÛ¶U~Ÿ÷²‹¨a·Ü—ÀÌn¬ßa_1Af·s¾ÕT;¼ñ6÷ÙaˆˆˆtQ ûVßÏs™€y);­"ºK›e³ë…­ìÌÈ¥Ôîˤ»—±vÍ2búüÿ†Ù°aÁÒÓÍ;ª³d/«îO'îí­,ìUݽ¬±^NòLžØÝΛ‹w•Ô¥O¦gºu.úæx‚) €€„…˜üüÀ僺²RJSÞÀe¯mÕÞ{êTF.¸ƒ¹Éÿ)s:©--¡òÿ2©9}ºÃýùÍ™ƒÏ1T}”53³Ã¶AŒjKJztl>wމό;ÝÇ8ªª¨ú0‹ª?ìVýYcƒÑ__ŠGPÛ‹èÖ•–Rü?;{Õgòs C"|oä?ní¬Í­üû­ðïí¶™È¿­žÈ¿5{-ˆ…˃XØiÛöÚ¡SصúÚÓm«›¾Ù•}¶Õ¦+µƒoToFµ³É˜È~¨¥ku,okÓçéúá†vÅÔbö¹oÉZIþ_ÿÈ›§¨¬kx̓àòÏ QŒê§¿2yëƒ/𠽃{î 'ÀT͹ì÷8¼?|õ^¦öÏnY9ÇŽAè|猧w7?mÑ×¥Lö´Ùî,ïï9‚}ÊÃÜ{cžT¹ÎÙØ;jvMá^V=ô*Ak6òæÆHü°‘¿o«V$³qWÑ}–ä³kÉ.¬9DòÎê¼jl":)‹¬$÷ãœä™£¿¾´Õk׋¾<æ4¦‰ 4ªáøï¶ñê»§¨¬ó`Ô„)DM™€¯G%¿ÉKÛÒÈ÷[¨æÓœóØGßAÜœp̀ɛ‰³ç1Ñ«Œ“Ÿ^hÞ|Ìxx`GôôZj0bTo¡6új÷ÕÐÆ?T‰ˆˆ\§ld¿º‰üåY¿0÷ añkX–ÂöŒÂ>Ü—• y] ªZ¨:{SãP44Ž×# ¯[±Ü|3/o¼&ߊ÷ÔiXn Çàå…×äÉxÝŽÇÈQöU}âU„ÑlÆëÖˆ>©Ï`öÄàѳÙ5à5ùVêÊË)ßû'ìŸN]Y)Ö̃8íµx…w8c@jª†ê1½ŠE¤Ëj>~“߯q_fÙ?ßOX㜽Ë|üÛŸóÇSYü.-Šï=Ö·;®>ΗaÔŒðáH³v?¼”íåS½¥Çxÿà§×8ÁèÅèÛæq×Ô æ}9Jùè½ýœ6ÞFÜ‚©ø·“ÂÔœÞÏžŒbj0â5îî¹+ÀQ‘ÇÁŒO(j¹K™ìɸ̈ÀÊÊjý ÷Ü”ßvÛf{ºDæž 
Î×9¿ç÷_Dóð½S[˸;îá®pŸúcn¾Ÿ¸©æöûºýr“évM÷™Cîvïâ-Ôõc‹#¼ø}>*ªÁ xŽ #fþl&Þˆù¨ˆˆô’ö¦g_ `%/¿ávoÝJ!Ä$&07¤Å°[6ûR!nGd‹NýˆO>D<ÔOEÊ&li);óI|e?IyìZ»šm™¥4‡å×±¬a®Ya×n"-·»9ˆ9+·°yI髞`7À3aG–{dŠ5§ý~º]gv“sÒrúTÉ3Ÿ¨ß¯»Vóô¶LJí¾D-ZÃú¤xÂ,%ìíNm—˜.«ê°Ïtæ|©“ÏGDD†»’Ìílxa';wîdçÎT2ç’¾³áõM<½»‹k)ˆ!l|g{I#'h#‡²‘SJʪ'Èœ³…}YYÚGöªÕ¤’²v-,“CYYܱ”Ò¶‘QÌ­;X ,n(ì ŸžÖÙ9Ûy"m[e‘•µ—ål 9½èIMÏKWçÙuÖg&)¹‘lÎÈbÿæ»ÉØð› Ùš‘ÅÛëÆ“¶6…®µÝ–ÄÆŒ,²v%raÓ*¶å´\a¼Ÿ>“Aàr:±ä㬩qÿ÷j5§Oár:»ÕOíÅ‹¸jk1˜-XÂÂðûÒœÖ J^ßMÕÑñº%ßY_n¶M{íÌ!°ü—ÝÞ8M­«}GxƒË…ójU¯Î €Ïw`5ŠËoÿ™’×wƒÁ€wt4¶óç»_£§'ΪJJ^û¬ÿ×õuŒ:í×dÂ`±Pšú&5§Ob™8 ƒ—¥)o`+(À|Ó¼nolkôóåòŸÓ¨úèCÌ!7á;sf«ý¸}:¶ÏÏséW¿àjÎ'xÆÃϯ[ÇÜÞ¹ …DnXŸsês`Ô¦´ù-“™PÂuµÚ¹³׎ $úfüM`¾é+<ððW¹£qÁfÅGÒÉ© ãž!OÆE¸Û˜oNö†}ø:«a³¹uT-ÅgÏ5n7:ìfÌ&3æ/:kÛÕc1á?õ6nòºJáɆà¥É~z?߬É>»xlž&Œµ¥œþä4_ÔxqÇWæ¡?‡ODDn,œæ~˜ŸIJî"'ÖOã YȲÄ\R2ó_=ÄÚ¹f¬%ùäååc¥É:GMuØO?2û”ŸNú¾Úü˜»ö[Û[¹Ó›œ—®êBŸÿŸ½»‹²Î÷?þBtð†Ñ2!ÊÄJXo˜2§]÷”J«Ò©¬íd¿VWñÝÚÿæDjªÛ² Æxþ<ŽâbΕ”Pù㎡Qõr®¸˜ŠÒRªÎÃqô犋©>ÐÅçB}ö¢"Î:ä¬Çf£kÿkjµg/,äÛ7ß ü³Ïè†oß~õêhLlMé»¶¦éc"W¬®Î7xEg=”h“å„®¢;G9UZ ýëd;N!ÿôU éæaßS®QFŸÚuG7#ÝmÅ|ù]%cƒ¼eS|ñíî© iïSPó÷]NÕ߯Á²^Ô;W¬• Ä×M9¶!·uj7Ù‡ö±ëp6t3ü“(~:¤W+%""W’À¸—Ù]ý¿˜módI6ÀT^ÝYãéc£›ƒ %›|+P7/b+$§4€€ þŸÏfß¾¡vùi6ÀNÞºÙÌ\–G@X$‘cýpÓj#êinœ0l:o¯4°lålîù])†ð>Cü07#}¼Åøs¨Õ/ÕªuFVcôTHØXõ„µÑkÒ®½?çm6|üüðñó£÷¸_Ð{Ü/.”©(-­µOcË5µlE™ó±Ó]üë¿*=nNW£º4§>û”.~º‡^_èuôùÅ¿púË/)ÛùצÇXUEUEÓÊlÕz++9_î<÷ª稭=mÛ7”WåÿM>gnº‰‹KÔTrhÿ§ì¯ΟxØ·Wwºñ§NnCݹ62†ÐC›ùûgÙ ¹ûVš<¶¥Wwºa$4î.FÕ]?ç»ÌÆ—mT;5Å• j­QAÛlä±a ÿO¢¸ë'Py¦”Âì]dïû”CCÆ1¤ C‘ŽÎ£±:¡Àŧoûa4=''z*<»-‡YµÖ¢ÉKMdféb,×ÙÅJS™µkæºy”’­¬Zf#áƒtŠÓ˜ùÎ6÷M{«§5âô"0ò¯}ì%ämzŽGæ§ž6º+y?ÖÂÆ7ØfuRZÊ…DYQ~&Cç—¤·ÑkÒq‚¯¥‹_wç#é+*¨²Û±YvqêÀþZåj®åSe·{,×ã¦ÚK x+[—£¸˜Êòrº]3ß¾}©<é\×ÀÇÐ^#GáãÛ…³‡7ê¸*ËËùáçÚ?~¡¡øß:†žÃÂp«?¢Þ[Œ5»©ZµÞ.]ðéî¼ö1t÷—±ÞTÁžaÃèx5§öï§ìo;/<É­©±ûîlAA½²mIÓÇD®X]ùÓ‘tç,ûß{O Ë/l©(ÿ’¯oä+à*ÓO mõ¶{0Í:é©&Ï_ÇÕÝlýÌÙFeÙìøà}6¹««)e½K%e_䛳ݹ:´5§hðíçΞir¼'÷}Ìûýƒ#g*ñía¤_ïîtñíFk^¹ÈÈé M}–%iy8ÿ&o§Ø²˜EkByæa7kj„Ž%>|)©®ò¶l’ãÍ,J¯Mb£Ô¹ì”µdãpÍU2v›½‘õ´ ÎÀPB ™lû›s1œ’­)TO±¥?‹91•B;àHpP(Õɳ–ÄØH­Z§•ÔÔtJŠÓX›BBŒ»uPÚâ5¹L¸™&äM¯ŸŒ gx8çÏœæÌŸc?z„ªŠ zÜx#Ý  ×ÈQ ø÷ßÐïî_ÖÚÏ[¹ªŠ 8_‰+#ÛØ:*OžäìWtíÓ‡>¿ø Òµ_WÅÞE×¾} 
‰ó†µSñcTUa¸ÖY®GØøtsùï;énúÿ:ÆQQRBey9ç+*©:w®E16E«ÖëãC÷üBB‹C8¾9æ¶œO·®t0¿ë‡@—.àÓ¥IÇì­ï.5¹’…Næ×ãóúŽãì^û2»»úãßõ,åg/Nû!k¹·ÝOxS‡"7À7h,wFÿ“]ŸîcûFçÓ9ºô¼š°èh~Òð¸žñµŒŠ cW6Ÿ¤~]ºqUèíÜ~]}|9<ŸÍûw³ï¦»Õ¤OuÚ  =D2ÎÔ×M\^Ê6µ.=02š[½SsmÀ?Ù_ƦÊ;˜djü±õýÉ팴eðiZ*».Ý®"ô¶±8g`;Ÿ€vjhõÖ"""îøa 0 j`”Kp+? fí’EÄ=WD9þ„ü|:‹>ZDd Po­Ã`â_~Ûs³‰]VŠÃ?„Ÿ'¼Íâh#ͬy›˜=ÓDŠq, ‹“˜e™MN¡¸àP"ãÃX3ûvÊ_ÞÉâhoõ´4Φ-žÊ¢gïÆ”@LÒ3Ì» +`ŒNââgIŠ]BQ¹€ðI,\ï\—‡&ÄØì5 [³ÎH¢ƒ-Ìžï|úØâ•LV§/Ûð5¹Ä||}/q]9™öeݱYvQåpÐsä(âÿ€ÊS§(ß³›ò¬Ïj°9ûU>=n¸CH(W?ôoœûö8çOŸvGÖ§ôùy4WÅÞ>>TÙÏrjß>ì……øöíÛèkwSyë§&×{þ}º½Ã‘Æf³Ñ£G·ÛúöíËÉ“'/qD"­ëäÉ“ôíÛ×í¶ž7ÝÌùs—8"‘K£ª¢¿°a-ªCI!‘bÀ€|÷Ýwœ>}šóçÏ·w8"""Ò”––rìØ1‚ƒƒÝn ¤¬¬Œââb’çÌ™3SVVF`` Û2ÆÛ†O×®T9ì1$WŽó•Îsº[7úü,ªEUùTUUUµRX"ÒÆ~øáŽ?ÎÙ³g©¬Ô‡šˆˆˆxçïïOpp0F£Ñc‡ÃAII 'OžäìÙ³—0:‘–éÞ½;}ûö%00ƒÁà±Üy›ÿž=? [Ù%ŒP¤møúûÓ}ØMôùY]¼\ßCI!‘NHÓÇDDDDDDDD:!%…DDDDDDDD:!%…DDDDDDDD:!%…DDDDDDDD:!%…DDDDDDDD:!%…DDDDDDDD:!%…¤}”d³nÑtbÍ&L¦hâ“Ö’]R½-DS"i%^kh;ö–V!""""""r…RRH.½â4ï^LþØ…|`É"+k+É1ù<;3™œVËâ²îq¬Êi­úDDDDDDD®,J É%f'{í ólÜ0Œø»Y¡©¬I/n¥vlXó­T—ˆˆˆˆˆˆÈ•GI!¹´ìÙlÝ1‘Ãêl0›laelpí_×›J–C²ÉDr€¼u‰¦ M_¼•B;@ i‰¤BæØWÙš•…åí²ç“za`Ò&rcɲdxÇC½"""""""W%…¤ã2 (ÜÆ¶­yÛ˜YXè¾la&©¹S™ïš²Çôø\R3 ]ˆ3‡6½^‘JI!¹´ƒ %›|«›m¶Br m¯kØtÞ^ƒ5e6÷Ün"zú"Ró<Ìó²’ÏfßnÂdrþÌLü í`ðoF½"""""""”’Br‰…=Ò·åÔ{\|^j"3S ›T[`ä,^»Ë®Ox#ÎFòüòÜ4†ÆT^Ý•EVVçܲæ×+""""""ÒA))$—˜‘Óšú,KÒòpŽÓ±SlYÌ¢5¡<óp$M`(¡†L¶ý͹øOÉÖR]›léÏbNLu.íHpP(F×Í ÝæJ=…Ž%>|)©®6mÙ$Ç›Y”^d’÷zEDDDDDD® J É¥ÇÊš¾ˆ8“ “)–ÄÔ`}´’ØzK÷D0mñTl+ïÆdŽeYù$fun1F'ñÆX I±&L&3÷¬±“´8çóËB‰ŒcÓìÛ]‰Ÿ`â_~ƒÈÌÙÄšL˜âSÿ6ÏD×Oõx¯WDDDDDDäÊàSUUUÕÞAˆˆˆˆˆˆˆˆÈ¥¥‘B""""""""’B""""""""’B""""""""’B""""""""’B""""""""’B""""""""’B""""""""’B""""""""’B"‚›ÍæñÇÞÞá5YÉ&&S"i%[’–ˆÉd"9§ý"«éرcLž<™”””ö¥s)I#ÑdÂtáD¨>_jü˜c™¾8ÂŽwò‹ˆxdËKcñÌx¢M&L&3±Ó±.»¤‰eÜ\3«Ó¨]›xÖ1îUœ\±6êõmîgjSÚ‘K©k{ "mÌžÃÊGæ“N~žŠØƒHX™Llð%¬d²d…˜EfÇÖ^Ž;ƬY³8vìÉÉÉœ:uŠ_ÿú×íVç6ö>YG €-›äé3¹g¦ƒÖÆÚΡ‰ˆ´”='™éXˆx1™­o„⇼ÔE<2ónr_ÞÊâh£«L*ÆYoöFFìn]BÒÌ»É|æVÆÕ¸¨yÍ”hþ½Š½8›””&%Å^~¯ƒ>SE®J ‰\áJÒ×;ímRã¼d|òÖŸšClbÄ¥ ¬•86$“ofú°öŽä¢š ¡j¯¿þ:€C— c$I‹g‘þà֤ǰ8ÚØÞ‰ˆ´@)‹R(šô*ëbC]É#ÃâWb‰¯Sfì3|2=çUÏÐØ…,ÊÞÆÌ%kH~][_óîUrXu÷LRÆ>ä¶ 
¬µ4ê35‚¤¬,’.yp"ÒM¹Ò9À€Á{™€‚—&œVDE¬Zå~(rIöZc͆6'®ËsN•ËIÆd21sÝVÖ%Æb6™ˆž¾˜4K‹§Gc2™ˆž¹Ž¼àí…5¶Å'±.Çæ6¤ÒÒÒz ¡j¯¿þ:ùË_ZåÐ¥„†1Ø–]ØÞ‘ˆˆ´L^&©Væy4JÎ6ÖX!2&²Î¨?"cãÁ± KŽæÔ¶ºîUÜß_”–ø)™Ïq§é1þd·–<JØšhÂdŠc­ó®iiZØrH]4X³ “)šé‹ÓÈ»pÛâšÆ53™äD3&Ó³¤×¹¥)NKÄl2“˜VÜøã¬õ™ê®šÓÇìd/6c2-ÆR}ÊÙ³Yl6aJÚŠÍc¿4.~i%…D¤ã M iVd.a¥Î¬m+Kf®‚Y••ųÈ\6Ÿ”¼‹E²·Y‰|y+;_Š#wÏ-É've:Yo?Œ1{ó« ÛÒyîçÈŒHfgVëp°ì‘DRÝÜ+õìÙ“¨¨(âââÜþôïß¿íúCšÆÏèüòTX¨õ D¤C³Y󱡡ž'ÙJ­8€°P7#‡];²–ÖøvùwÖY;&1MWË&óz¯âéþ"¸•o3 œÓ´²Vóï1?ë6r {>Ù™V¶å6r33!,žÈÐ<ÖN„%ÅѼ¼5‹¬+‰ÎާW'”\² [h!+«öè°òœdŸË$hÚ¼ìm”y]î>S=´~DÆÅc`›2çœ=w+›05>ccî»<Ö-"M¥éc"Ò¡…OKbêšÙlHNÁ<½Æc,É»"ÈKÏdåb éÛò(­1"ÊÉ0? ,‚H6C¤ %Èt¶eoc›¦ÅDbŒ?aì’çHÍ,$>>´V<=zô`Þ¼ymu¸"""-ÓØ‘ÁZS¨ÕxºWñ~Q»cd4cyŽôœâm™l"ˆ  +ùÙ…Ø&cÙA³Â ²¤²ª¦¾:#@Ó¦²jö*R-ÓXdvUMDÝœOáZ’fQ¹’"Z¾^£»6ªEL"!(…UiFG“¿uÃTb#ý°e6¢_¼Õ-"M¢¤ˆtl~fŽeÃs«H^tñ÷%[I¼ûwd‡McaRÉÃܳ$³Ö®‘¡A4†Ýæü+VÊ#&j=G¬Pã•;4»ëÉ{¡ÁúÒ#"š1 PXXî¯hÕeò‹‹!²ö·i»Í9‚Åh¼ÜÛp…ðp¯Ò¤û‹ÀH¢ÃaIf.–ÒtAñ$%d3ÿ9 9“üØD c‡aË·:›¬ñZVÿ»ÖH°þuÛ°QNvI<ÁMùpt÷™ê® †==ŒUK¶a)6’½ÁAЬx"ý ¤1ýâµni %…D¤Ã Œ›Å¬µ™¬*²^ø]Iæ&20-)‰¸(.lÁ¢I®aõÓÞÎ"©ã­Å-žæ“ ÄD†µw$""-CBÐ;¬ÊÎÇè~„Gu™ÔLŠã⹘²“¾ 1ÄDjN[qw¯âýþ¢î3냉Œ ƒ%kXUhÅIt„‘0’INÀaˆ&rKI'g¢Ï¯Æ¿ +Mã•a¬½ç¹¦?1­Ögja£v O8KØ´ÄA&AÌëZ‰»Iý""-¥5…Dä 0ŒiISk-§ígtÞØ—€=‡”µÙÍ®=ÐOŒÒ·ecù£ÉG²äì¸ly¤®Zƒ5dk1éð†1mñ4B6%³dk¡sÄ6òR] o-¹X&7™E+³]ë¾ØÈKÏü 0vá,­ÍÒ¦êß«x¿¿ ,(µQî*OùäçCtd(„†䠨Ȋ!>†À/2އC`Úrl€-‡”5 $øÈR<¡a„„Æ‘´0ÜùÄ´<ïÅ/hîgjp Ó~™™™6èaéimJ ‰\áüŒFJm¥^ËØòs(hà e—9?s Ç^ücô,^žFæïîÄ|w2ö¸©„Ù9…M¯ÜÍ3ëžalNãL&îYecê‹o¡aöJÍEScg“´ÖM§IO¹LùE$±öOÓ1¤ÌdœÉ„ÉËì4#³Þøˆ•±5ÊS˜ãbKh£úEDZ›OUUUU{!"m©˜ôÅ‹xvCî…¿4Õå97^}À¹è²ˆˆˆˆˆˆt J ‰ˆˆˆˆˆˆˆtBš>&"""""""Ò ))$"""""""Ò ))$"""""""Ò ))$"""""""Ò ))$"""""""Ò ))$"""""""Ò ))$"""""""Ò ))$"""""""Ò umï:¯ Ê ±|»]§«8ôêi Î<#€#,ù[6õîÍŸ†1 ½Ã­aÿúÏH:êaã køë}ƒ/Q$—o‰ˆˆˆˆˆˆ\î”j'å¹_2ý“³Ü|ë`>Šê¡âG>ß~˜y;s¨¬+/Eµfk8ŠñæßÏßÐ'OFÞw :VR¦uû@DDDDDD¤£SR¨]åíg8yu¿ê kn¾kŸÜU]æÇVl¯˜?®ûžM½{ߊµv,ꑚ”j6¶VÂMÁ}ðoôNuGå”’¶æ0+Êz]ÁÞÁœá û¹Ú•öìEüPÀŸq7|Ãk{ÏòÙWðÓð: eÙ¬>ÿ´„å:Ê.k{ãŒl|_^JlïDDDDDDä ¢¤P;ðïïK7àµÂ[i´PÿkùͰSÌ˫䋣e$­Ë­3ë¢r»snÕ¦O>cÓ'56”9.þÛÏǹÖQ5¦îú4eèµÎÒ~ý»®¾«Sî»fý¿øªgwD\¿#ÌÄ}|† 
Å?RN@ÖªéåµFÀøssÜh>¹ëG¾ú²„ŸžäÏ'œS±>š\{Oƒ¯“î¼…¹u+iV4ª;7·y•V\üç‰ïœ£ˆœSïNÕ*vü€/€IQÄ…Ãq«çcñÞxéˠ惈ˆˆˆˆˆÈeFk µ‹A<|Gú~‚¥ßá¨ø‘Ï?ÞÇ/Ƭÿ-u³~À)GN_ÿÀÖšI¡‚ϹçåϘµÝÎ áC˜ñ³ž\ îez2¸'`¯¢œ úèÍ-À΃…”SÁñí{ù——³Xž{¦ÅGÖ&uŸ¶‘²¿ *¾cçWàÛƒq7Ô/æïç ÀÑïN©#¤|Ys”PúÀk_Šˆˆˆˆˆˆ\4R¨ø‡ßÈZ¿C,ß^ÄÝŸqèÕÓ@üøë™1²þ(@Ü'عé WÆMîâöÞðç2׿¡a¬÷K3Šø—ƒE€#B¯æ©;€ n¿±+ïìµ1óåýü:ÞÄSñ,ÿø{~ùò÷tóõ%úÖÁÌ o…us†x®»™…èÙ“Ž~Å;*ÁÏÀœÉ¡Œì^¿˜Ôµ<õu!¿ßûwðåöAÝTx–Ͼ>áMèo})""""""re𩪪ªjï DÜs=’¾woþ”0ŒíN;s8 ‘NÅ`hþ¬Mé„”é„4}L¤ƒÐô1©¬¬ÄjµRVVFeeeÃ;H‡Õ·o_‚‚‚ðõõõZ®%ÓÇ”é ”‘¢¢"ºtéBpp0=z´Â¤E²³³‰ŒŒlõzGÅÇLJz-«5…DDDDDDD:²²2 ¤„ÐÎ`0LYYYÃ…[@I!‘¤%#C¤ãèÑ£G›OTRHDDDDDDD¤RRHDDDDDDD¤RRHDDDDDDD¤RRHDDDDDDD¤RRHDDDDDDD¤RRH¤S°c³Ù<þØÛ;¼¦úþ0s_Íaó÷¶Ÿ8Bòÿ=Àݯàî?ä¿óO5³¡³8*šd34t\µ\âØ:{þzæNŠÆl63ãi>̯q–ç®Àl6×ø™ËæR;E›—2£z¿ûŸd}ýìùëy<ÆŒÙü46om4\ÇØZCéfæ^8¦¦î»›3&m6=in­¸=³³{i4æ¹m؆Öí,ƒÙl&zÒ –fX/F”¿ž'ïqõñRjlª ›çš™»¹ÔÍyáü‰~Íýqym£9±5Ô¾§:ÚÏ[{ŒÓÛ9ìe›×sÏûûBZ¨¹ïû–\/Z½ã݇4R­Ï¢vìëËâu‘Ë•’B"W:{.¯=ÏŒY³˜åá…Œ IDATçáûŸd»Ç/MM n8ɱkùhv8ë¢|ÙòI+kj=ßòþëy¼‘ß1¶ÔåÛåÀFÆ[¯À£)X,Þ™`ã «©þj_ZT“—±ÓbÁb±`±,gbÿ>sß2ðhJ:‹…”ß°>ádØœuîyï O¤b±ÍÕÇVÌ]OQCu†Ïq®Ÿ”ÿ`¸a<Ï?^¿¬·6š›·ö½Õé5n/ýí­Noç°×óÛ‹æî'W¸"Ö?ËêËã"ØÊê|nø·w<ÒÙÙrRY4=³É„)z:‹Ó Û ![LÚÌ$ÒJZ9¾’4MÉä´C\’B"W¸ÒŒ·8xï›üùwxÇÃÏŸŸç­ÛþŽÌÞÐ'OƒÕ ÇNÃõ×¾øGù9çøç±¦ÖSÁ·ýž{É]α]ìÙ÷÷QŒ¿-€ ÛÆ3ÊQD‘ë/¤Ö¢} „_ÝýÂbÆ9Üftþo@Ô¢[Ø[àÀfƒ †ÛðVO±µ¯RŠBÈPglÆÑãk¿'ElY}œ°Ÿµend°Ñ1ЍpW'ç¾ ÉÈ-cO¥/gJˆóŠϘ#»ÉoR—²yÅðÿ'ˆ2ºÙì­V‰­NûÞêô·—þöV§·sØëûÄ‹æî'W8Ö‚+õ­Îç†H;²ç$3}f:‘ ?À’•EÖÖg‰Øö3S [¹¥Rò³›þžnûøšWg¡¤È•Î ÞËô#¨M¯“vr_{˜ØØ¼ëaº€=ÿ]fÄÆòðk¹Mü«ÅXÞÿ'¤£#?é ‡¿)*)Ï)'?"‡œâ‹rÏ«¸ëÕÌxÿkŽºmì$›ß.a °eÇV~îúu™•ÿ~ûâ´´7r~tþÞ5íë¿wð¸«î§2J8ºëKf¼z€»^ý'Ïï:Q»ìÿæ¹¶å°ä¿wÅ^‡µˆ%®ipw½z€¹[)ojlQy)V ªO{CÍw@)ÖÃ4¢"ÎwO)›çÆñâx?Áìœ&åµ Oõ4`Í`©kª9zs×ç{~?ØrY?÷â´ w÷ÖL8(ÊXÁŒ3æ˜,Ý\t±žÒ½¼ëv¿F7PTà6hÛ»ƒÝ†ÉD õÜC¥ÛW1á î­ûÇãq4Ð†ÇØ§èxÝò6òwì`ߘ Œ¨[ fs4¿°·ßGó·ðVÑc<61ÀÍÆºÜ´ÑÜØšØ~½:ëí×ô×´~œPënp›—s¯ÑuJË”²{éýÜ¿b/6p3u(—f3g|zyÍ<]gJ73×ü+^{˜hs4+ÒÿÂ\óã¬xw©³žº×/·õ”²ynïã¼¶^ˆÇãµÍNþú¹LŠ6c6Ç0cévŠ.Ûygn>7êòt,Ýì¹/½mƒ>¤ó*fSr ¡Ï,&~˜+3ïJÜ3Ï‘WL1¸Fâ$ñì³Ñ˜¢“ɱÅé,žÉd&6qy5Þo%ÙkIŒ5c2™0Ç&².Ï”–ø)dò܉ÎQ9^êhR|5Õ5”“Œ)1 ç  ,ÉÓ‰5›0EÇ“´.{Sâr×€’B"ÒöŠ6ò‡÷ p8²:aV½Ä=ÿ]f%¬æ ÃAÁ{`ccÿr\QNVj«|û±&~ 
ΑٽùùÄú†»^Íå_3*ø×_†böòñd‘ßÿfϯ(cÙ®“n*îËÄG™L?‚Ä›JøðýïÈ0€u³GðÑ/{ðåÎ#|xaÒyvïÁïg‡ó?ænüsÿ7,+ëËòÙáüi¤/™ÙßóO.–ÝXÔ§3‚§öâ»\+o~¶N %|¸ù9„f‡ó—‰=9Qp‚¿hNlQAÕß‹‚¹ð{+û àÝÎ/1µ×{©É¾{?Êøð&.Oãwc`ÌïÒ°Ì©žNä© Oõ4Û‡O? ¦n±°sÍCœxe5nïé­|8/Ý·-#Íb!ýÍñì·À9õ€}¼Ÿ1”ç·Y°¼3ëæ²:×îÜoÁ,ö^Ø/нóV»¦ð‰zb!ã1›ÍÄ>]Ä}ï<ÁèzCª.o½7ÚMâÂÛqxkÃ[lu„ çgä ëZb/ÚÁ‡;àxi -ƒ&M#á+îMÀ…Ø`¿Š­–tVM(e÷¾zÆî« xhv—ŽgÙ6 –”GᕼŸ—z˜¸| ÷÷®±à¼´zi3÷-¶ÜƲt K*ò"+v\®IOŸÕºÞ{êKoÛz½¤Ó*É!=w,Ñ‘u®±$-2|á™b¶’•žD„_k“!á²²¶²84•GžKw&šmé,›™MôÛ²²²X÷°e«Ò)!¸•o3±<óÉJâ½ÔѬø!'…ùÅÓø“%‹¬ôd"·=Ǧ¦ÆU³šÒ¸7…¬{À„ÉTÿǼ(öžÕ¦¤ˆ´½ûxé¥ ¨“º˜Ì„—^â>Oß®k©bÇ'‡ùO{V]HöoXù~)'oÌdzÃùË/{ñ—Cl¶V‚o ޳lÙó-GOùñó{G°ü}w Ç~`ãénüë/®q¶50„¯=ÏÆ/ªW…öaò˜øã‹Hw†Ð…ñ¦kðÇ—~]ëUwǘu­®§Šíõæ2å×#X8Ò—òïOðeÑ9Nq*››\`;ÁqÇ`†?ú<›Ò-XRŰb.ïÖ]ŸÉº™ örïS÷6œð¦Iõ1åÍt~{›[iEØpާ¨§h7Næ¾ÉaÎ/}AyhòA6î®Î¨àÑÇ&taÛq6f¸öÏ” ÕûMayúSΩFö\^{|'~å\ïfçÇ“1c›=|¡Èÿ- O¸;./Çá­ o±Õe¼ƒ9/ eǬq˜£'±`w'Ô-ÅS›6‘žúÖóZ®½^ÆÑñè˜:ûÙö°eã&G5bʇ»6š›·öS§»ý¼õwcêïç°ÛmνÆÖ)-p‚Kg0ëÄošï× o×™¡L¸­æöPî›âj7h(!§ÔA#ê©Á[Yƒ‘~E;ر=«ÝÈm¿Mgy£Fó]ŽºÞ{èKoÛšÒÏÒÉ8€PB*Id˜+ ’—Iª#žxs `$rÚt"¶Yœ#gŒÑ,ÎZI|0`·aÀíÝŠ·:š_#Œ8¬X‹í@(¬]G|hSãªÑ­&”^~™Iu¾ãÂæg¢iCo‰úßTDDÚ@PÔS, æ.ØÂWbÈöÄ(ö½ò^„ÐržjÌ1ªøÖ¯7–”ñ7k%S‚|¿>TÎGw~åLöó¯òX–]ÂÄI¡üq\!+÷çñìãг¿Š áÞ°^|û¿9LÏ=請;¿Ÿ]gEȲJ¾ãÿõÆ:qTúЫџ]¸þjW¼ø2¨oÎÕ°t–/>.àÉ‚óøº1À‡^žªk0¶Î¨k)„¥VŠ€¡Æ(ž·D],f¼¨¨#¼°'Ÿ‡Â\_Oí¹¬˜ûø~îíEõÐF5õxÚÏNþúyÌz¥€~CG1zŒß…/u¥›ç÷â×ÿÝËš5PÀFæÛX;¤{«ÿÎB¿ß•B†ŽÁQØŠ( ýÜ}[,ØÁ{G&³æçŽ~!ã¹oü‹,Ý‘ÏmýV×n?%„ÕñÒCîúÇóqxkcâ(ϱÕ;~Ë£~Ë›Û~[]‚ÍsaÌx7× ‰<4ùEæe𛨺mTçÍ~p7;ÆŒç‰ýç¶}Om„‡ÔœØÂÃ=¶4X§Ûý¼õ÷Ca Çéí½àq›‡s¯1uJ ` ˜À˜’abb£—²ñv½ðv釡ÖGeÝÿwi°žF–òo.3ðÊêyL{æ†áãyì?~Ç”Vÿòv)x¹NûÒÛ¶¦ô³t.~FŒRX^³¡Wow”bµ¦ð iUíc™düìä­[Ä¢5£È@x˜?n³Ð^ëhN|0l+'­beb,+ ejÒ³$Å…Ö^GÒ[\@­~hMÁÑ<»òeHœÏ¦"WBèíÄVÔ|J ‰È%S71ôÞº¶45!Ð…ï†)çsf|ró#×qMƒût㚈¡¼Ø䋌c<¹½˜ˆ°¡Üô‹>þEͲßð·šÿÛÛ—þtcæÌ›SÑjúþpâ¨âXp5@%GOž§›o"'¬¬)¨bò/Ãù?!¾`=ÌãGN¹¯Î[lQ@C Õ91‡A¸½Á¶ãp€ÑßÕq¥»Y1ﬥ°ÜÛ7ª†ÚðT·ýJ3XýJ9¦lsŽ–³næñ÷v8w›¸ËÄí}ÈP&óØÎßr[Ý×¼´8£ªç%ìÁ`xŒ! 
e'l¸¿ót¸uÚ/Ý<—};ö»£f©ÌE¿#í)ƒÇãðÊKlõŽ¿.{÷yÔݨå' d”ÑM6Nœ¨]º`\kJWƒí×l£¹±yi¿1u6j¿¦Ôéí½àõ}âáÜkp?i¹1<ôØSŒ5—iK·sÛò;y>x»^x»Î4’·zê>{ÑkY}Ï¿yØKÉßò Þgø†‡:Þˆ3/×ûfk­×K®<Æ0"Ã3I϶[ãsÊnáÙÛÓ‰Ùµs½ "Ÿáƒ7âêOßÊKaþª’¶fm»e1·§¸kØK-ŽÏŽÍø­´æh{?Bc“HŽM[6+g&’–ÆôaŒ«­çq¹C†äBâO¿,B éc"r‰9C®©d@óBÕ|¹æøUe+w¹ž9?ÄŸ ;›³\CoŽó?Ç}¸ã¦@ʳ>ç⨽üzrM /?Üÿ1®+_(·»Öúx“{žãþ÷[ç¢ÐeÇXùúžÚÕägÝUlÏ.âÛ àXúƇ‰î>>e?”“e)ç0à¨hëØ®C¹mÂAvìvÎ{²îÞÁ¾Ÿb¸P´žû§®`oõáÒ ¶lýQ!Îu^¦- èÞ7ù}ƒ_X½´áµ/û`£Ôõèú½ëße÷¾Bncòð¬ß˜ïšß¿—÷Góô…x x÷C×"³Öͼ»q0Žwí·ƒ·Tï·›Sïg}0t<¿œÁ–êE-¬;X¿e0SÆÔÿº0qy­Ç ¯q. ‚eùD×÷JÇá­ o±ÕeßËÒè®mvŠv¬gËð'˜X?dFôRv»ºÂ–»‘w3Æð«¨‰âÞ1;xí-gߨr7òn­ïaÎ…ÈoÚÀVom476oí{«ÓÛ~ÞúÛ[ÞÎáß'ν&½¿¤%‚&Îá‰òy¥úzBˆa;\ë§•nŸÚcI¼]/¼]gÉk=€£ÜÞ`Y[Æ DÏýй¸´_AA!ô ðoJ~û2ÓÈë}cµÖë%W `&%M£ð¹E¤æU呺h9³âÝÿA1"†„ⵤd;˧%bŽ[K€ÃFi€Fƒ«ž” Ô>}K‰fou´$>?#F2É̵Ť§¦_ØT’–H\r¶k} €ñÂE¢©qµ‘àh%_> !PRHäŠg09Qî}!F[ÁA¬—î0C-IU»šÉQÝ9œ} Ë)ÀïZÙöq>‰ë/g¸q\3‡øâoÌ ì<ùF.w½ú9 Ù8ñ:¹­·c¯õ!óïy<µ«dÊÔn<~œ^=À]kOp4¤? nïÝŒ˜»0¶çžzíwm8EÿÈÌRg¨P¿ †úò·O>çî?‘v“ çùòøÙ6ŽíJàÇè‡~‡ñ­i˜Íf¦½O<åüâr|*€wã£1›ÍÄ<¾ƒá«žgbíx‹-åö¼è\”·úgîfwïÏmx¯ÇKlQ<öD[f™‰žô4£æðØà}tûx ¦¼´ŠÑ»çg6cŽ_ŠuÊ~waž1Ü7ô ób̘ÞHÐóËy(¬þ~ÑÓÞÂ1g¹ó/Õ~áüfùx÷aç“kfì`ô²?ºökoÇáµ /±ÕëþÑ<±f<»ÆlŽåñ£YöÒç¢ÝASxiYΈÁl6ÿJÖ¼„sUŸ_Ô¢§‰3G3í-QµÖÑ)åxü¸SóÖF³cóÒ¾·:½î祿½Ôéínø}âþÜkÚûKZ&„ OÜKþÓ«Ùmç¾ç'S¾:sô$^)ŸÀcµÖÒjÜõ¢þu¦±¼Õ¨)CÙ2oœ+yṬ1ê Vݶ›'ã̘ÍÑL{ËÁœç§Ð!SŒMºÞ7Vk½^r%ò‹Hâ•‘XÅ98Ž[B¶y%kk¡©aÓßHÀ¾ÒYþÔ ®œÆ0€ˆxGf“x»‰èÄTžáçÙù8ÓΡŒÉ÷$’V⥎–ÄgŒfÖ‹lK¼ók0ÆÄ_Ø·$ûJâL&LqÉØ;×>jf\…OUUUU{!" 
s8šûç#+KŸæ…Ý?öðõ«–ÝÇ¥ž–o·Ûñó»ŒÒä—Â÷‡™›rŠñÓ"˜xu{#""""MNN‘‘‘톸dgg·éë‘MDD„×2CóÿÀ¯5…D®xADýöM.¬%zét !‘ˈ¦‰ˆˆˆˆˆˆˆtB)$"r)]}=Ëg·w"""""")$"""""""Ò)))$"""""""Ò ))$\9GŽžlï DDDDDDD.9­)tÁ–¿ü-›ðeο&®¿ó·Ç·ïåÁ•Lºóæ†W—q£woþ”0Œãë?#騇&ùóëïÊù¿öî,™Á­gó_«JIn¹u/Eõ*øtý>õå7Œ&>¸‚ò‚C,ßnc×é*νzˆ3fÆÈ¾u⯭›oWÆE âÉÈÀ‹e\± „´5_³¢ úº†µ÷ Æ¿½Ø±”³ÿƒ/YP·ÞÊóQ툈ˆˆˆˆÈ%£¤P=•üqûaÆ=x½çäH­¤Jmî»…¿î0ì_¿ŽÚ9tn=Ëg®}÷[z%úÀÀ`(Ïý’韜¡ß ×°nÒ`úu=ÑŒ|æíøŠß_Ǫ;j$3jµWƧëóY¸ó7ô$~PÝhËÙ¿¾Ð™ `M§J¥Vþ_¡3ɶëÓBž%†DDDDDä²çp80 톴±3gÎàëëÛ¦mhú˜ç¬'x§ ¢ jîÊ A Š_—pä«Óœºç¾;Í€OqÀõäfŽòÎö3œìÝ›ç'¦_W€ Ž cN|qàvþ詽ÞÜ:¼;PÉ΂ºS¤Îqd{ ŽVÑ­woV=x=ýÚàˆ/kCy~R/†PåL e”´sP""""""žõîÝ›£GræÌ™öEÚÃá ¸¸˜Þ½{·i;)T—¯½*«Ø°óñC‡µzõþ×ùqͧö÷‡¬UàgäñA嬸ê Jað÷gØ éß×ÇI«„!×Ô™ÔƒŸïÖ³üã«3Œ‹ôÐ`E•Û_ÙžËÓ*8çgä £ž:ÿofŸ3oÓ)U'†Ðˆ!¹< 8cÇŽ‘——Geee{‡#@vvv›ÔÛ·o_‚‚‚Ú¤îjJ ÕÕËÈÜþ6^øªŒ7³Ë™á®LY¾üY­_Ý4âzVÝÐpýƒº3Û¾;Ãñ³üãtÔ“¸Ðs¬øê,û ÏpË çðaDh åÖo8Üä&IÑÕ€oOœvßVÅü#ψѰ]ˆÿépÀ~šÏŽVpàÎ{*(1$"""""…¯¯/!!!í†\!:o&À‹›îèGôW¥¤[ŽðÓÝð²¦PÃúqk¿ïÙvâ,_<Ç.àö®†ëÎq gùìè÷Œ;U 1°:÷rTTШ—«NªWïîü6þ:ÆÕÉWóëÁs·ÂÒ¿Ÿá¿f\ÂÐN;Zœ‰¡ç‹÷ñàÞ œSÉŽ²ux ±È󉈈ˆˆˆˆtDZSÈ^×3c„/ØO±<÷|+WÞ›!A>À9þœ}–stedhèÓ‹~pª¸” ß½»sCwðïïK7àÐ÷?Ô«Éawô÷ëV£úÞüiþ(þ2¾}G…/ƒú×Y>Ú×ÀSÜÈOÇ âñ~p®ìÞÌ.oåãìXÊ¿üœ§÷V¯#åÃí·RBHDDDDDD®hJ y0àŽ ¦úÁ©J÷kò´Äà ? 
Š/ʪÀ¯#0bœ®à@%ôêßÛ9rçÆ>Äù¡‚ç"Ô”óé—À@ìðº OuÅäž õáÜéS,ØTH­”O¯îÜÐèCìÏzÑH·áóV?ÒŽ¡üËê©càLéñô""""""råSRÈ£ 6èÖpÁ¦»®'·¸þÙkP»þ}à‹­Ý2è*׿ñð=è{ÚÆ r¼ Œ¯>.à+Ü4âÚzSÜz0ò®þLòƒSGKx3×ÃÊôC¯åñ~€ý¯m/m…ƒëX”‘ÎJk yá9dz³¢¬Î7 MC’ç‡3²1÷éÅ¿|f¯™üChOnÂÁø2rP‹q„ßÈZ¿¯ùãßK˜¾â{νzˆ3FöõÜN¯ÁÌWÆ®Oΰiç×Ćû»)Ô‡qwyg½/ãæ~Ú«1q(-ài%„DDDDDD¤“ò©ªªjýùQ"B9û?ø’…pkH9ŽöADDDDDD.3ƒ¡Ùû*)$\9GŽžcð /#®.J ‰ˆˆˆˆˆH]J ‰t‡nïDDDDDDä2sýõ×7{_%…D:‘ºZ2RHO鄔鄔鄔鄔鄔鄔鄔鄔鄔鄔¹R”nf®ÙŒyEn{Gâ]½8sYa6c®ù=‰K7Sdo×HED:7×Q³™˜ûŸdÅnkëÕ?w3¥­P[³Úv÷Ó*ñ4tl-9öR6Ï5c6¯à2ÿd–6Ñ’s§½ö‘Ψk{ "À˜ß‘¶|"¶½¬˜1‹i³¤¼9…öŽMD¤#¨yÅÆÞ3kÞÓ„¤¾É” K†Ýº—÷×—2aήXZA­c‘Ö¢‘B"rù1ŽfÎó1ààx+ÃÖÞшˆt@FF’‘ÛÒñá̱X°4*)“ËêøY¬.r´°M¹”¹Rå®Àl6óøúí¬Ÿ;‰h³™˜KÙ¼{3KgÄ8§<¾žüS´J÷¾ËÜIѦpÍ]ŸOõf{þz×¶f,}—×o/ªQïýO²>·…Éœ¡ÜìØ[Ô²zDD:«:yo×iï×øúÓQ¬K™áú¼ˆž4ƒ¥VœÓ¥x`ϋę粹Եïã+X17³ùV/4cŽ^ÊÞ꘢yØlfê»ùÍ<Ðê6^có»s™m&zÒ\Ö×ø€óöùæì«"2VÌ Æ\ߺÛ¯ßK¹rcâEKÎ˦Üw¹¿Šˆ8))$r…Û·ã8£^ÚÄÖe“qÜÈ‹K ¿l–5¿Âß+,xßunÛÎÒY«á±, )°ç•¼ŸØ÷òJÂ+ìú©–m,_Êî}5±eðâÃ/²{øKlµXxç>¯$ÌãÖÜsøùc(*Òœx‘&³±;# ã™r[€÷ëtC×øºJ·³tÁFl­Áb±°jB)<͇Ö&._ýàœîeYÎÄê¡EûŠúÛt,–çø?¿œ Ž-d87Y÷n¡€Læ¹Í=/WgM¡¹›ë|:ì+À1~9›Ò^bü‰=¼²:Ãùùáíóí¾ûpLy“m–Tžº‡W^¹˜´ªÕ­ïÇ—†äCoý([KÎ˦Üwy|¿^¢ã‘Ëž’B"W8èQ„ùßÐገÏh#Bp¼ÔõgLãü~g*vóÚÒ'yò­à8¥  ƒ3!Š À8z ÷ ½Ø†mßv8 jühŒ@PÔxÆp»5ÊGDä’©•8‰ã…¢(ž{çwD¸N7p¯Ïù¹qdõ<æ®Ø SR°XX·hhá®í~£¢˜Œƒ;rRöf„Sð–r&™,Xjü,ŸXg2Û˜ñDÆáŒì9ˆ¼¾]Øw2ãC‚ˆš0/$­jjL?ŽŸ2¾‘ý(Z Î˦Ýw5ãý*"Š’B"W¸Q!üÔ/ÝÎÜØxÞ/"dÂ~ÿؘ‹›Šœ7!AÕ7àþú]ÜÕasY~?Áõe$îEöE-˜Bf/wÞÆ„iaQ‘Æp%N¶®º—Á8(/ d€àý:ÝÐ5¾ž€ñ<öÄ(ü'Øóþ‹$Ä«7¹ž~ü«ÿí7Šñ“ 82ö‘oÛKÆ0e ÞrB-âåóÍ€ çã ŠŠêSmL?öëgt•n ¥skÁyÙ¤û®æ¼_E¤SÑÓÇD€ÒÝ[Øã€{Ÿ˜ÃÄp°ÖX$4 $ØÐ¤©Ô IDATC‘µÂ€r'jìlpþçÞ5æ„·R@EìÆÖŸYEDšÂ8ú1žúÕnÞ[Í‚ÕÃI™3Úûu:·k|=~„Ý÷G¶Ýg£(w÷ÿgïÞ£Û®î|ï¿Ó™‹Õ”ÈMcœ¨…Ø)ć‹Ôb&(O©Íˆ(O©Ó§ÄY]u­E›ØÓ·CÚ:œN vËJxæÄ©=é‰S&6ôDÍì–H,›Ò*˜G©.¤~þ”È7Y¾Å±õy­•K¿ßoïïÞú]¤¯~{‹ß=ý/<þŸóÀ¯®eÏ7R‰0ƒ«n¹ óÏðìKùOp_ÒÛ„Æ'ÙõíŒÈÙ)˜z»cI²E&`À› íGIgãÚ/c»åÐï»ÚNäö;¹ xåµ£qßÝþ]n|ãqœö<ôÚb–]Ûoc~°ó|éµ(¶Ù(yâwnú9ß[š‘z‰sa8Öó|ö?иó“7œ@Dd&ËXÊ}?ü* 9Ư~ü+:“§G:Ç”÷ ÝúU »VGÏÙåϰà¾:~x» Èæº;oaÞÑÇ)±­f×pSËe\ÇmwFo»IièØM'þf2I¯oq×~ Ó‹ë)²9yü[ø‡ß庡.a#õãÎà–X?®m1w%$é`˜IÒǵ_Žæ}WÒãUDfõõõõMu"2²Hä|ú]Û^ž[çà'‘àþùíh®B‘™äÜœã;Ÿ¾›Ò'à¾ú=h‹ˆˆÈØ 
†1o«;…Ddd½/°ÎfÃv÷/¢ßʆ^ã•߃añ"%„DD¦»)9Ç·óâ3Ç&}蘈ˆˆ$§;…D¦‰©¾S¨ûÀOyè§ÏóÚ‰ÉUw~—~÷vb„˜ˆˆœŸÎé9¾ý1le¿Â0ïFîÛú÷äéB"""2ã¹SHI!‘ibª“B""""""rþÑð1%…DDDDDDDDÒ’B’æNñV×»S„ˆˆˆˆˆˆÈ97¡I¡·þã_Þrw×HköâÙ~òzGWÁñÊ·Âs|¬Š$:Å«»P¶ëM:Ð3ÕÁˆˆˆˆˆˆˆœS˜:ÆK{ößð´+k#Ó@o7Oúø˜>^z90#CáÎ]|§È†ÍöB£Ù²—çÖÙX÷Ü(µ*Lx¢‹ì}Žu¶uŒºY£Ún¢âž„ö‹Hšhç1›¯ý¢}Ày$zn¬}ŠÂ‘óÞÄ%…Žœ 1ã¾uÃÅœ8ÒÃëVp‚ùùÔU^‡cþd.iÇ´˜‡W\Âå3"1â÷ÿú8†ïºiiy˜eÆ©Žg4޲ku1OL».÷tm¿ˆœOÞú×ÇyþèTG!"""ÓÉœ‰)æCþÿ}®X’Ç‚ÅPÌÛ¸ýr¥å¢³«ßäGÏžÀû|î³—põ'±çwPþÔû\}Ã…|ù}Þ®¿nßžó½ü]Ìâ¦Ì<¼,ë̺Å÷^‡ƒèß¹W\Àþø]Àóðˆ#—̉i”¤Ì%W²•×Y¿÷}ÞŒ'† º¿M;B!X´({ªƒÝoD&¾XÓí<ÚrûÄ—{ÆDÅ=Ií‘´rã™üócÏñ¥Gog:^ DDDäÜ›˜;…>êf÷Нš dãX2ï‘nÎ~Ä9ŽûÙ¼“{ÿ^y-Û¯þ?H,à4MݳµòZþýï ¼zè(?=™E]åµü¯ëfóÒËÇxuÈŠOs0l¤®òz~s:ÞaçÒ"I#™K®dë´¿c¨—çÖ9øÉïáWe6lñ±¡vv­[ÝfþbOJÕ}€Ÿ~«›ÍÎê?O¿œDï!žNØnWgl@Bïs¬³ý˜Ç~±»ÍÎc‡Â±r¢ëÚì+X·«søaPC®ÛËsëÊøÑØÏ sHÃ:v½*g_ÁC/tÓ}à§|«È†Í¾šèî·î™a`Êì×ÉŒ&îaû!W0›â£ëΛA’šãÛ¹s9}DôþFÆ sÉ•<|]üƹ>^z¹‹¦©œ^gÔLÜþ¨‡Ü7þÀCË÷–Ý<³¾Œß}i+ž–¼OÞ¡õðL7Ñe=@÷-u4µx©»­—ß½/«›g(çPl;ÏËx¦ì' s=Ïk¦‡ñ¶xùÞu'x桇à›x[ZØ¿ýœxü  ÙwÝìkâöG·óUà«Û[øÞÒTbø=ϼ¶˜Gš[hzd~RÂO»ïdks ž,àù‡žapn¥™Þ'—qhý)̽4ʸ‡ëóö¡ìù/±ÕÛBK‹›oò{±7öÚ ,GDd y|õ‡_¥ó¡'øÝ ÷C#WEDD$M@Rè8ûÛ?î^þÛ–ƒ|yËA¾¼+DŸÐørlÂ铟ð³¹tn|›KÉ›XƧÈÌKÝcÝN¤¿SG^ç¡Cñ1³¸é†\ŠMSÒøýϾv'÷Ü™‡ ûv¾qçk<û»£±e·p×mÑeÆë¾Á7oLÜ.qÙ=|ãªyæwñLÏbnûÒ¢ØßÙÜõ¤—ïÉ@¨÷(o¼q”0ô@¨Q¬›B ÷Üu]tÙ⫸6r-·Ü]×” ‘!JP&Ùwñ¨÷‡)̽4Ú¸‡ésƒ‘yG_äÅ:éùÒ÷½ÿcÊøWíùyãh–ˆÈ Æe|÷Ïàüç§¹-–û™”󪈈ˆL{ã>öÑÛ¸ÿ8›’†øø /ÃyxüoÁ„㞟ü +·¼ÂªpýgÇU³Èøõ¾ÁC35!°èKÜyÕ³ìz¶“@è}ÍÎCB°h_½ñE~ñ/‡¡ögyúÅÄí^ä™ç£Û…íâéWnä–ë†ê¢7Z‡v=Í+ñ£Z×€iDN…ÇCªýq¶LB¿ã±»¿Æ®”~¾y4qÝç¡?ƾî™èäÒ&²³1Ï”ËO (GDdœL_ù>ÿù,ÏÆç‹›ŒóªˆˆˆL{ã»SèB3ÿTi¶è+ï´²/þpn.ß*Ëå[ƒÖ3QW™ðp~~ÿÇK—ò›¥C-° YW¹p´-tfÊæ^óIÀ 3-!@6w=RÇ©¯Çñø "™ ùûonçáØ$:·?¼S=€ÃvŠÌïãžÛàÐÛþ=ßÜþ0·›€SO˜–qßwŸg}¹_eÞÈ7þ÷ýn=¯ s{vÆ(Ö]ĵw-æ_Ö/çÔ#M<¼l1Œ±?˜w·}ÿÑèÝ?ÉŒ#îÄ>Ïà»Ôuÿ˜tü3o20ïªÛøþÃwÅ~6z`9#Nt$"2‚ln¿ï>žþýg{^‘´5«¯¯¯oªƒ™:§x«ëcæ^:ÕŒ(2ÔäÉ""""""’Ö †±WRHdšPRHDDDDDDORh~’^DDDDDDDD¦%…DDDDDDDDÒ’B""""""""iHI!‘4¤¤ˆˆˆˆˆˆˆHRRHDDDDDDD$ ))$"""""""’†”ICJ ‰ˆˆˆˆˆˆˆ¤!%…DDDDDDDDÒ’B""""""""iHI!‘4¤¤ˆˆˆˆˆˆˆHRRHDDDDDDD$ ))$"""""""’†”ICJ ‰ˆˆˆˆˆˆˆ¤!%…DDDDDDDDÒ’B""""""""iHI!‘4¤¤ˆˆˆˆˆˆˆHRRHDDDDDDD$ ))$"""""""’†”ICJ 
‰ˆˆˆˆˆˆˆ¤!%…DDDDDDDDÒ’B""""""""ih’B½x¶¤ü…Þ1.OÕ‡D>‰ýy¼ƒò-‡ðb™È¨œâ­®w§:‘snšÜ)ÔMcm;??2Úe"ÉœâÕÝG(Ûõ&è™ê`DDDDDDDΩ9S@j>æX8ááü|ê*‡Y&’ªÞnž ôñ1ðÒË^–5ÕQ ëí·ßžêDDDDDDä<ó…/|aÌÛNNR(ø&?zöÞàsŸ½„«‡vìâÉ]Çp¿Ìžƒc¹™ï\sitHØSï“{ÅüáÑ|18.fÿöwØ °ï p=ëæG×-¾w<›¸l)×ngOn[—ÍÖ×Þέ-P_–Ï‚Ii¬L[¦Å<¼âuÖï}Ÿ7é;ïCã9ÐEDDDDDDš„ácÇq?{‚wr/ãß+¯eûÕãà Ë~yŒÃÙ—±»òzö­¼˜?¼øgÜ]ñå§96RWy=¿¹Çï°ó Ž²Ï±Xqëõ¬[šX×ÀeqÃ’ yýÇ9[ãõ?~ÄŸÏRBH†”¹äJ¶®¸„Ëâ‰! %‘40ñI¡®¸?0à\–M&sȼ濰úÓý—}í+Ùdä^ν¹§q·‹­0 §Í[v1—ÓGd”Cà W¹éDˆ—Žt³ÿÏŸbùÓÄ´Mf$%†DDDDDD$Mü𱓟ð³¹tnü‰KÉ›¸,†ºƒý·ÉÄþø™ã¬ÿÂËpäþ…ÿ÷p/Ž%'yiÎÅ<’;Î2eÆË\r%_áë‡>!šê¢éª,Š•O‘jâ“Bsçð9¼{˜ ðï¾Ì‹/3ðíò«ù» lw¼c‚˜Ã5W]Äü…ÿÃpE '¨d™¹Ny‡Å'¿šÅM7ä*!$""""""3ÚÄËý,%ŸŽðó}Nñ §Ž¼ÍÎø?¹óp^áß^èæÀÉÖä'G(ô ³áTøÃÔ–-‹#âLJÿÆMWh6!IîÔ‘ødÓM™ÏÛɦEDDDDDD&Ê$L4mÂqÏŠOþ…•[^aÕ¸þ³ñeóqÞóY¾Øý6+·äËOöÐe^À—ÍMV ðnÊÅKûÛyàÀ{),Ëfùçgññì‹Y¾xB'3ŒB""""""’®fõõõõMu“áÄW(=‘ůïÔ„B2ŒÞ7X¿ã¯”‘t3ñs zØä4Å˲§:9Ÿ™²¹×|’pƒB"""""2 œ>}š®®.þú׿rúôé©G&‘Éd"77—Ù³gOZ3ïN¡ã”?õ‘üluäFÞ^dX§x«ëcæ^:ÕˆˆˆˆˆˆŒèÍ7£“_äääpÑEMq4â÷û±X,^n$¡««‹Ù³gc6›'¼ü¸™w§Ðü|ê*§:™>2Y¨†"""""2Müõ¯¥  ƒÁ0Õ¡È$2 äääÐÑ1Q¿Ô>´I˜hZDDDDDDD&‹Béᢋ.šô!‚J ‰ˆˆˆˆˆˆˆ¤!%…DDDDDDDDÒ’B""""""""iHI!‘4¤¤ˆˆˆˆˆˆˆHRRH$-„ …BÃþ Oux3Ú‡D>9‡Õï |Ë!<ÇSYùÇv¾h«Ájµ&üsáéêùè?[môxpÅ×$LÀSMi± «ÕŠÝYACGâQÄ[]ŠÝjÅjwRåŒî˜ zpYkhK²J¸£W¼þÒ*Ü)Ô¬½C.®ýý"¡¥Ú†µfˆhShÇIû{¨õ[¨)-Æfµb+v èÿTõàqYqÅ* w4°ÖnÅj­Â¼öHËS«'Ù¾“dÙ¨ú§»RÚ¾VãÚÿE$jà±8–"FynIÁœ©@D&Y¸Ú5•x1‘1Ü*álÊjk(Î9§‘¥nkßæØòëY·tªcè|Žmrõp÷Ïx©ÊÖÿ˜ÈªÀç«8û8à¦t•ŸÒÒ†/°£×ö ÜÝ‚Í=ÞjÖ¬ÙLNÓFìÆ0mµ.ês6âñ` ù©Yå¢q©‡Òü‚ zÙèÚD+%” »Rïö­Pök|΂nw¬©#¯¥‚‚de$i/Iú)‰pÛvjöD d,í« îÊû 8~MK}! «ÎôÿXË Ñºs+†Š_ãs ubiyŠ’í;Á$ËÆ\a ûcp”¯UÒýÌŠˆÈ µ¹Ù\³o{/‘Ì¥Ü]±‘ ‡9åk|j‚xÖÖÀOjpdM`|=\·vRæá½Í$Ä•”™áz¼Ûi/Ù;Ù—Žzœî6Š]c;ͦ*†ŒdWžW˜n>æØyû…ùùÛäêøÉ˯áMPžšÍÜý`›¬¯òKñxÎ>̲;°GÖàlÄžçdz3ŸÒýŒ*WN"ä¯am…KÑÍp4ÉŠá6ü¿µPT=Æs ‹°Dš ô@AÊo|´—Tû)Q€½uÝä7åvŒY/v0ÇÚo´íÿ1ŸÒ„B`6wÞiyŠ’í;I–e±¾pòýqL¯UÒ6Œ1N™Pá¶J×Xµc7ÕùFðT®bm¤z§ykê¥Ó!+olq¥ ™é"`À|“‰œÈd¦­vË——R?̰‚pG=¥Ë—³ª¶-…¡5§xÝó wl9È—·¤ô—oðÖGï±ÿäGþ£«}ô'~´eàãÃì? 
ßäG¿8È­±2Ê=]œ‚Øð«Ã<ºÛÇ­[ò?þó”o9Ä“^§,¶îÿû8oh£tËA¾¼ÅÇC†º»ÏöwØ ìÝwGÛcOŸìâÉíÑzo}ô~þê»ÑçcþRª'¾î í±e‡øÑ Ç¢ñ4d;GÛtÔã§Þuv8Q½?þõ쌰4{„OÕ{Ù(§¼ß×I¿­¡ÔnÅj/¥Ú3Ü0°gŽº@'­ÙydwÇD¨†!ã2ílð4P1Ò(¡^º1`ˆæ†G|²:†moŠý” §©ïŠõ” 7Y;’Æ–j›±t£Eú›i1Ü=þÎ/è¥:6´Ìj+ÆÕÐq¶œ —êR;V«U÷Ò8Çu+›Z¡quˆ¡p=C/OÖ–!ëJ¾3â²$ý3\}#íI÷¹1ìÿÚþ' LGƒ‹b›«ÕNiu0@o••ê–Ø«n¡Ú:ðqldp˜c¥ÇƒËº‘šÚUج6jþ÷3¸¬.šbC(mÅT5 z«£û‡m½Á©è„óO0ɱ?à:ÕoØå°×°D=´T;qÖø93‚u¸ãmàkèOÓo…ÒV½5˜7TãÌ]2Ì86l  #HbûH7Ú±Úkh Óoÿ-v5Э?3lýìþۃǵ†FZÙtkl˜c’2F_¢žCœÛj°ºÆ«ƒ‚4á(û+€·Æ‡hÇýËcξŒÝ•׳oåÅüáÅ?ãîŠo3šzNãþó<ü½ëùÍ=—ðÎá·y²ýÃ1 ×αÄ6qW®Å_ø3š|>ZvØñ»êbó¾tÓé‡Î•±7 ¥Túð¦Å]‡©´ˆþ#¼ü4zó¨öúð58éÞì¢nˆw á–&Ü‹Ê(*"½t› ôzâ–“AÜ®5´ž‰³¿«w,c¾…ü”‡ß˜É‰çs²r0'öE’:†oo*ýÔ¯ÑlßiD©#Å–ZƒûúZÌî;°Z­,¯ °ª¡KF¬Žª*(ÛM‹ÏÇK;VÓ»µoO|Y%ÝEÛØïka›£—?À<µûØP…öá«x»KÖË“µe¸z†êÆ„}gÄeÃõO’ú’î#íscØÿSÚÿdB´mgÍÞB~ÖâÃçóPÆ&jš{#K ií D× tÒ]XHgüqg+{‹Š°“+{i3UÓâk¡â¿ÎZq·ç³Åëcÿ–›ñnZÉæ “Z¯}²Ù[åNmþ°-Ù±ßÿ:ÕTmƽfSì:•ì á¯YKMFõ–è#o ¯¡e&Ý•-#êiÃÛ^ˆÝ2àŸULE•³_´b(jÂç­  £ƒzW ”íÆçk¢ÚìfÍ&o4ò²u­ûŽ|> «Cl­óÒCŽÚ”PȆ}µ8²’”1¦øRÐÖHe°„_¶øðyk°4obo`´q%öÃh*O&@êÁó8Z­VlU^¦úë%…Ddò™W±eË  H M,bÅ–-¬2PÞìOa„§¥›·Þ¿ˆå_¿žº¯˜X`¾‚ïq 8øˆÜæñÅ®“¼¼þǸòŠ,2™óÛ×óO–9œ:ÞÃë9EœŽW0‹âkœyì´™Éd™Ÿ¿+˜MñÙd2‡yó/Þ°ºNàþÀÀ×¾’M&@îåÜ›{wû±1ÕSl»œ…sâåôÑtä/Ö©£‰m ´ân/ÂéȾQÎqRÛ›ß$ÔKwdeÕ4µøðyÊȨqQß‘°}¨•½{ qÞ<ðmH6eåŽè›“¥ÎnÜÞÎþ«=TVú)ÙXr6ÁÒZG#¥Ô{}øöo!¯qMô[Ú@+îö»)qÆãtPêlÇÝ˜à¾¡Ž¡Ú›J?%èhÜNFEÉ€$ÚxcK¡¿!:oÚZ½«÷áóùxi[ÞÒJ1®„~˜0fVmÙŠEÂÍ[Ͷ v¦zš#Í)$"çDŽ}#µ[ÀU¹—£±ÄP¨Â‚¿fgBB¨–ö¾X¼˜í·¼Ác-oSöòÛp±Õÿ×å”,Ʉř,ÿ÷wùÃÉ÷ k67Ü0—S/ŸäÍ“½œ:>›â¿Ÿ |Èëž?°¾ã4™s¸:ûSÑDÈŸ"3#Ùã¡{á_?ϸ\DMå§û¯pòÞ!†ºƒýŸÏ_HS«'j6WÌŸÂç°pÞl>>1p‘Ú9𨦑P€NL˜†ºãÁh§ÚgOxlÃn¿Ÿ­”æG?n‡ÛZh.,bý +´“éì£Ey…D?‡Û¨qm†wãê÷Õ’“²øpœŒlöÛ` ÐÉî¿iOÿjJ’ÿ”UÇÅ­›Zã+³c_ ŸC¨'H¢ãæC#×1d{“ôӊκþõï6S×YÊ–ÒQ¾JÛÍ0\jÿØyÔÉŽâh2ÌE”mbssŽR3 ÷³vk¦< –ÂŒ³ í'Y䘇uP¾Õ©·%/…z†Ýw’-f±]Ãí#¥ôưÿ§°ÿÉÉ/eG­­µ÷³ò½–ázpÎü È·`ok¤3d‡V–6¹ „n&äÏ£¨::qÚ°Ç & ™ÉË ÉŽÅAË ` úÙ8Ù5 €N‚¦6»ñíœí9â9U¯YÚÊ0b$Â\ƒ wGzéînäëÖº„å…¬a:ª¨Úþ[ŽFL,ÍË„¡öפeŒ%¾ä—P»¢ŽZW1¿í5=™v²¸€~ý0‘rìlŒ~bïÑXBh‡kïF;%…D䜘ڹ9>™Í(Bº” IDAT\À‚k®ä‘k€ÞåõýÖÿÇ[\½äJ®ä³\Ÿýÿ|>ÊÀ9÷RÈ}“ÆÃ'x—‹ùž è}›_tü §óZ¾õù9ì ,ðþ¸Û·à+×ñ›¯$>óûÎÃç0ðíò«ù» l||˜[0†õ7Þ: Ìø„·Nœæ‚ÙVM;“Å6ÝÍäá§7ÄÐoRú ƒÑxöŠÜéßK¶Å9Ä·6½DNA|ÁÑÎV 
†Øï3õ´Psÿ&‚¥»©MœÅl¡ÿ ïόƌXœwSþR¶Q¼!ÈrÔâs$>ÓFž!rö‹ùH„ÙѧF®cøö&:ÛOëïñ¸ð7·²¼9qý5XØWë¾Üdíï 0\j[Íða÷x©Û¢l·7zbÐÃÚÍ õ'î'!z{‡.fpŸ÷Hµ-Îäõ ·ïŒ´l¸ý1Y»’í#Ãþ?Æ}\Æ&˲ŠêúUî¡cï&ÖT6²ÔSJ>y®ðÒÚé„ §1 ìÜ-~z±³ÞHòcEÆ&Ù±8hY$>W ×°BJË7b±¸X¹¹‰ÂÚâèa9â9UÒ–1ËÒV¼þŽâ„*ÜÂÆ›¼½T…mÐF – ìÞæ<|«£‘Ê:M>ìF·TsSãP')cÜñ… … õ»¨f`.® ¦¸B~j׺hÌø‹¯Iâšìq\±Ä¡&€£ºô¼H†‰È9M ņ’£OÁ©ßæÖíGxë£OàÂLÌŸƒ!#~ÌE\“k`ÿË'8lº˜…ÀÂì >É©Ïg± ±œpð/ÿgˆ7È'ÒÄ`˜ §Â±¹~rçá¼8¿½Ðúd€GkòÀ“c(»¦—ßäØ'@×›<Õ5 ÇÕC÷áÐíœÌئ˜¹çÒfÜžŽØØ÷jN@ §£†3£ez¼ìÝ[HÉÍæø;#æ™—K'õîØ„žAõîE”@ÈËÆ••JvP3ðƒ»ÑBQ‘wSì]FÈ×»ˆ¢¥æXœ{htÇãôSã´Qåí]yØV´ÑÜ8"ØÚŒÿfKôƈu ÓÞûé¬,G->ŸïÌ¿%@É|ÉB¤Û0ý=¨ùE¬^äÅŸü$ØLãÞE8 ãïCÑW„ð7ÖGS"À|3%…ÍÔnÖjsS?ÖÏÀÉÚ’¬ždûN²eÉú'Y}ÉöÇaÿŸ°}\FònÄærG'—ÎÈ"'ÛŒÉdŒå2(°ÙñÖ×ÓV‡0›-øwî$TT˜pœs¬ÈØ$;\§BþFêý…Y²’_Ãä8*¨mbküxÒñ&ÃÊaEE MU¸;bûC¨wUmåΡ“öE”ëiôG×z\ØõtDBôš20bå4î¡ÿé"ö%B²2Æ_†#­´¶GçÑóº½gõx\8ÎL¾nL†^Ž.®I’c§ªæüI’B"3^†ÑHoh˜¯¿cBmM#üBÙ:›}B óÆÏóHö‡¬¯{…/oy•Òßú;¯`a|ù’ YøÞid&úÄç/faøS,_û`ºŒoçÏfÿÞW¹õÑ79˜?»3Nóz÷À‰šÇë3Ü”;‹—ö·óÀ÷€ù8ïù,_ì~›•[òå'{è2/à‡Ë採ìÙÜtÉ<ðØA¾¼ë}>wƒ™ï,pógÒvNflS-ç–mXZï§ØjŶr;áŠÚè·ßæUlÛh¢þŽè/fØ×6³t[5gçGî¥;CÞ9QHI^.»ë*7ÙÕµ”æC y;{OEhÝtG¿‰]žè$¯ö õú+±[­ØVÖcÚ¸-öUÿ8­Žj‚Îl°x{ÓXJ7`ܾ«ÕÊÊí°¾Ìû08RôwÄ~š#Å6tn~®Ú ¨ÿzôwÖ4c©õq–òõfö®µb+®¢Í^Aù"?m0…£zÎ@ÅV+·°¯˜Œ¶ _O²}'ù~•¬’µ+Ùþ8’±ìÿµËHŒö ¶¶PQlÅjµ±r{˜Šjç™oÁK 1·¶’c1GŸÈ³`î4`·Äê¤ÇŠŒM²c±ÿ±á¨âÜ?Ç&¹†õcfEE UµDLNÇ› /£ ‚mµZªÑó´c3~[-õÃ^ò)ÝVF¸6ºþ*w6ÖÆæ‹+pRmñãºÉŠÝåÆT¶›ýt`¦°jVºðô$)c<ñí”ÿ¤€f×MØVmÇXä<³(Ëñ áZV+VG ¡²êèÜGcŒ+]Ìêëëë›ê Dd2ñVW±qOûÐ?YdZÖ³íg«È?Çëp8LFÆy”&Ÿ.ŽwPþÔûß{ŽùSŒˆˆˆˆœK~¿‹Å2ÕaHÌd¿“]¾æ™ñr°WÕã­šê8SBHDDDDDdêhø˜ˆˆˆˆˆˆˆHÒB""ÓÍü|ê*§:™ît§ˆˆˆˆˆˆˆHRRHDDDDDDD$ ))$"""""""’†”ICJ ‰ˆˆˆˆˆˆL#‘HdªCsàÃ?döìÙ“Z‡’B"""""""ÓÄg>󺺺øðç:™D‘H„`0Èg>ó™I­gV___ß¤Ö """""""âôéÓB¡§OŸžêpd™L&rss'õn¡ H ½Å£[Þaoÿb¹:wßs~…sÆWú°ŽwPþÔûß{Žù“T‡¤S¼Õõ1 s/ê@DDDDDDDΩ KÙ¬¸õzÖ-=øè8M»Þ¢ìž¦¾l1 &ª‘ uŠWwáÜpƒ™‡—eMu@"""""""çÌäÌ)tá|ŠWÎã¦÷Nâ>òɤT!2n½Ý<èãcúxéåè™êˆDDDDDDDΙÉÜ—dñwózyòÏ=|gÉ8ÙÅ“»Žá~˜=Çr3ß¹ÆÈëϾÊúð|öݳ0ºÝÉ7yàÉ“\Ïu8sf»!†úôø¹û/xbë9ÿÛb¾µ$óÌ0³«¯6ðÒáéb6ö«sX÷•d’¤üã”?õ—›?¦)ÐÇò[þ+ÿxMƤu—LÓb^ñ:ë÷¾Ï›ñÄèŽ!I “øëcŸæÒKà÷"ÀqÜ¿<ÆáìËØ]y=ûV^Ì^ü3î®9\¹äb ]ïñjl«S‡Oqpž‘å¹$Ùn`]Çqïú ȾŒÝ•ײ{ù4í}“ý½ñå§qÿùþÞõüæžKxçðÛ<Ùþa 
åG8œqû*¯QBh†Ê\r%[W\ÂåºcHDDDDDDÒȹùIú®¸?0ðµ¯dGïÎɽœ{sOãn?K.¥xöG¼täàC^íŠ`¿ú2æ´Ýå;—e“É2¯Y@ÉÅ܇ß=³J±íòè¤×¹—sonMGþ’Bù³(¾fpÁ$wL%%†DDDDDD$MÞð1Þ¥ë$|.×'ßã"l¨;ؕܰǒÿò#á;æÙß}ޝ_]~ò“$Û%8ù ï0›KçÆŸ˜…apúo±Ç³¹b~¼©sX8o6ŸH¥üO‘©„ÒBæ’+y8ø _?ô ÑÄPMWeQlšêÈDDDDDDD&Çä%…z{ØÿÞ,nºb\øW>‡o—_Íß]8xÕ…K3™·û]^}í4‡¯ø ÿ_0wÎðÛï8û÷Ü9|Ž0ïžæôù˜¿êo¼u˜ð o8ͳGQ¾Ìx§Ž¼ÎC‡â“¢Ïâ¦r•‘mr’B'»ñì=Éëó.凋æá¼ø(ÿöB7×8²É<àѧþÂ;WçñȲ¹;GÆ[ü¸åS”Ü™{¶œÜ$Û-aÐzîÝ\ïø,¼zŒÆf³zé¥Àq ¦—ßÄùùËYÐý&OuÍÂñßs ÷DjåËŒvêH|²iˆ&„ÎÏŸ§ÿÓŸþ4Õ!ˆˆˆˆˆˆÈyæ _øÂ˜·°¤ÐÞ}Ù»/ú÷³gqen +Íѹ˜óž8æ~›•[Þæcfquþ^ï5Ÿ›¿Í/ºâLsæùa·;~lØõ˜=çŠ+pÌ'šb67]òfaî¥SŒˆˆˆˆˆˆÈ95󇉈ˆˆˆˆˆˆÈ >&"""""""’†”ICJ ‰ˆˆˆˆˆˆˆ¤!%…DDDDDDDDÒ’B""""""""iHI!‘4¤¤ÈLÑãÁeµb­i›êH’g5V+ÖĶbJ«=ÂS©ˆÈ41ÄyÔjÅ¦%8qå»<ôL@icª{¨ÏHmOÛ{ð¸¬X­5œçWf™ãÙw¦j[IGs¦: 7°¯ÖA@ÈOMéZV®°»Þ‰yŠC™Ï£„ð׬bíýU˜]3çÜ…úilìaEEq,– Яm"""2Qt§ˆœŒ*ªËÉnßÌvohª£™†ŒXŠì@;Þ¶ñÞ/P@…χ/¥¤Luw¬¥.g"""r.()$2SµÕ`µZYÛÐDƒ«›Õн´O‹‡êR{thÁÚ:†hõøëqÛÎ ár5t_îhˆ-³SZ]OÍÚþ·Ä‡ å:+hhg2ÇœG!Ð쌯‘t5 /“ì<ü?x8JÐ[Miìza+.¥Ú$:\j ­›¸ÕêÂÓÛvm 5.Vë¨û+V[5þø&àf•ÕŠ£¾cŒ ×Q‹§ÞE±ÍŠ­ØECÂ.Ùõ-ÚW~[SŠÝ:xÛRíǵ ~BÊIãÙ/Gó¾kèãUD$JI!‘ÎßÜeKûv7‘ö=lÚÜIq­ߎÕý[©lŒ½ 5±ym”ïÆçó±»ÜDëÖJ;€°Ÿš5[iÍ«à×>/µÅ½´ø* yÙ´j­5ì÷ùhXaëîñ¼çÈ0’hL¼ˆÈ¨…hñzÁP„Ó–•ü<=Ò9~ ž&6Wî!´z>Ÿm+zÙSY…;˜…£v%îå«Å¿µÈ ïÁ|¾Ÿð­»î†È^¼ÑEA¿‡N²qæ_gë&n0§Ë3àêàï$RTKSÓŠz[ÙZç^?’]ßÎlë'ì¬Çëû5y­l]Ss6iÕ¯[SïÇ-ùí¸“õ£¤·ñì—£yß5ìñzŽÚ)"ç=%…Df8ƒÅB~dä`°a1ÙfÌ@woìkLc15/ýšrC+µÕTlïºé^öD pÅÍäF‹“Uygëù›iŽ€½È‚ȹ¹ˆBÚq·ÎaKEDÒ\¿ÄI1›v~Ò°»q„óôçøÁ¢×£u÷ãªñ€s7>ßóåÙ)ˆ-ϰع›îæ6 ¿·²$Ë E“L>| ÿj³qs`\Š¥hm§’_ßÎlë¤È ÃÍ+ !²çLÒ*Q*ýXä,J±%­c¿Ýû®1¯"’V”™á,æìÔVìiµüÖ40;*¨qž]`Ή¿ÏÄ`:»i8½e¹qMìÃÈ­›h:ãBEo¡6çhbQ‘TÄ'û·•°ˆ¡ svü<=Ò9~¬"Ê×[ÈŒôÒÚ¸‰5wÜ4h8ò &™ñ¿3,ßm âõÓòãm…lg!ÉrBã’äú6”¬3ÀàûTSéG“É[{„~”ô6ŽýrTï»Ær¼ŠHZѯ‰=­{i@IEŽ&Lše6­‚=Pœ"Ò›°±!ú_ÉP “V È¢¯YEDFÃh)gãêÖ쬣²®€Ý–äçé63IÏñƒd¿jÞU!m-´Ôogëo·RÙhÁSšJ„,-^awm¿%›ò¤· O²ëÛ‘³S0õ˜ÍY½×è¬ íGIgãÚ/cC¿†~ßÕ6àq²ãuòŽ;™>t§ˆaŒ~³ ö@¸Æú„‰òìÜm€Ö½¿%„ünn«Ï²9)2€·ÙOz\X­jÚÆø5T¨wÝvº•³Úny}IAAÙFJAwãFêÚÂÉÏÓ#œãé¨Çiµâr‡0³ªÌI`6™y 7Ä©d.-f…¡›={šG:6NI¯oqþzý! 
Èo÷¶‚ánìC|'1¡ý(im<ûå¨Þw%=^ED”‘£½œ-+òhýÁ­Øî¨!츛¥€¿-*v¬§°³†;lÅTµåa·ôÛ˜ (l«`¹ÕÊʺwÿd©8Fñýx²dwCéä '™É2 (ßXÂ"ºiÜØHG²óôHçøòK©ýY ††UÑsöZ7ÙåÛØàÈr°Ü]„)°••ÖU4†‹Ï‚ýí&¥¡cCL4ø ˜É$½¾ÅYl˜š\Ø­wPÓYă X†º„Ô RëGW[Îä#‚$ 3Iú¸öËѼïJz¼ŠˆÀ¬¾¾¾¾©BD¦›<®[ÙÙÀ¯·9Ð\…""3ɹ9ÇwÔ;øz”ÿÒƒF±ˆˆˆL Ý)$"#ëiÂeµbuÔF¿• µãoCžY !‘énJÎñm4»»'}蘈ˆˆ$§;…D$%Ao5U›÷ÒÞ2Yzw+˜G1BLDDÎOçôßVƒuM#S!å?Ûª|]HDDD¦Š’B""""""""iHÃÇDDDDDDDDÒ’B""""""""iHI!‘4¤¤ˆˆˆˆˆˆˆHRRHd w4°ÖnÅj­ÂÍ–=x\V\žžÉ -aÂ]d—ÕŨ›5ªí&*îIh¿ˆ¤‰6j¬VœµmÎ#Ñs{MÛ…%"""ç=%…DfŒ­;·b¨ø5>_5vãTÇ3V-§nÚ}p™¨¸§kûEä|rtg {S…ˆˆˆL'J ‰ÌaB!0›s¦:1ÑÝ™øb³ÔújqdM|ÑQ÷$µ_DÒJa¡‘Í5‚SˆˆˆˆLJ ‰Ì=x\·²©×X±ÆÇ „Úhpc³Z±»¨÷'Œ‡ z©.µcµÚXµq/ýr=~ê¶kèˆ Hèñà²n¤¦v6«8VNt]«­WCÇðà†\·k Dc?3Ì!i .šbCålÅT5 z«)µ[±ÚV±Ñì·î™a`Êì×ÉŒ&îaûŸ‡26QÓÜ{í–#"2†|J6–ÐQUKË |ÏHçUIGJ ‰ÌTVÜíwSâÌÇã ÔÙŽ»5[V„Ó]f´¬¦¬0q»Äe%”4ãn‰gzòpØÌ±¿spÖ·Pe3ê ÐÙ  =jë¦Ã*§%º,¯KÄBQQtݬœˆ Qê€2ÉqRÛ²1…¹—F÷0}n0b 4ÓÜÔA0lÄVÕBíäk‘4•QPF•³•ÍuþþwmŽx^‘t4gª‘I ÐÉî¿iOÿçKB ¦3 ‘,r̉Û%.3€„,ˆ Cfüï0 ÷³vk¦< – â›õx\ܺ©5^);|å†YwèØS!EƒÊ7Ú+ØVØBE±«ÕÆÊía*ªäÀ刈ŒWŽòr%<žøóªˆˆˆLw³úúúú¦:9·t§ˆˆˆˆˆˆˆHRRHDDDDDDD$ ))$"""""""’†”ICJ ‰ˆˆˆˆˆˆˆ¤!%…DDDDDDDDÒ’B""""""""iHI!‘4¤¤ˆˆˆˆˆˆˆHRRHDDDDDDD$ ))$"""""""’†”ICJ ‰ˆˆˆˆˆˆˆ¤!%…DDDDDDDDÒ’B""""""""iHI!‘4¤¤ˆˆˆˆˆˆˆHRRHDDDDDDD$ ))$"""""""’†”ICJ ‰ˆˆˆˆˆˆˆ¤!%…DDDDDDDDÒ’B""""""""iHI!‘4¤¤ˆˆˆˆˆˆˆHRRHDDDDDDD$ ))$"""""""’†”ICJ ‰ˆˆˆˆˆˆˆ¤!%…DDDDDDDDÒМ©@DRó§?ýiªC‘ó̾ð…1o;«¯¯¯oc‘i@ÃÇDDDDDDDDÒ’B""""""""iHI!‘4¤¤ˆˆˆˆˆˆˆHRRHDDDDDDD$ ))$"""""""’†”ICJ ‰ˆˆˆˆˆˆˆ¤!%…DDDDDDDDÒ’B""""""""iHI!‘£«kâ?»“Šš‚ñUz<¸¬.<=ã¨æ|)c0O5¥Å6¬V+vg áë3Ìš""""""3–’B"3LÉ>_ì_S E¡Vºš˜ÐüËù¨£×ö Êv·àóùØí2Ѹf3ÞÐHhXµœº¶s¤ˆˆˆˆˆÈùcÎT "“(ÃLñƒø—ofoG1¥ùj}Ž©Ž ²&!ŽüR<ž„*ìì‘5ø±$«?Dwgdbc™t§ÈL—Q€ÍÞMs{`À°©0 .ŠmV¬V;¥ÕMâc¨bë5xê)µ[±ÚK©ö†bôR]ZŒÍjÅj+ÆÕÐA˜0þjM ·é´Õ`sy¢w,%ÆÑãÁe]KM}u´®3e$–oÅXOÍZ+®”ÆE†¡©¿k @ã+5ñ»…Bm4¸¢m²»¨÷÷ôï—¦ÖÆb­j ôÆc_ÅFo|°^’þ9()$2ã1™ 30`UÛvÖì-äg->|>el¢¦91ÙÒJ]³‰j¯_ƒ“îÍ.êÚf5‚¸«ª l7->/íXMïÖ:¼=,-^Aks ñZ;ü^,E…d £Ÿ–Þbj½>|»Ë`k%ñò+é.ÚÆ~Ÿ—Úâ^¼þÔZni½¨Œ¢w õ“…£v%D‡ÝUÄêt­¡µðg4ù|´ì(ÂïªÄ}fb¦VÜíùlñúØ¿åf¼›V²9è¤Öëc߆löV¹iK©EDDDDD¦–’B"éÊ`Äh¦¹©ƒ`؈­ª…ZGbÊÆ€³ÌA@ŽƒRg7noç€BrpÖ·Pe3ê ÐÙ DôŒ¥Å¬hÝ‹· ƒV· 
»eè”ä±ÊiÁ“‡™nz#@ÐOs{NG>FÀhqRš—BÛ‚*+ý”l,!4}hÅÝ~7%ÎüX<Jí¸[ƒb5æ`‰X(*Š®›•“‘ØP´ûWDDDDDdj))$2ãõ @žÙØÿéüRvÔÑÝx?+o²b/­ÂÝï׺,äeŸ}´(¯ðL¾ã¬0 k±[‹)½¿Oàgjɰàpúiní@;ÍF–œáb4aÈâéÞNü˜0)4ƒi„æ†Û¨qm†kqdŒ°òB:ÙÃý7ý·µ‰wZ ë@#ö¯ˆˆˆÈÿßÞ‚E}Çqÿ¬hI /¹¢‰+°àYÀ0v”‘°`‚g¢L +žAV´èŠϰ#l¤[Ò%&V°@: ÎÉD§‚{ü½^õîùÿÿ|?¿ß÷p² š†/]¯›ÕõÁŒÍT’¼ù‰­³Ã“Y|8™ìngce!×n.çB{êÕîšÍìì$ÿœ÷úëùzNš~óÚÛür§—éÇLV’lµsý×Õ×?_«§Ûü3Ýt’z#•£®} ’óéf§—ôkÓËìí¼çÿÛkiÎ-dkêq–®þâd]IDATgúˆ{þ™§ó=Ü”¶7t©÷¿_€“e§|Ézi7ïg}¬‘ú¡ÑëÜÊèl«?üøôÙ VræÌÀÁNŸ¼H«Õé†Þjçaë\¦ß9 §×6饻ü0ÝìõÏ%ÉÅï2½³”Fs3c*G_e$õ «iµ7ÒKÒë¶òè𠶃Ê­‰›ÙüñAšG B§rf0ÙííþëžO²Üêß3½nšõÑÌøÛö‡–ó¡÷ p²D!øÂ,_;8ö4:±Ne>¿-ÖÞŠµF1^Mµ:š‰û»i,ÖsS†SZËÍZ5ÕÉV—2ux‹ËÙZfnT²r½šÑñù<«52s®›g¯?³õMFêy9PÏȱ¶Ç ¥¾x;ƒ«×s¹ZËl§’«#ïþçæêý¬¼ÜËúÂ÷¯Ÿ¿Zý˜/•U2\?Ÿ•¹o_…Ÿ¡ÔoßËðú\Æ«ÕT¯.f«þ ?ÕŽ–s>ü~NÖWûûûû'½àf»Ù+«û})Ÿ:¹÷G#“/¦Ó~«(ÇfZ“Ù˜yšù·Îvpv ŸÑV:+ÏS?Þ6¡dw-‹Õ©w)ÓneÜ`€Oæø@((P Q @¢@D!€‰B… $ H((P Q @¢@D!€‰B… $ H((P Q @¢@D!€‰B… $ H((P Q @¢@D!€‰B… $ H((P Q @¢@D!€‰B… $ H((P Q @¢@D!€‰B… $ H(ÐßêÍ2¶ ðIEND®B`‚magnum-6.1.0/doc/source/images/MagnumVolumeIntegration.png0000666000175100017510000014047613244017334023705 0ustar zuulzuul00000000000000‰PNG  IHDR) Ÿ¢2ÎbKGDÿÿÿ ½§“ IDATxœìÝw|Õ¹ÿñÏì®V½Y¶Š{1ƘjªÁlr„ä’ ÷BZnŠEH!7$pCà’@nB‹íÚ5-t¡˜jw7Y²¬^¶ÌïÑH«Ñj%­¶Éú¾_¯EÚ)ç<3»ç™sÎŒAÏ.íÅv½õ%àADzsÚêYÕ¶ÞiàJVǺßçÇ0¾ï;–üx¸.Ì>oiÀ|ÀçX÷}`a ãûð‚cÙñÀÍÀÀ²0û<x€K¿cÝ×€ccߟ€·ËfW³Ï]€ø&p¬;˜ÃøžÖ;–M¾l ³Ïõ€ ¸ :Ö-&Æ0¾-ŽeÃÀàõ0û\ØßßÃÄ7ëßM¬¬Åú÷ª˜¶†Ùg6`ï¦c]1Ãø*€zDzl hö‡Ù§°ígu ã‘8{«a«×%aêø|Ûºp D°+HF|´­û‡â‹K|¾¶õž/ÕÏß@ŽïÇî‚J²â‹åÅ‘A%\£ÐéRâÿ?Û§€!tí…°•´ý ·þ;ÀbKE˜e/Ç`])ç8¬+ÅÎ^ €Ÿ÷Ç$2ËÚ0ËÞÎ |ìc]iwöR€ÕóñJlBºö¢€Õƒr%°£›}®ÆŠÏÙ VÏÇ®˜Dfqö¢€ÕƒòK`]7ûüšîÏß‹t½rßÎ^°zPVþ܂Ճâ&üù[ ü36¡]{Qj€5„à}ºï°-6¡PfYc[ábëßµ+±p2úK¸òDDDDDDDDDDDDDDDD¤WÂ~qq²‘î¹’@ÙcË5N\DDDDDDDDDDDDDDDDD$¥­Äêua²‘î ¦9)ö±†{Vƒˆˆˆˆˆˆˆˆˆˆˆˆˆˆˆˆˆˆ¤Œ'±æ¤œ—ì@DDDDD¤{ƒiNŠýœÍI‘AjâÄ!yS§ŽÖ.‘Ê“ŒJgÍšð·üüìóãQvSS«·©©eÖ”PÏggÏÅ£né¿„')³fMztÈÜ N=u¡‘››Ó²yd¥YT”gìÜYñL˜Õ=='e-°¸È±|°˜‰5ñ:æ·Ø–‡l³®¯q‹ˆˆˆˆH‡„&)³gO~±  û”¯ýkÆ„ ãbZöý×de¥ ÍAà)Ì&gôPÄÍXɆÓR`E„e3ÚÞ‹ˆˆˆˆH $ìî^³gOy9??û”+®¸"æ ÊÊ•QUµŸ#†bš¢,ÆN:BŽm¯›C–•cõ®Ø–´-‘HHOÊQGMØX\œ7yذá¼ýöÞ~{MÌÊniiaݺÉÎÎ0GfìØ±¿»M_NNmû=œtî%YB×^”•XIÉ 
¬¡]åXüʣ‰_DDDDD:KH’’››5©®®‰ºº­æ¶m[cZvss«‘••άYœóDœzóœ”tî%YJç^[yÛv+Ú^š‡"""""# IR22¼\~ù ó{J$úìŽ;þJ]]]o6=¹Û¬Àš—2ñÌÉîMén½ˆˆˆˆˆD)!IŠÛíâ¯ýGÜÊ7Œ˜æ>åXü ûd]ۺ鶑($$I MÎ<órr²c^öcýÓ4{ÞÞæ Ú~ïÎJàom¿1Âvv’""""""1”$Å4MÊÊŠ),ÌCé½îEéÍœè˜_²žÈsMÔƒ"""""Iyâ|’Ìïö3»Y)#Šù|‘Á(aÏIéÁ”¤¬L`N²‘î%l¸WEEÿú×ûý.gîÜ#:tH4»övNŠˆˆˆˆˆ$QÂ’”††Fª«k™2e™™™¤¥õ½ú¬¬ÌhC˜íŽ"""""’8 KR<ž4Z[}|üñ&.œÏèÑÃU½ˆˆˆˆˆ  KRÜnƒŒ oûû7ÞXÃo¬és9'Ÿ<²²âhBøë®]G¶ýn[Šõ”ù‹·ž¬mû½¸9y¡ô¨/±šXŸÏ:Çï^¼ÏÑáú ´ï"""ƒRÂ’”––V\.Ó§O ??—ôto{uUPmÝÍIYNlcÎ'K†6†ìuÎÛ/Çj\¡1, )×ÉyÜᎣ»óz,KË{:=oç¶vù}Ź®;Îs¶Üñ~ŸWOß«%t=‡áDúÞ8ãvžßp߇î¾;±ú.ƒõï¾§‹"""’$ ëIñû}dee´÷:´¬¬Œ>—Ó»{m&S€MX štíEYKÇ%»QzÅy½¿==Lù+±’»îr¬ž—ò^”7“¾ƒqÆÛÓñÍh{o7Ã]i^Û¶ÿEmïÃ%)ÑÄêdÇhÇÚÕ“pÇ.¦µtÌ—±·ôyÿÎókœ ƒs{ç¹µËïk,¡ëÂÕk[AçÆº³!>ƒ®ç°»ÏÊ>‡v`¤^‡î¾7=ßpº‹'–ßåu!Û¤Ê7i“°$%33¯7-[vPT”O^^n¢ª‡Ž^#{NÊtº6ÖìÝ ÇÏÐÆñ{QWèÕþ‹Â¬/§c¨Y<†›… ·7Çg7äºkÄÙÉ]h¹áŽ1n¦sB®ç+Ò¾¹1jËÌmízÂ}ÞKÛ‡îûáL(úËú^Ôk[OÇçlÇ:úwC‡&v÷] w¾íŸ‘Îo_Äú»¼žðDDD$É8qÞÇþý•íïŸxâ…¨Ê9çœS5ª,š]'õr;gƒ%šŒ}ؾ‚ë¼"l÷¦@âï0Ôßã›å~ÑZ5ìÈnLƲ^ûXz3tÏÖ×úMz×)–éa–ug=[¡ ö%m?c}#‰æüöE,þ­ŠˆˆH JX’ (,ÌoîUV6Œœœì>—ãñÄ=dç‰Hs&zb7šœWk퉻ázsâ­¿Ç—¨‰ð6{‚ózâw®ú2¼/ç˾Úß—ÉÙábéëgdZJG/—I|Ïa$ñš Ë«"""’B6q>;;‹ÜÜlvíÚî]{ðùü¤§{ûür»£y;VCmlÛ{{¨G(»gO²u)é+{ÿp {{¨W"Åâøì+ñ¡óPÂÍIén_ûœGšˆÊžcÑ—aJ=±c°‡B9'–wר Rú³;öçn—nûÞÄb'¸½­×.'ôs MöºÓ—›Þèéüöô}ˆO,¾ËÓI|Ò-"""½°ž”††F6oÞÑþ>ô÷¾8ÿü33fD4»:ç¤t7NÝ~†ByÈû¾rÎI 7 %W´!6ÇgOl¶‡ñôöXn¦#¡)°S,ÏUyH ¡ØíxÖ9º(dÿžâ²oŒ`œÛ÷%–¾Ôk' ={Xwû:c‰•HÇéûЛxúó]M”DDD$Å$ä9 §Ÿ>'xê©óŒ-[>mî5zôpòòrú]öwü•ºº:N:éH^{m­¯ººö®µkw\Û‹Ýí«ºñšø-±aß™)Y‰]$ý½ƒ™$ÇrzNHEDR}·ÅuDw‘±¯ì‹‘úÿœ$\†{åädRX˜ÏÞ½ìÝ[A}}--­}~ÎÆ÷‹ý¬eO]ö]¯R%A Ú¶”ÄND—؈õðA‰Žó9R¡7Ù°Ÿ“ÔÛ¡Á©$Üó´Â=ûJD"HØp¯úú&6nÜNZš€mÛvFUN?îîµ(F´ýn»ˆÎÏ[Ôâœ[‘ BÿG:XŸÚ>PÙW!Õ{*’<¡muÞ s‡×„™Xóß–·½Ô#!ÒK KR† ÉgÊ”qL˜0†‚‚¼DVb[š©,Õ“©ôþ½‹$_¸[¢Û×–ÐùåtÌ; }8k¨Ð†¿}é":îrhþVè¡ûÙ0"mã,£·7ÃÝ £§xÂmnˆê2¬ó:ü«»8CÏ}Hè<‡±»ó@„õ¡û‹ÄL‡{eSZ:Œššú¶Wµµ}ùýþhC(ÅúCµ/vG%"""Q²‡EÙ g»~3õr¬ÿw;{m˜ö—ÑyxÕº}. 
Ù:7¼CË e:ÖÛw¹ìÍ®Ð;†&eÝÅZßEbšAׇô6Î!û”Ó1´:Òyc¯/G$N8Ü«M›¶’‘‘ ÀÎ{zØ#¼3Ï<áÃKbšˆˆˆ$Žs>¨¬@äáPöüçíÕ—bõÀ¬s,X>Ó±ÎfÏQs.ïM½ö¾‘zSBò^ÆÚÓºn&“e!Ë{§óŽNáöe'[¡Ç­‰›„÷ÊÊÊdäHk>É‘GNeèÐ!‰¬¾(†U‰¬XDDD:±ÖÎáCˈnX­³'Å9ΧaLÐ}rú¬«pô"±“.ûvéöE#Åc×é9N3èzÌý‰3\oÓÍt$Z¡±kN¯ÄE†{ååå2iÒx233ÉÌ̤®®ŠŠª>¿Z[}цÐÝœIœÐ®=OÌnäFzˆk¤ ç‘Ö-£ë0¥¾°Ë¶c }õ6¡²Žé½ˆg]ȶ‘b*oû}YȲþÆÙ»¼Ðž ݵLâ"a=)µµu|üñ†ög£|úégQ•sòÉó(++ŽfׄvÛˆˆˆH·œÏ˜²ºöð¡õtí-°TÚ(^²®;Ξ g„=dÌå\Zoè —b%½òd'öð«HñØõ9‡“ÙÏ ³Ù=vì=ÅéÙ±E:v2dϲ·×ÝÊ$.:Ü+--üü\Ž>z¥¥ÃY½ˆˆˆ$ßEtž0o MZìmB‡.ÙWð×Òy8SO·õu–åÜvw ·ÞîñX&æòõâØ>tG¤xìúL:÷:…;Ƌ踽±ÝËMœv|vâ..»œrÇ{ÍK‘¸HX’’——ÃÔ©Úß×ÖÖG5tkذ"23Ó£ ¡ÈòÚ~‘äèÍ«pÛ¬ëfyOåöTŸ}W1›cèýÞ{б7ñDÚ&ty¸ÛªGŠÓ¹­ó}¤aaán,7 îUϺu›:´€ýûDUÎüù³£MR4'EDDDÂYKGã>Üs\D$Á:Ü+Ô¼yGQV–Ð[ g'²20Boƒ Ž("I°$%''›É“ǵ¿ß»·’ªªš>—3fÌrs•oˆˆˆHÌôõŽ_"g }˜ãŽ»ÚŸ“rèPß Ú;{´^ h¶‰¯„÷jnn¥¢ÂzŽâ 'ÌeäÈÒDV¯9)"""""@B‡{M™2¾ýýîÝû¢šŠˆˆˆˆÈ‘Ðá^{÷`Ê”q=oa¸zÞ¨« êA:ÜëàÁÞ}÷cÎ<ó$Æó:‚A³çDDDDD$e%,IÉÎÎêÔ‹²mÛ§lß¾³ÏåuÔtŠŠ º]ïñ¸Ò23Ó/;vìM;vìhn[œ4c=+%£Ï•ŠˆˆˆˆHÂ$,Iihh¤ºº†éÓ'õ«¯·û; ¿ÿþf23Óñx!-Í׿|¾q÷ݪ«>Ú¶í³Íý‰YDDDDD/¡Ã½êêÚ‡{¥¥yp¹ú~§®¬¬Ì.Ë ÃÀãñPZZBuu --Í®úúÆ+B6Éj: Ï÷/‘ÄIX’âr¹©®®áõ×ßâsw¯/~ñî½wE ¾¾idž ;ׄVßöSsRDDDDDR\Â’”1c†3kÖ´˜—kš••Áøñc¨«k¤¶¶ÖU]]÷ÿ›Õ 9)"""""BÜ“”©SGŸg†ñ׿þ#.å€ÉÙgŸÀßÿþœÙØØ\¹qãgÏÄ¥2‰»¸ö.LŸ>æò²²¢?uÔ4Î8cA\ê üÜzë=äæfãóù‚{öT]µaÃÎ{›•@04qˆˆˆˆˆHlÄ-I™:uÔÅ#G{0üñªÀï÷» W lxýõµ9a6T€âxÆ""""""ý·á^.—áÞ½»ò¦x•ßµ>\ ®ßv³úš“""""""""""""ÞpÀv';‰¬ïOS˜ìã4“…ˆˆˆˆˆˆˆˆˆˆˆˆˆˆˆˆˆˆô`,ÖP¯íIŽCDDDDDz0Øæ¤“…ˆˆˆˆˆôHÏI]ñ¸áK¬ÿßë_ÿ(¾»‘1®Od°ô¤ˆˆˆ DO';‰Ÿ)X™ö†d"""""quV»ï³d"Ñóôb›ÆÄ°ÎG€OËŽÎÞžs¬Ë.š¿„)ïd ¸‡:G´ýÌ–ö°í+@…cÙ`6°xϱΠœ´‡)o2ÛC}± ¨s,Œ;ë<ÀQ€ø0Lye@f ãÛ 49–¥Y@ ÐèXgXLÅ0 z3.ðEàÔÖ¹xɱìjà.àÀ7늀J  :ã{¡­ÌX9 똗ø†b%W•XÉVªÅWlÆ:“”÷0?†ñ} xÓ±ìRà'ÀƒÀë €b%x§‡)ï6`V ã»øÀ±ì<ààIàǺ|`%P, SÞw±z:cåvºö˜ž \‚uâ!Ǻà¿€zà{aÊ»ë"E4îź°!""‰upš“2 õ¦'åaàíÖé¼ÒVÊ/…Y×üV#"œW°¸±âìE«¡±‚®½(`õ ¬ kï†m3VC-VjÃ,;¬!ü¹ ´­ë®—b°-6¡]{JÀ:GÕ@C7ûT·½ÂIÄ;#Â:PˆÕÎp`| â²e„Y–×VG¸$3 ˜ƒõg±Mâ Ã,‰• m³ÎÛ¶®»øÛø¡k’2øÖ¿g’’Ѷ®’ðIÊÅýˆïY”¤ˆˆˆDe°ÜÝëà#àcàÈ$Ç"KOÃÑ1\.+A©¥kBÞÓp¾9„O,¢µ†® åH`*ÖØ_g‚àNÂJT_ SÞb¬D/Vžö8–MNÖ«ëzNz1ÖPÊh4qþ0Лá^‡ƒ<½F"""""šž“"""""")e°$)Ç`uû½“ì@DDDDD$²Á’¤Dº{“ˆˆˆˆˆ¤Á2'å4'EDDDDd@,=)"""""")åx¬¡^¯';‰«éÀ À’d"Ñ,ý4'EDDDdpXßö9 –;^ 
¼¼,Lr,"""’`ùxÿU­þÀqÉŽc°0Á“ëuÍÌn Þ1Ó <š“""""‡µû?ÜsûÇÕ¯¼_,Lv,ƒË08§Ìl5­•ÿvÌX%(•Á’¤¼Èàé5 Ü4]c?Ú÷à›Uæ¢u5ÁüdÇ3¸ ¸`­/¾ñ®ëÄ#'íKv<2pé9)"""©ëͶ—ôÑ=ïšic>Þ÷ÄKÁó” $†Û€ F­/½ùŽñì{ëË…p‰“Á’¤œ…5Ôë™d"""Òǵ½¤–ïÚ•™–¶÷ùç÷OÛZof';žÁ S‚²f}Z²ã‘o°d¹š“"""2,ßz0¿î`˳Oï ÌÞׄË  Eâa°$)Ï 9)"""‡µG?ÜW|°®ù¹'ö˜3¶àNv<ƒA{‚ò¯5(A‘X,IŠˆˆˆÆûx÷¨ ðÙÇv§Öúta2:%(ï®õ&;9¼ –$å<à àIàsIŽEDDDbhå†=ÇT4³â©ÝÁ±­È,3n“ÈmÀÙeø” H¼ –$ÅþsLj"""Så¦éjýxÏã?CÎ,¡9Ùñ é—ç£-ÛŒgß]«!^ƒ%IyÍI9ì”Fð®5Ÿm»õýÓË`rîØôçR¬$fW—#‰TŽý{¤ÄÖ]ã¯7åÛ>»ŒHIç_Ãî¶ W¦ýsfȺuôýXCëîî<;×…«ËŽÑ~ÚCÖ›ý"[8ÓCöqö6@ì?+'çñöö3qꮾéÖ9×;çvÙ‰Y$7c%ö1‡&,ýç)"ƒÐ¸|/'È¡ÀëÆoº `Ðì‡ÊÖp[;¯k¸ñºÓ˜94“#‡Hs™T5xmw=;jÃÐ.ÏëæÄÙLÌÏ ˆAÀpÑâ‡êVÇÿ`»)ÆmÀè/ ²ð¸‚øAÞ©häýŠFünä2 Ž–ÉÜÒ,ÒÝ.ü¦ _Р1-ÝîÕq (ÈH㸜LŒâÆdkm3«w7PÝ¢„E+¥’”$ëî®HÎeöûéÎ {Ð]9}©±‹òÁúûYNïz‡Â±ÏKß/ûD./u9?»h­»²/¢sïIwòX}V½Íñv÷]³“óî¬ù=Ú;­À¶f'<=}çcýI%O';€hH†›‹§âq{8Ô⢢%º?­Ahmpàuy8‚—Ö€ŸG6Vwi¸» ƒóÆç1¡ ƒšVZ¢ûÃ0¡®¬ÄÉËp3·4Ÿ“Gäðܧu|TÙuü´!éœ;.ŸÆ€‹šVƒ_ßë5Mh X/;y)ÉÎæßgf²«¶…l­‰ªWG$)•¤üñ$++‹¯~õ«”••pÛm·á÷ûinnîOÑWw÷Wõq_gƒÌ~¿Þ¹a”åô†ÝP[B÷ ¶þ”o³¯òÇb‚s¬†Úôæ<÷¶.ç9ékŒ=Ų"¤L{8RwCª"½—ÞoOß5{™³—b©c}¬ cÈe¤žþ~ž"Á9É` )LwsùŒ¡T4»ðµB,¯_´¡¢ÙÀëNãë3‡rÏǕԆ ‰ºlZ!Þ´tö4ƬJ‚&T·À!ÃÍÂÑyd¥¹ø×Þ†öõGgpê¨|ö4ºb~µ¦Á ~ù™™|m†‡ÿY[‰©ª÷ÕÖկˤ,Ó`X:y ŠÒ¬?*fX6+q1Mh˜l6Ø×do“±ï0á““OjL †z¡$Ó $Ý`h:x\Ð܆µ½]wKùL>m€}Aêú8ºj3ìo¶Ž9ÇcP’aRšaP˜aÞ6•Åhÿ¯Ù^o u~ØÕdRÑbRÑ‚îâ%I•ô$Å4MsË–-½Úö“O>‰s4"""r8j § Vã¿÷+zØ®—NL´À“µ ÐQï7©¯‡­õ&L"M*$)Jú¸[×§Evpðkàú>î+""""êЖJz’ü8uhNŠˆˆˆÄ–Z"q“Rw÷Š£Û°•’ˆˆˆˆÄOº;qWû3=u†5¿$1õvn¾eãtÏc“4Cwø’Ä,IŠˆˆˆ îž7ë?²½5•fzÖ¨2=8>_¿ÓÐk®`+ÃÌÚ„Ô%2X’”`uÊþ,ÙˆˆˆHül<ØÄè¬ø×3: 6ì˜"»¾º‰aÞøÿÊpA‹ã± ¦7—Ì à‘ëemsnü+ê¿ûk“ˆDo°$)š“"""2üsg=ei>¦çYC°bÍ0`z”¥ùùçÎúöåÕÍÞÛ_ǩìD"†¥ÃÂâ +6UwZ¾|s5§ R’Ÿz½.8q(l®l`wƒ/>•ÄÖzàWÀÊd"ÑK…‰ó‰ð3Ô‹"""rØ š&|rãʲYT’Íîf›ëLûõLhÈrä\ƒQAÖìoàé½ ]®|¾½¯‘=õ­œ9¶.6Õìïçt·c³ &d©lðñ絇ht<´å`s€û×Vqö˜<Ž.ñ²µÁŶF“@?§ M7˜’k’í òü§µl¯Ḭ2¨ñÔù¬'Ó·8üéƒtÃ$'Í ?ͤ0Í$ “ƒÍ~>ÞßÄ“‡ZFxÊágõ~î[[Iq¦›Ù%Ù1Ì‹Çí¢ºÕà êýM~ë ö“ög*z\Ö߳ݹ(òÉõ@}«ŸÍÕ­üïŽFZüÝgÍþ m=DºÛ`Ö°LŽ/Ì 7ÝC]ª[ jýõ~_|A0 Ã4qæ6ÈvC¶ ½A ½ ù¬¾•nkdcŸ()ƒ%I¹ëVÇË€Ÿ$9I€ i²¡º™ ÕVw†ÇeP–í¡,;a™ÒÝ.²²\ pµåþ -“Êz›|ìmðãö}´xES€gvX“Ì `X¦›Òì4†fy(I÷î6ð郿IKÀ¤5`r¨ÅϦƒ>ö7ø¨óõ½+¤%`òÖ¾FÞÚ×@Nš‹ÒleÙ^†§»Ép»Hw4L²ÝÓ¤9`Rï 
PQïãÃ?ý/I7X’ÍIäüA“]u>vÕ%v^…‰•´T4õsÌYê}A¶jeˡք×-Òƒeâü±•Ÿ&;‰l°$)"""Ñ›m/‘Ae°$)?Çêm½)ÙˆˆˆôÁqm/‘Ae°$)öqöóf|""""ÒÃ0ð žv¦ÄÁ`ùò܈5'åWÉDDDDäpfמê§C r~žìXdà,w÷‘8s.¾wáiŸM*+þÖ—gy2ÙñÈÀ5XzR~ƒ5'å;ÉDDDDäpä6\|ç ÷N()ùËæ(A‘þ,IŠž“""""'v‚2©´ôª¯ÎýD²ã‘o° ÷úêE‰9;A™\ZzõW” HŒ –$EDDDDbÌm¸øÎ ÷N,öͯ3úñdÇ#‡Á2Ü뿱†zýG²9¸hKPF ûæ¿3î±dÇ#‡—ÁÒ“¢ç¤ˆˆˆˆÄŠgϙޜ—©EâÂèyIû†/úÿu÷}´÷ ;9ÉŽc0Éõež /~iö¨k’‹žºû£÷oÀ˜Öóð©cÙÑÀ™ÀûÀsŽuÀe@3ð—0å Ç0¾W€ Dz Àl`+ðžc8h¿œ äÆ0¾M@cÙ0`4pØéXçŽüÀ‡aÊ+2cß^ É±,ÈZ€FÇ:(ÀúŸï¡Æ!"r¸Q’")É4MÃ0 Ý5U⦻?z/§Æ°ž…ÀKŽeWw¾éXWTUÀÐß meÆÊiXÇ|¸Ä7+¹ªÄJ¶R-¾"`3Öù›¦¼€ù1ŒïKÀ›Že—?~ìXWü+Á;=Ly·³bßõÀŽeç×Ow8Öå+`I˜ò¾ L‰a|·ËN.Áºñc]ð_@=ð½0å]u‘"÷b]؉%)"}÷Xÿýض'$º›“ò0ðv ëq^é«å—À¿Â¬kþ«Î+X ÜÞ:¼ l³ÞÙ‹VCc]{QÀêAYA×Þ Ûf¬†Z¬Ô†YvXCøsh[×]/Å>`[lBºö”€uŽª†nö©n{…ëÿ‡»ÒéÙ9. «'*œáÀøÄe˳,¯­ŽpIf0ë;Îb›Ä†Y6+AÚf·m]wñ-&¶ñ=B×$e:ð ¬ Î$%£m]%á“”‹ûß³(II¶±À¬‘-2@éÊŒHW= GKÄp¹<¬¥–® yOÃùæ>±ˆÖº&”#©ÀgtM¼ÀIX‰ê«aÊ[Œ•èÅÊSÀDzéÀ Àz`µc]OÃI/ÆJ‡ á@$ZêIé»Û€ë€ÝXÿ¿‘2 ßã+"Ý» ëßÍgÉD¢7Xž“ò¿X_ÖË’ˆˆˆˆˆˆD6X’”HóDDDDDDDDD¤4ÜK¤ï4Üë00Xž8/""2=ìDD’AIŠˆˆHê:'Ùˆˆ$Ã`™“""""""„’I)JRDDDDD$¥(I‘”¢$EDDDDDRŠ’I)JRDDDDD$¥(I‘”¢$EDDDDDRŠ’I)JRDDDDD$¥x’€ˆˆˆˆH ݶ%;‰£E‹&¦';<Ô“""""2œsÎÜ‹LÓ˜Œº].ÃcšÁ〒Q¿ >JRDDDDRܹçÎûfzzÆmÇ|–a ­»µµ•Õ«WF0¡Ë ¦$EDDD$bĈÓ/½ôÒ옠¬[·–d'(ÙÙYJPDzAIŠˆˆˆDT^Žëƒö?&9‰’’–.ŸY_ᅩì$T?kO9唯(Q‘ÁfÑ¢cfù|­³Š‹‡˜7®7“O_íß_éš2e$iiiI‹áÙgß<ø…/œ¸kÞ¼£Ç?*áõ766ñ§?-, ÀX`09ÉqH?(I‘”wî¹'ú|üsòä ÓF™Èºÿõ¯·ÌúúzÌ›” ˆÈ`ôôÓ﾿xñ|ÿç?V†×›¼†~´~ÿû¿˜n·{ð4ÏEJR$¥wÞœÑ.—±zÁ‚%'t’7QõƒAî½÷Þ@kk³+''½¹®®iw¢ê씤HÊ:ýô9ù¾ÎãqeïܹÓxàV÷ž=»ƒ0¦Li|öÙ_Â*%)’º<÷¹¹¹cÆŒLø0«ÆÆ¦´#ۭ犈ˆˆ$š’IY†a¤—–·žtÒñY‰®ûƒ>V‚""""’$JRd@hjjâ³Ïöƽžüü\Š‹‡Å½‘^z:Ùˆˆ$ƒ’^{ím\./ñ|BïþýûÈÉIW’""©äœd "’ JRd@hnn檫®áÈ#çÄ­ŽgŸ}‚—_~"n勈ˆˆHïhнˆˆˆˆˆ¤%)"""""’R”¤ˆˆˆˆˆHJQ’"""""")EIŠˆˆˆˆˆ¤%)"""""’R”¤ˆˆˆˆˆHJÑsRDDD¤_LÓdÏž:¶nÝKmm=µµ ÔÔÔáräåå’——M~~“& §¤$§Çò‚Á »vÕ°mÛ^êë©©©§¶¶ÇC~~yyÙä2yòpŠŠ²p„"’hJRDDD$*»w×òÔS¯óÒK¯2lØ0N9ådFŽÅ´iC:t(Á`ªª**++ùôÓ<øà?¨¯oà´ÓNâœsæS\œÝ©¼-[*yê©×X½ú ÆÇI'HiéXfδÊóù|TVVRUUÉ–-[¸÷Þ‡“³ÎZÈYgÍ¡  39'"êë[9x°‘C‡¨¯o¢±± —ËEvv&yyYdQT”×ëNv¨"q¡$EDDDúì±ÇÞfÕª¸á†¸ãŽß3jÔ(ª««yæ™gؾ};†a0mÚ4¾öµ¯‘™i%Û¶mãÑGåÆÿ‹/}éBN;m¦iò§?=ÇÚµŸðýïÿ'ù˰ÿ~V­ZÅÖ­[q¹\Ìš5‹… âñXM˜O>ù„Gy˜k®ù×^{9³gIÎ 
‰õë÷ñê«kxóÍ·ñùüÌœ9“qãÆ’••M^^@€Ý»kX·n[¶lå“O>aôèQœpÂù¨.IŸÈ@¦$%¶fkË.V&0X²n)°<̶±®ÞpËúSžˆˆ "›7àÕWÿŇ~Haaaûòýû÷3mÚ4|>õõõ¤¥¥‘Í!Cظq#‡ñãÇsÝu×q饗2oÞ<¦OŧŸàÀjÞÿƒöd`ݺuÌŸ?Ÿ@ @cc#^¯—ÌÌL¦OŸÎo¼À´iÓ¸ùæŸpñÅ—°`Áî¹g99i ?'ý±k×!n»í>† +梋.â׿þ-'Nì´iš†Ñi™ÏçcÍš5<ùäüð‡¿ãÈ#grùåç•5è›w÷€mÉD¢§‰ó±³+A¹0B^Ë"ì³¢m°þ±NPâÁ@ ŠˆÈ vèP=S¦Lé” „r»Ý¸ÝÖ0$Ó4Û_¡JKK=z455MTW×1kÖQ›išx<\®ÈM– &PPOCCK”G•;wâ?¸Ûoÿ-¯¿þ:ßþö·Û”={öpá…2dÈ<^¯—ñãÇó›ßüÓ4IKKã¸ãŽã–[neóæ-Ìš5››nºƒÖÖ@’*éÖ¿V&;‰ž’”ØY”Ó5јÙÏrg`õ^د!ëL¬äÈ^—ˆòím–9ö3éèê©ÌÐ}#%q""’‚Ž:j4Ÿ|ò W^y%h_^RRÂöíÛ¹ûå–[øéOÊ<ÀúõëIKëèÝØ½{7_|1ÍÍMLšT̉'NgåÊ¿óƒü€ÚÚÚöíf̘Á¶mÛøÝï~Ç­·ÞÊÏ~ö3V®\É«¯¾Ú)žM›6qÎ9ç0yòÄ7äéÉ'WóË_þ’sÏ=·ËºoûÛ<öØcTWW ñù|lß¾›nº‰×^{­Ó¶éééüìg·0mÚ4Ö­Û¨ðEâfÐ÷ÆÈŒ¶×ãP¶Ý;³‚ŽÞšÐþÞeŽ÷‰*ÿæ¶t s³ß÷T挶÷ÎýDDd€(,,äÀLž<™¹sçr 'püñÇ3bÄ.\Øiâü¶mÛØ¹s'o¼ñ¯½ö~ø! .$+˾3—AII }ôcÇŽeÁ‚œp wÜq”––²xñb†Jkk+UUUlذmÛ¶±zõjV¯^ÍÖ­[9ñÄñùš“zN¢QTTÀ† Ÿ„]÷­o}‹W^y…††ÒÓÓ 477sÒI'1wîÜ.Û766²uë6Î8£ë:‘FIJlÅzÔÒ¶Ÿ+?—†üÞ›ÄÈ9O&å—coÊ´“û|Í@CÈDDŒ?ÜÉ!E<úè£Ô××óÚk¯ñÒK/±lÙ2öïßÏ¡C‡8xð †a0dÈ >|8 ,à‡?ü!Ç<üñlÚ´Ÿ;÷3oÞ±üáwS]]Í+¯¼Â‹/¾È7ÞÈ8xð ‡Âãñ´—7jÔ(N<ñD~õ«_1oÞcƘVஸ "2@LšT̬YGpÄGð£ýˆ%K–ŸŸOZZ?þ8õõõTTT0räH¼^oû¾UUU<ôÐCÜzë­|á çQV–GIIo¿½–Ù³góãÿ˜Å‹“••E~~>¯½öZ{ÏŒËåbäÈ‘í·krùý÷ßÏí·ßÎUW}eÀÝÙëÉ'_ç[ßú×_= ¼úꫬZµŠòòr¶nÝJee%ùùùäææ 9tè---”••1mÚ4æÏŸÏÃ?ÌìÙ³8ùä“Y¿~3gOò‘‰ô’”ع«Wa='ϯ%úÉóv9ËÚÊw¥ê¯þ”?kˆ—óØâóÉ'ϧµÕן"z%4 Ú¸ñ3ví:@}}#G16èv–GØUD$Þlû9?©Q8TU5ñôÓÏñÓŸþ”U«Vqã72aÂæÏŸÏ!C(**jŸ“²jÕ*ªªª¨ªªbõêÕìÞ½›ÓN;ïÿûÜrË-œzêLªªêyï½(//çþûï窫®âˆ#Ž`îܹ´—gÏI©ªª¢¢¢‚W_}•êêjÎ=÷\¾óïpï½÷rÜq×ãñ Ì^„ìììö^)€åË—sÅW““Czz:Á`œœŠ‹‹yë­·2dH§ýƒÁ`Ø[‹ DJRbgVƒ=ÜsRláÖ-Yî9)v™å!ïc)Úòí»r…Óº¶ýcsuõ!ÏÚµá'Æ“ËåbïÞƒ:ÔÀùç/`óæ=­0èoé("‰w\²gãÆÝ,^|.W_}5W_}5@€÷Þ{çž{Ž—^z ¿ß€ÇãÁ0 Î;ï<.¿ürŽ<òÈö¡HÏ>û Û·W°cÇ>®¼òJ®ºê*®ºê*Z[[yë­·Xµj¯¼ò Á`¿ßËåÂívãñxøÂ¾À5×\ôiÓÚãzä‘GØ¿¿ž#òR¢tþù øÞ÷~Ecc#×]wÙÙw'»è¢‹X¸p!ï¾û.»wï&==É“'3{öìN½I` w»æšk0 “iÓJ}"1§$%¶ÖÑý¶úº¼?eö´Mè²¾–o/»(̺hÊìö‚AcÛÁƒÕÿû*owÛÄK `rèPŸûÜqdeÅoˆ™ˆÈ@4eÊî¹ç^xáN;í4Ün7sçÎeîܹL:•;3]»vÖÿÿýï³`Á‚Ne<ú裬Yóÿþïg‘››Å-·üóÏ?ŸÙ³gãõz9ñÄ9á„øãÿÈ}÷ÝGEEEûç—-[ÆÌ™׿LÓäø555””ä&ô\ôW~~Çs ¯¼ò wÝu_|1 .dÞ¼y”””0tèPÎ>ûì.û™¦É† xûí·yê©§xõÕW9æ˜9Ì›w´æ£ÈaAIФ03Û0È1bXÐŒö)0Qª­­sxât22ÖL‘D(*ÊäÇ?þ—_~9S§Nåâ‹/æÔSOe̘1\xá…\xá…a÷Û²e 
/¾ø"<ðûöíåg?»–œ/99^þßÿ»ŒsÏ=‡ pá…K8õÔS)--åŠ+®àŠ+®èR–iš¬_¿žçŸžÿýßÿÅå2øùÏ¿Ç3°†:=ýôûäååóÄO²mÛ6}ôQî¾ûn®ºê*‚Á %%%¶áòûýTVV²oß>JJJ˜3gŸûÜç¸ÿþûilläØcç2mÚ¨Õ›$Ž’IY.%ãÇk>ãŒS²zÞ:¶î¸ãg‚2°þ¯'"g&ñ‡?ÜÀÇïáÙgÿòòr|>………msHŠÚž“rªª*}jjjøêW—²`Á¤×›2}z)_ýê.\È¢E‹øêW¿Ê‚ HO·zó>úhŽ>úèn÷ß¿?O>ù$÷Þ{/MMÜrË·ÉÎX·a GIФ¤ .Xð5·;íÚ*ÓŸy慄ן™™Á† »‚Í;æœ×ù+V¬kMx"")Êïòãÿ‘™3`Íš5Œ7®}Ý?þñî»ï>6n܈aÌ™3‡o~ó›n­»aÃnºé&~þóùþ÷/¥¥ÅÏu×ý– .¸€ 6PZZ X‹þüç?óàƒ²sçN\. ,àÚk¯åüóÏçüóÏàÝwßå»ßý.k×náÊ+ ¸Då˜cÆò§?-ãÍ77sýõ×±qã&fÏžÍøñã˜8qÅÅŸáµsçN¶oßÆÚµëØ±c'žx<_üâÙLŸ^šÄ£‰-%)’rÎ>û¸S\.÷‹-ÊLÖJ</ë×â2MîY±âͦ¤!"’¢>ø`'¥¥eüå/é´|Æ ,]º”@ ã–í›6mbåÊ•ÔÔÔ™™ ÀÔ©SyôÑG9þøãÙ¼¹‚O?ÝÇYgÉo~ó›Nå=ÿüó\y啸|ÏÉÚ¼y3O<ñh_vÌ1ÇðÜsÏ1mÚT8™ââl¯×ÍÉ'OeÁ‚)lß^ÅÖ­û¨ªªáwÞ¡¥¥¥};Ã0ÈÊÊ"//‡óÎ;“É“Ë(-Íp‰™HO”¤HJ9ë¬ù½^÷—\riæ¨Q£’î]»Ø°až`K‹R‚IaYY|öÙgÔ×ד““Ó¾|„ ,Z´ˆgžy†ôôtLÓÄçóqõÕW·'(¶êêj**ö“““ANNü>Ÿ´´Ž¡JóæÍcîܹ¼óÎ;x½^LÓ$pýõ×w‰iïÞ½444’‘10›6Û¶UñôÓoðúëÿbÖ¬£˜;÷XN?ýXÊÊÊ(**jß.°ÿ~víÚŇ~Àí·ÿ•ªª*Î>{!gž9‡ü|Ý•Ró_²–N?}N~F†û…ÓO?=;Y Jcc#ýë_‚ÙÙFCCs#_IJ "")lÚ´ŽI}}=Œ9¯·ãqWUUU<ôÐCüú׿fñâ³92ŸáÃsy÷ÝuÌ›7›nº‰Å‹“••E~~>¯¿þ:‡âàÁƒ¸\.FŽÙéA†{öìáÏþ3wÞy'ßøÆ—ÉËXtÓ4ùë__æwÞã{ß»Ž|¤}>@}}=«W¯¦¶¶Ã0˜4i‹/îÔs²uëVî¾û|ûÛ¿àÚk¿Æ¬Y#“q("1¥$ERBy9®>È\1}úŒâc=6)c¼‚Á wÝuWÀ4ƒ®úú¦F0N_µê’‹ˆH*«©iæ¹ç^àæ›ofÕªUÜxãL˜0ùóçSRRÂðáÃ)..& òÊ+¯°{÷n***X½z5»wïæ´ÓNã{ßû¿øÅÏY¸p&‡5ñÁ±lÙ2î¿ÿ~®ºê*Ž8âæÎKII ¥¥¥ãóùxþùçÙ»w/{÷îåÿ³wßñUÖçÿÇ_''›,2H Œ€€@@öÖ ÅVEÀ=À=P­»5hVmkëè×UGµÕ ®ª€ÈRD‘)+Q{$FH™ç÷Ç'ÎÉ}îäý|<ò€Ü¹Ï}®„„Ü×ù\×õY´h`ìØ±L™2…wÞy›Aƒî·Õ^) ¦“•µ5kÖ\{JÙ'Ÿ|Âõ×_ååå€)÷j×®?üð€YÁúóŸÿÂäÉ7rÞyçñüóm¿’7‘š”¤ˆWøé§!OEGG ¹è¢‹,ÛÞýƒþ[žŸŸït8(./w]òÕWË[Wï[“ˆˆ7JKÛŘ1£¹ãŽ;¸ãŽ;(++cÕªU|ýõ×ÌŸ?ŸÒÒÒª>ŠÀÀ@œN'_|17Þx#ÉÉÉU;¢ýõ¶lÉfëÖ,n½õVn¿ývn¿ývŠ‹‹Yºt)³fÍbΜ9”––rôèQ||| ÄÏÏñãÇsÏ=÷Э[·ª¸>üðCöî=l«=BÖ¬ÙÀ]wÝuL‚&I)))¡¸¸öì–Í›7óË/¿0hРZÇ»wïΨQ¿bóæ½DG×ÞCEÄn”¤ˆåFætú>tèÐAÇßþö·²“?Â\ŽÂÂ#>åee\6gÎòŸ€ @40ø-PrÂKˆˆ4]º´áÕWßcñâÅ 6 §ÓIÿþýéß¿?‰‰‰¼ôÒKUå^ݺuã‘GaèСµ®1sæL–.]Æõ×ÿŠ  ž}öu.¿ür’’’ð÷÷gøðá 6ŒþóŸ¼õÖ[ìÛ·z÷îÍOúË1å_s-ODÄklޜ˜9óY±b‹-bîܹL›6ââbbbbˆ­êIÙ·oYYYäääÊÈ‘#™:u*ýúõcذa$&ÞÍîÝûÙ¸qéééÌœ9“/¿ü’x???¢££iÕªUUOÊž={Ø»w/ûöí#66–‘#GòôÓOÓ¶m[FŽÉ+¯ø€ΰaÃèÔ©;w&&&¦êüÊ}R¶mÛÆúõëY¼x1­[·æ¦›nbüøñL™2…—^zÄvDêR’"^Ãáp¸,áxã†7ƒ€·€ Àlà˜eäòÆ 
MDÄ{ää¢{÷$âãã¹êª«¸êª«X¿~=“&M"??ŸÂÂBÀ¬xDDDðÕW__u —ËEÇŽÙ¿¿€½{÷Ó¿âââ¸é¦›¸é¦›X´h÷Ýwyyyàr¹(//§C‡|÷ÝwDFFV]¯¤¤„ˆˆp.²U’²xñ*RSSIII!%%…W^y…´´4V¯^ÍçŸÎܹskí“ТE úöíËäÉ“yùå—©9ó³Ï>åçŸw3p zRÄÞ”¤ˆœÜaàrLÙ×_€'0++ׇ,ŒKDĽzµç?ÿùŒ©S§òøãW½ÒߣG¾þúkfΜIFFN§“¤¤$ÆŽ[kŸ”Ý»wóè£RXX@×®­ˆoÉ}÷=GTTS§N­Á;bľøâ fÏžÍÖ­[ "99™Ñ£G×Cœ‘‘Á}÷ÝGçΈ‹³WOÊÙgwâwÞáâ‹/& §ÓIrr2ÉÉÉ >œ{yóæqôèQÚ´iÃwÞÉ£>zÌŽ[¶laÁ‚…Œu¿ŸŠˆ[)Iið°øø ° Ó§²Þ¸DD]` /þóT¦O_D—.]2d0))#III¡mÛ¶\yå•U›2“››Ëš5kX°` ÌgÅŠ•L˜p1©©7âëëCxx/¾ø0ï¾;‡žç¼óÎåüó/`ĈÄÅÅ1iÒ$œN'GeïÞ½lÞ¼™ æ3wî<6lØÀµ×N`Ô¨^Ømãõ /L&#c;çœsO<ñãÆ«ÚW¦cÇŽ|ñÅ'½Æ¾}ûxõÕWyá…¸õÖ뉵W¢&r>>DDDЮ];úõëGjj*C† ©Z}ùé§U¤¥í¢oßV}J"n¡$Eäô·ß¯búUzW{,ŒKDš–1VPŸ]»ñ׿¾ChhW\qwÞy_ÕF¥¥¥deeáëëK\\‡£jÃÇùóçóÊ+ïãëëäÞ{¯¡U+³3úÆÙüå/oÒ¥KgÆŸÀÃÿžÄÄDÀ”ŒeeeHll,`å—.]ʼyóøÓŸ^§mÛxî¼ó2ÛMµjÓ¦?üð×^{-QQQUcœW®\É•W^IVVÁÁÁ”––RRRBÿþýyï½÷ª“J‡bíÚµœþ9V|*"neϵQïñ.0Ø ŒV`¦‰ˆ4YåÑGŸçü,[¶Œx€Þ½{ãr¹¸þúë ¥[·ntîÜ™ÐÐP¦M›VµáãÃ?ÌÚµk¹óλxøá¿RXXÊîÝyüéO¯òÞ{ï1þî¼óN)((`ìØ±„††Ò£G:vìHxx8¯½ö~~~ 6ŒÔÔT6lØÈ¯~u!=ö2eeVŠ<5]ÔŸeË–rñÅ“––Vëc}ûö%##ƒÝ»w³páB–-[F^^~øa­Åårñé§ŸÒ¿?Î?8 Ç®R‰Ø’‘3÷Ðøˆ¾¦Zˆˆ'¥§ïb̘ÑÇlÚ¸qãFÞÿ}Ž=JAA………ðÔSOU$®tóÍ7Ó§O6oÞËÊ•¹õÖ[9÷Üsk³xñb¾þúkŠ‹‹ÉÏÏçÈ‘#äååñè£Ö:ÏétòØcÌž=‡=óI{H@€“iÓn"1±£F"))‰ü3fÌà—_~¡¨¨ˆèèhz÷îM·nÝð÷÷çàÁƒ¬X±‚7ÞxƒÉ“'Ë3Ïü‰»ï¾–‹/îgõ§$â*÷q\LYÆï+ÞþŽi°¿ (<ÁãDDl§sçÖ¼þúøþûï:thÕñnݺñßÿþ—_|‘-[¶àããC÷îÝyì±Ç®uÙ³g³lÙr®¿þWð—¿¼Éå—_NRRRÕ9£Fâå—_æ­·ÞbçÎлwo¦M›vLLo¿ý6999ÄÄØo²•Óé`äÈtc÷î<Ö¬ÙÌ›o¾ÁŽ;ÉÌÌ"??Ÿ°°0ÊÊÊÈÏϧU«âââèÔ)nÝ:òâ‹tò'±%)"îS¤+1e`×I˜1Å[­ KDĽZµjÁå—c„ 0€qãÆ‘’’B‡˜0a&L8îã6mÚÄ‚ øè£Ø°a7Þx aa„…’r.#FŒà׿þ5]t)))ÄÅÅqÛm·qÛm·s-—ËEzz:óçÏçý÷ß'//»îš„¿¿}‹DLé[.YYûغuÛ·ï ..ŽÎ;Dyy9dgg“žþ3þ„†¶ ::œ@|}m6Y䔤ˆ¸ßÀàà`9p 0ÇÊ DDÜeëÖ\¾új+V¬`Μ9|õÕW<öØcGdd$ÑÑÑ”——“››ËþýûÙ½{7>>> >œn¸!C†0bÄpºw¿—;sظqiii̘1ƒ?ü©S§V]'22’¨¨(Š‹‹Ù¿?`Û¶mDDD0tèPyä¸à‚ xíµßìgõ—è”äç3cÆ"æÍû†±cÇ2qâU<÷\:uª×ø’’Ö­[Ç’%Køä“IOOçºë&rÞyÝñ±ož&REIŠˆgdƒ71»ÕÏžÊ-ŒKDäŒeg¢gÏdâãã¹ñƹñÆX¶lS§NåСCäååUÀ‚ èܹs­ëtêt99‡Ù»w?$..Ž»ï¾›»ï¾—ËÅW_}Ejj*äàÁƒ€YA‰ŽŽæ“O>¡M›6U×*++#""œC‡ŽÚ*I9t¨ˆûïÿ3W^y%[¶l!<<¼êcÅÅż÷Þ{LŸ>œœœN'={öäöÛo§ÿþôéÓ‡>}úp×]wñË/¿pË-·°nÝî¹çÒcv£±%)"ž“I<xxSþupиDDÎH¯^íyï½O¸÷Þ{yâ‰'hÙ²% `æÌ™Ìœ9“ŒŒ œN'IIIŒ;–  êž‰½{÷òÐCqðàºvmE||Kî»ïYZ¶lÉ<@pp0‡ƒÑ£GÓ«W/fÏžÍÖ­[ "99™Ñ£Gãë[} 
³mÛ6¦L™B»vmiÝ:´Ñ¿gâÓOsË-·ššzÌǦL™Â¿ÿýoŽ9RulÅŠ|ðÁ,Y²„^½zUOLLdáÂ… <˜ ²ILŒmŒðE´nݚ͛7Ó¥KHJJ çwmÛ¶åÊ+¯ÄÏϬf“››Ëš5kX¸p! ,`õêÕ :”ØØVøøøàçç¤mÛv,^¼˜_|‘áÇ3räHFŒA\\“&MÂé4;¨=z”½{÷²yóf,XÀüùóÙ¸q#C† ¡U«h+¿,§¥rz×ñ$''Baa!N§“ÀÀ@üýýiÕªÕ1çûúúNQ‘öûS’"Ò8a¦}ÍÀ”ýÜŒI\DDleíÚ„„„òÅ_°oß>æÎËÂ… yûí·Ù·oû÷ï'$$„òòrŽ9BTT±±± :”Ûn»óÏ?Ÿððp ĦMÙlÛ¶—Þ½{óú믳k×.æÎË‚ xùå—ÉÉÉaÿþý„‡‡SRRBII ‘‘‘ÄÇÇ3|øp|ðAFމÓéäì³Ïf̘AUDÚÁÅçÁ§ÀM7Ý„O†’)S¦0fÌæÌ™SU>×µkW.¼ðÂc¦¥8p€x€}û²IJŠoÔÏAÄ”¤ˆ4žÝ˜ ÿˆ)ýú/0˜èe/±ÀÀ²²²((( &&†«¯¾š«¯¾šââbÆÇ‚ §¼¼œ’’ÆÇ+¯¼Rë 77‡  ‚ƒHOßBII mÛ¶eòäÉLž<™ƒ2zôhÖ®]Kxx8¥¥¥ìß¿Ÿ)S¦ðØcÕºÞŽ;((( 0Ð^·6‘‘Áôëׇ7ß|“gžy†ë¯¿ž1cÆÐ«W/üýýéÔ©wÜqÇq{ðàA–,Y§Ÿ~ÊÇL¿~ý>|¦|I“`¯Ÿdû+Ö¯·Ý1ÍõYÆ%"Ò`ݺŒ”Ô•>}Îá‰'R;v,áááøûû3sæL<ÈæÍ›HJJ"00°ê±¹¹¹|öÙg<ù䓌síÚEƲeë@l¬{7rt¹ðø0ß={r¸êª èׯýú™ã xòÉ'ùî»ïسgOÕù>>>”––ò¯ý‹áÇWõêTJJJ"7÷psORÞö[¬DNŸ’ë¬úÿ~ | <>Â#û‚ìÚµ¯°¬¬l”ìtÿÕ«=„|Ž;2tèЪã-Z´àÙgŸå믿æ‹/¾`Û¶møûû3hÐ &NœHBBB­ë”——óüóϳbÅJ®»n¤'C¶ƒôŠ7±1%)"ÖÚŒžž’[€B ãi0__:wަsg÷L× pÒ­[ݺŹåz§ÆÅöíÙGÌÏ(/gÄ×_¯-ðä³uêÉÈ‘ç1qâDúôéÃW\Á˜1cˆŠŠ`Ô¨QŒ5ªÞÇoذÏ?ÿœ7ß|“ÐÐPn¸árõž/bJRD¬ç¬ž¬Þ®Æì§2Øla\""ÍLU‚²±¼œ³g/Í;ùcÎÌœ9k9pà7näã?æ“O>áöÛo'..ŽÄÄDbcciݺ5aaa”••qàÀ233ÉÌÌdíÚµ´hÑ‚Ë.»Œ·Þz‹6mÚ0xð`Î:ë~·—¾‰46%)"Þc&0øè¬n>³2(‘æ¡ñ€ ¶pÓM· 7ÜÀ 7Ü@ii)6là…^`Ó¦Mlݺ—ËUµ‹¼ËåbÒ¤Iœþù´k×®ÖõFŽLaÛ¶l%)b{o‘S² “¨¼„a–ghØÏêYž KD¤)«JP64f‚0lXRSSÙ²¥ºÇÛ××—¤¤$^zé%.ºè"{÷î%77—6mÚðä“O2iÒ¤c”ÿýïÌ›7ŸnÝ´OŠØŸVRD¼ÏQ`2°x ³§JOàZàÀ ÷:p)p¸ž»Ü£Ô¶8Çê Dät¸Ø¾=ûèÁƒé%%GÎõtJ]ýú%PPp àšk®aÒ¤IôêÕ ¸ÿþû¹ÿþûë}|aa!ß~û-/¼ðüñ÷Ún ³Èñ(Iñ^¯c¦“LÇ4ׯÆô©¬8ιAÀpLÃýó ˆxÜ’Š?[E“U™ ®/))<¯±”Jçž›HŸ>¿gÁ‚5L˜0Ç3xð Î:ë,:wîBLLLÕ¹eeeìØ±ƒ­[·²~ýzV­ZE¯^ÉŒ;‚»ï¾§Ó39–––yäº"õQ’"âݽ1»Ó§ßwoÕ9¯'àLŬ¾Ô»ƒ½Ë¥w¹ì²ËøôÓO­Cš¶AVÐtÕLP ,KP*…†úsÉ%ý¹ä’þäå“™y€ÌÌ–,YD^^~Õy>>¢£#iÝ:šk®Íï~wþþžæUZZƦM™ù‡OÝß="£$EÄûe£€?bJ¿ÞwÅç ¨ø³=0 x£qC±ïJPê ó',,–ÄÄX«C¡¤¤”w–””½4kÖÒG­ŽGš5΋ØC)ð0f>΃˗¯-NOÏ(oìç.++Çår•¾=kÖÒûûùEN‡’;r¤tr~þ¾ ¬xn‡ƒ ‡ƒq³f-½×Šç9JRDDDD–––†Ëåªz\åy'S÷q'N<­ëˆØH4ð®¯WLtãõß™Q|""Ò,¸×™\ëׯw¥¦¦º&Nœèš>}ºkâĉµ>VóÜšoëׯwMŸ>ý˜j>îx×8Óï}w7n\åç÷ScÿcЉÔù9;÷2ŧøšH|Ý n¼¦42­¤ˆÈ)«\M˜8ñؾG½«%III£H3°øÑ×+tãµ@ñ)ÅwúÒ+ÞÄÆ”¤ˆÈ)KJJbâĉ¤§§“¤TöŸÔW^¥ `"n“ ¶:ˆP|gFñyF4àöZˆœ˜çEä´Lœ8ñ¸«(Ý»wª“‘šMó•çW;•©`III¤§›ÆÔ/"nÒx 
{ÞlËé)¶_ס–F#õR’""§eâĉÇ]-©l¦ïÑ£Ç1“½¦OŸ^µ¯Ê©$)O<ñDÕãDDÜä0øØüH°2 Ópð7 ¯ÕØÄà{àBà_˜•‹ ãi¶Î¸qÞðPS{cPã¼H“Jíîrà`2fYT ׇêØ×cÆã-ÈûMáøÍû¹ÀkÀ¹è…|‘FãIJå40—Ëåš>}º+))ÉÂhÎŒ’‘&¡õOœ*þyÕÝiU€'ávR;îR`p-l]h^« &=Ñ´±Ê„eæk,""â5I 5~Ø™’‘&Ádrò¹»çoQø*õÇ}x8­Ô´Œ†G^<ŠýJmM߬"Ò¨¦OŸŽËåªz±˜ ˜Õ€óÚ¿Ãܰ.Åô0x‹/Oð±`°Ø<tn„˜¼Ýg <¯8Tñg Ï…#u)I‘æîD7ù•J€Ào›Ï=Ñ©™iÀy€Ç1C>â<”—;Q’R„I\oÅ$§CgÍÐoDÚ'EDDDš»¹ÀQ °Îñrà à}LÇ¡FŽ«¡Ž`•‹NpN9¦Äé ÌçòSűæ*Øœ}œ}\ƒW,ÑJŠˆˆˆ4w˜‰^5•cî“úËñÞ¥R}«A{[€Ö˜ý`žVÒ¼”JuWS¶{€+€ÿ¢ó-¥$EDDD䨛üÏ€Ù˜q¾s1e?ÞìsLMM‡€XLÿÌFÈû}\çý·€Q˜É^€¢{eËè /"""rìMþ[˜Õ0%RQÄÕPYÀš:ǮǬ üxÝ÷Õµ3¾Ì¿ýûÀ:à`?pðAl }³Šˆˆˆ˜›Õµ߃IJ 17ø«€˜fêPK¢k˜ÿÕøûjLâ5 sÃ}5ðtÃ]“‹ê´•À抿¯ÆbÆ7ßü½ñC%)""""Få(âé˜ Á”Lýø€Iê6Ø{‹š%kïUü¹sÃÜŽéI‘jŸVüù~ã?£1_·{€ç3(‘æÄö›'zmæ(Òä ÄüL÷>ÎÇÚ[+>þ9à׈q5”f¨”c{hÎÇL0s7r\Þ̉Ù̳U=¿3=Í<ÑXA‰ˆ4'JRÜLIŠH“ãƒ)óªOgLï‡ øÞY‘ò0³ž]ŠÙïÅÜÛhy¿kOòñQT'xx>‘æEIŠ›)Ii’:äã=1ÓŸ\Àÿy>œSv1f„n}®ÃìÿQLn”ˆš†KbÌ¿ûï,ŽED¤IQ’âfJRDš­˜¦jð'‹c©+>É9S0±—ã=QÓ1³UÜiq,Mž7.SŠˆˆˆx³¥˜WÖbú;¼©Ç#3•ìD^¦aú1Þ.ôtPMÄGÀ͘ïeàVkÃiÚ”¤ˆˆˆˆœºùÀ•˜Õˆ§1“³ì$ø à| µ4ûøpKÅß_®±0–&MIŠˆˆˆÈéù¦¯Ã…ÙƒäjkÃ9ebvUÆŒ/>ÞT39Ö[ÀTÌ}ô¿0ɪ¸™’‘Ó÷oÌ>>À»Ø«ÇÃ…YšD`&›miDöñp?¦dî]ÌÀq#%)""""gæÀ°gGfâ×lÌ^!s–FdÃôöø30›fŠ›(I9sOÆž=ŘÉUßí0‰J¬¥ÙG*f›?&QI±4š&DIŠˆˆˆˆ{<¼Ž={< ß«€.˜Ò¯––FdbÔ Ì¿ûkÃi”¤ˆˆˆˆ¸‡ ¸ø/öìñ8üøè…Ù¹¾…¥ÙÇC˜i_ÁÀÀkñ?%)""""îS\¹Á¯ìñhoiD§f0 Ø >¬ È&\À]À@&AíkiD6§$EDDDĽJ€‰À·Ø³Çcð+`OÅŸ¾–Fd•+iïcVÒ¾zX‘)Iq¿#˜±´+€®Ø¯ÇcfEe?0³ŸŠÃÒˆì¡ ³’ö!Ùô³›¥Ù”’ÏÈF騳Çcf¬n>pð‚µáØFåXç/¨.ù;ËÒˆlHIŠˆˆˆˆçä`V$¶bÏK"ànà÷Ö†c%˜±Î³x`!`e@v£$EDDDijvcz;²*þ|{õxÌ®J'1;­ËÉUî?ó Õ½Im¬ ÈN”¤ˆˆˆˆxÞfÌŠJ.pöëñø ¸3½ì/ÀMÖ†c…ÀE˜2;cVTZ[‘M(Iië1ÀaLÇß­ 甽LÅ$W¯a&˜ÉÉ`6Ê\Nõ…hK#²%)""""gp p¸xÜÚpNÙËÀ4À ü3@Nîf%mИDZ‘—S’""""Ò¸—cš«ŸÂ~=©˜’/à#`¸¥ÑØÇAà×TO{›…ÙøQŽCIŠˆˆˆHãû˜Œ}{<ÄôÕc>—>Ö†cû€‘À/À@Ìô¯K#òRJRDDDD¬ñ` öìñp·ÓpÌîꉖFd{1SÞ¶C€ÿA–Fä…”¤ˆˆˆˆXçàQìÙãQ¹iál 3b7ÁÊ€ldp°³²ò? 
ÐÊ€¼’ký x{öxTîòГ¨ÄY‘}ì öþ9ÿü,È‹(I±Þ£À«Ø³Ç£3bwf/¯Ñ䪆ÊR0%`—`;môé1JRDDDD¬çîÂìFoÇC˜ÉU?cFìÎD á µ¸ØÙèó-t®/€ˆˆˆˆ—(&_bÏ}˜½@¶ƒ0»Ô«Ï¢aÖ0}>oÒÌïÓ›õ'/"""âeJ€ñ˜]ÉíØã± Ó_±8•/ŠŸ€±@>&Y} 3ù­YR’""""â]Š1e?ßcz<æ--èÔl¬¨ìÇôY¨|©á–`&¼7³6ëèFDDDÄû«dÌîävêñXGõªÀuÀ‹Ö†c+‹qÀQ`*f³ÏfGIŠˆˆˆˆw:ˆi¨Þ@uG€¥šK"ÌP€iÖ†c+s1‰Jðð{kÃi|JRDDDD¼W6¦Çcöìñ˜\ ”~gm8¶òp5æk÷$ð°µá4.%)""""Þm'&QÙ‹Y™°[ÇgÀ˜éeÏ·XŽ­|Bõ×îOÀo­ §ñØé\DDD¤¹Úˆ)ýªQk·÷0ýàÌêŠ4Ì{ÀM˜Då9àkÃiJRDDDDìa ¦½Óã‘ji4§îeL_Šxó¹HüÜSñ÷`&5iJRDDDDìc Õ ÕO`¿òŸTÌ´*?`0ÂÒhìåÀý˜Õ¨W1ý*MV³Ý F¤rtëÖÍê8êUZZÊáÇ)//'** €üü|Š‹‹‰ŒŒ´8ºcíÚµ‹Ã‡ƒzŽÅáˆHór0ó‚ómÀÖ†sJÀë˜Õ€<Ì@€–Fd/Oe˜Deºµáˆˆœ—Þ<ööÓ)ü;ˆˆ¸Ë ˜>…Rà ‹c9UNàCÌÿ¡û€îÖ†c;Ä|튀ßX‹Gh%E¤ùxÏâçwQ@ ЪâOÿãœçÂìR<ó‹·50Ƽj´H¯8Ï[,^³:i–î^J0“¿fYÎ)ñÇLþ ì†[-È^žŠ1ÿö³­ GDÄ:×cnÞÓ0 GÍÕ‡rÌ/ÕÊ÷w`&¿„çZáשLI· 8ô·2˜³ÎjÓ.1±í¹gzõ¤ˆHCtļ:3³br6¦Ç¤’ X|IFc6»3u¦cÅy9˜Н{Î0¦‰˜D§%æòÀÇgxM;óÃôxŒ¶C€,+:Emï€`!æó8je@IIIþiiiÅVÆÐ@~ÀGÀÅ:Ä–&$Ä–Y„ÃEE%þ‡Y±~ý¶gr-_w%"M†f…cÕIIÝᣘ_ ßc’e˜1’`˜‡€«¨~%l=ðgÌ$—"7Å9ójÛ[˜ñ•U»ÈuÓsˆˆØI f•ÿaÊn÷YÎ)Û\| ¤´h¸"!!îk«‚q¹èùNìÑëQÒ»w—À¢¢b×µ×^áÛ¢E°%÷ø|ð!‘‘¡:Tøá™^KIŠˆ„c-+’^@Hsr¨”¬Á4Â×t¦ñ} &Ñq_ÏV<Æv¿nÁÔcOĬøÜŠéinŠ1ý{G1ì¼óðÍÊj?nÆ3<YäµhxEy¹kn÷î ‰¡]¬âÈ‘"g` Ó™•up©Ïªúö={QxxðÐ)S¦8âã­™=ðøãO¹‚ƒ……GËÁUr¦×S’"ÒütÎåÄ¥[é˜Ä¢2)ÙRϵ1¼îzW+ÄÔE¿ d¸9öãqa6›¼I¸>ÇÔçÞn„DDÎHÏžoÙ2ôq‡……øùùGýƒƒËò0«Ò–èÔ©m—ví¢fFGǸ|ð~'µ?5Š]»vó /âpø,oìç?U&A :eÊ«”W_}—«Èѱc,™å’‘¦íTK·VbJ¨²OrÝLp;[ql'f5ãÌt™Æ¶ ü˜\‡YU™ |cA<"" Ò³gÂbc#SûôéçhÓÆš›ÌO>ùœ˜˜23sVZ˜ß566jm«V±~¿ýí½ ™øèvÅÅ¥üõ¯Ç××IQQñ¯xZå ÊÝwOñ±ê{çÛo¿gûö-DE…IFF¦[®«$E¤i Æ4JV&%9vBJ.°€êU’µ˜:æ†è ü¸¨8ö#ð<¦ÚêæÂ2LyÙ—À»@ÌçúfÊX¡u¡‰ˆ«gÏN϶jñ»ßüæbÇ AÖ ez÷Ý÷ñóƒüü#.‡Ã‘oE gŸÝ6966jEllœó¦Z’ <ôУøú:<¸K–¤[FƒôíÛõ»ððà!÷ß¿OLŒ5S›33÷ðå—_àëêÒ¥­[ו¤ˆØ[{LC_å*IWjÿ\ŸJéV}œÀe˜~“¡ÇJ1åT/`V_¼M0¸³ÉÙ­˜¯Ï À ã©R™ \rÉ¥ŽûYÃ÷ßÿÈÏ?§JNNž[ÊtNU—.ñ½[·Ž^ÖªU¬¥ Ê´irø9zöLÀélô*³SÒ§O—11áí""bøüó/-‹cíÚuøù9éß?Ñí…ŠJRDì£!¥[E˜r­Ê¤d°÷4Ÿ¯p30è\qì¦ÿã5`ëi^·±”`VUÿÂì°S’ö¬_õ‘fÌ$(á¿»ôÒqެÙ{1;{Ÿ}ö)~~~®®]Û:rrå sççÄÇ·\ë|à©–uäLŸþ)EEŽöíc ²*Œ l——WèÊËÛnY .—‹ˆˆŽîÝ·/0«*ɘ^šTà/œâ˜N‘Ó•”Ôa~||ôHÿ`V¬XÅŠ«,‰cݺ4üý}éÛ×’ ¿U‚‚hÝ:ŽØXkz**-_¾†Žãl‘ 4JRD¬áÀ¼º¢Ò­bª“‘ï1cÏt—öºü0›.ÞTÖÅ4¿ üäæç³Ú÷˜¯ûŸ1}*Ï—`zUc\²ˆ4saa!ƒóò p8 ]¹¹9–Åáïït´iI` 
¿e18zôèJ·n,cùr{-¬ûøøðÎ;YævÆ3”¤ˆ4Ž@L"R™”  º”ªRÝÒ­Ÿ€Å‰ILn¥:9Êþ¼Âé÷±ØApð)f?—ÁÀjàQ̪‘˺ÐD¤©óõõ)6¬?#Fô³ôUûgžyk§ãçŸ3Èʲö׎ÃÊ jNCy¹‹¡Cûpò“=¤¸¸„ï¾[æ±ë+IñŒÌÔ­“•n­¤:)ùÏ—uÇL麚ê]å×cV>Ä4Þ7_=€—€k¿cJÝnvY—ˆH³áããrìÝ›CNÎ~Kã°[’.Î>»#V÷¤(IñnÌÍÍ&÷ºëÖuK·VYßE˜•“‘˜~f/‘g+bj®b6}ü³‚4 “´=ˆ™b&"âVN§¯ïêÕëY½z½¥qX]æU©´´¼ü¼óûX]îõ׿þÓÒç—c)I9u5K·úbJ·ZÕ9絓O–nÕ'SÎupvűàMàeÔƒQÓ `&1¹3byæk·Ï¸D¤‰q¹ÊÊÚµK K—–Æ1kÖBKŸ_äd”¤ˆœ\4Bõ*I2¦á¼¦­˜„¤1K·êÓ³·É˜²3€˜ýAÞY–×Û‹i¢¿“Ć·cúWDDÎXy¹ËÕ’=ºZÇ—_.°ôùENFIŠHm )Ý*Á$"5ûI21Æú Äìor P¹Ž¿øð&n9¹wo·1Éé'˜•–Û1à DDDÄÔ¤Hsô§:)éÄÖ9ç µWIVùã‰8Ë0ÍðC+Ž•ï/`)9uÛó[0›XNÄ$7ó-ŒKDD¤YP’"ÍM¦y¼²Ÿä ¸Î9Û€ïðŽÒ­úD`ÆèÞJõJÏLOÅ«˜ÏAÎŒ óõü³d`.ðfÅÊ[U‘&GIŠ4uIœ¸t«³ÚP³ÉÝJ·ê“ÜLB+Žý‚)ézÝ8{ÂÏÀLbò$&1ü0 Ól/""b‰yó³yó>ÇàÁ}HNNôès’iJü1“¶*“’~@\s 1&V&%KÃãéºSÒ53B`fóÁ™˜dK<§3®y¦g¥7°Ó`ÿ Ík·Û±#“}ûr=ú±±Ñ´mÛúä'ÚHII)ÇଳÚ{ì9œN§Ç®}"JRÄÎ"1 IeRÒhQ眘&èÊU’ ˜N;ÄLšºS–ps“ü ¦7F×:`0 ø-fï™ó1ÿN«,ŒKDÄÖ¶mÛÅþý‰‰‰òØs””x[å¶{¤¥mdûöê=ˆ{öL¤mÛº¯ÑÚ’±“š¥[}1{ÔLïíVºUŸhÌáÛ¨^ Úü&9ÙkQ\bc&¦ý ó}¹3âù÷hŠšˆ4ÀêÕé,]êÙך:wNàüó‡xô9Ü):º%]º$T½ß¢E-ZÔm•šz÷îN~~a­cVîBïNJRÄ[Õ-Ýê Ô]£=‚IDìVºUŸ˜Ò¡Ë1SÇÀ¼rÿàCTRäm~ĬÞ=üx3²øLŸˆH½JKKéÖ­3ööØsøøøœü$/Ò‚·°kמªcÉɉtïÞŨ¼ßΙdff×:––¶Ñ­ÏÑ£ÇÙtîÜø›*IoÑ’êæöúJ·²1¥[•IÉìSºUpæ·r„pf_Ž1Ÿ§x¯#˜U•ùÀ›˜Äú' ø3ê‘ر#“‚‚êWÁÛ·')©yÞ”wìØ–èè–Çß±Ã}aa!DD„¹ízÞ 7÷ qq­hÓ¦•Çž£eËp]ûD”¤ˆU:bvó®LJê–n¹h¥[õiÜ ÜTþF*ÀÜè¾l²(.9=sžÀs˜é_Ï¿ÁLÓ¿¥ˆ£S§ö„‡×¾aŽˆ­çì¦oݺ lß¾›Ðк¯OºÏYguhrI À¡C‡ñó«¾…JHhGTT„…¹‡’i ~˜ðúR”Ô-Ý:Š™–T™”,ò1ÆÆÒÓp}PùÒÄ̆ KÜà¦èÀ?1ßë+1¥`o`orržžáÑçhÛ¶5ýúõôès¸SÏžgÓ»ww«Ã°•NÚ‘“s£G‹«Ž••Ù½ÈÄP’"žAíU’^@Ý.®}À74­Ò­Ü \ŠIÚÀŒB~3Ö¶iŽižfa¾ç_.^ÆaVÎv[—ˆx‘¼¼Ãx´ç¢E‹ ]Û-ZÆ¢E˪Þ<¸ýû'[‘=edl'#c»Û®—ЖøøX·]¯¡”¤ˆ;$#8qéV:Õe[‹-¢%|«1cjûV+Þ^À¼Ê.MÓ>`<0“¬üXù^xϸDÄ‹©µ7HLLíÛ·±0"ë Òç¸CŠŠŠsöéñõuZ¶ç‡§lÙ²'ÑÑž+ïòõÕ>)b>˜=;j6¹Ÿ¬tk9Í«Œ)s3z PùÛf?¦×äušVoœØ à;L¹×E˜=n~Üxv×2ñjqq­(**©U¦SZÚ|Õøaë×oÄéôÜD²^½º1hÐ9'?ÑfÂÃCkí/Ù’{­¢’9™pÌ*Ie?É Ž-ÝÊ¡vR²–æ¹WD7LI×ÕT~þ¼ä[—Xkp1&iý+fue¦Áþs ã/“••MVVöÉOl ˜˜Hºvíè¶ëyÚ!}Ô“rŠ¢¢"ÈÌÌfÏžêï›sÎI"$$Þ¨ÜCIŠÔÕ8—êU’®Ôþ>i®¥[õ©!|0³Òä¾Ä”t-@chÅ|O¼Ž™ö6ægì˜Ò¯»°÷þ>"röìÉfïÞ}tè๛I??{ÝæýôS¿ü²¹ê}í“rríÚµ¡eËÚ¥^ÅÅ%ddlsÛsÄÄDZ2Í^ß½ân 
)Ý*Âl’X™”,C;ž×wbš¤Áì™ñfgø5Å%Þm+&™½xó=4˜ŒY‘f$88¨V™NHH0‘‘ö{:Î9'‰Äijj³[ã¿V¯N'?¿`=‡¯¯SIŠx\ÕÉÈPÌXàºCÙs1¯þiì^"•â17˜“Ê“21/¾ÙpRäDÊ1«ló€aJ)çcúVîÇì—#"M\XX(»víaÕªõUÇ:vl×l“”ŸÞDzºg·•JJêÒ$§…%%u嬳ÚW½ïççÄÇÇþ”¤4m퀪ûIz¡Ò­Ó•ŒÙßä À¿âØZLÁ÷‘æ" Óãõð¦Ge8fåÆ%" :º%II]9îÎ2°°bc£Ýv=O:z´ˆÄÄNtïÞÙcÏáççò“lÆÏÏ—ï¾[ÆwßUn>ï¼Aœ}v' £r%)MÇé”n-Ç4õÊñ91û\LÅ|MÁìg2³r²Ø¢¸¤é(Å”}-ÀLþê†Ù?ç¯ÀPò+ÒdmÙ²ƒ_~ÙâÑÁ[í‚¶ IDAT·ne›$`çÎ, T½ß©S{ÚZ‘÷2¤/ô:æx^žûZhüOIŠ}Õ-ÝÔ-ܹá©LJTºÕ0˜Ão* d €71ÉÉæz'rº–c^dHÅìPÿ0¸Xm]X"âIíÛ·©µ7ˆÓ郯oó¼5ëÒ%á˜R7õ¤œÜ·ß.e÷î½ý¾éß?™¤¤Æ`Ð<ì)8ꤤnéÔ.ÝZ ü‚v2?0=“¨NøvÏcúMò¬ Kš‰£ÀرÄÿzbU<)ÓϲHâëëËÏ?oâ矫û0’’º0lX £²ÎyìÜY{±ºïŸ©ŽÛӜߜ{î@ºtI°: ·S’â@LBRÙOR·¸°˜ê>’ï@V#ÆØ”\€)éƒ)›ó5}˜‰FKãúóóÿÌjÞÀ…˜^• xüY˜Þ2—§‘3×½{g:ujwÌqw–éøúúè¶ëyRNÎ~üü|éÐÁså]VL¨j ß~»”ï¿_Yõþ°aýèܹƒ…¹‡’ïˆIHNTºu€ÚIÉOh Йð®Â$'}*Ž•ÿÁL^ZYÏãDÃaLÉá'˜2ÃA˜²¯iÀŸ9qâ<Ø…Ù@TD¼ÔÚµ¿°bÅz<ö:µcĈ»¾»•””QTTÝŠUkD³ëÜsRZZZëXP=Ó“Q’bhÌÔ­•nm¡vR¢Ò-÷ˆÂŒ¾ ˆ«8¶x x ­F‰w™ôÀ¬êÝ <ƒYù»ØYÏc:b¾¿?Æ Ë/•œœÈ!}N~b3кu ;wf‘Su,$$˜˜ ƒ²~XÉæÍ;<úƒ÷!99Ñ£Ïqµ·’’R†PkŸws:­ÙsEIŠûý©NJú±uÎ9ˆIH*“’Õè&ùdºaêòÊ\Üœ_ñ¾ øSÒµõ›ˆ}ÌaVü.©ñç-Ô~Q£rË€÷€½£ˆ4Prr"]»v¬uÌßßÏ¢h¬·mÛ.öï?èÑò®’’¦Y’–¶‘íÛwU½ß³g"mÛÆàö $åÌE#©^%IÆô;Ô´ ø•n®0LmþÚžŒ¹q»“ÜÁܰý°ÆÝŠ4’½À¥ÀDL’2ó"Ç혟 òå´Pà÷À”ÆSDN&=}K—zvÂxçÎ œþ>‡;EG·¬5¥ªE‹ Z´¶. 
èÝ»;ùùµw— ±(÷j*IŠ“Æ¹éoHéV9¦\«fù–J·NŸ³£{fêщ´Åì1q^ql7¦ßäm ÛC1Š4¶˜ñÄoa^$ù¸âس˜Ÿ™J·`J7Õ½€ˆX«´´”nÝ:×Ú'ÅÝ|||N~’— iÁÆ[ص«zéääDºwoüý9ìdçÎL23kßÞ¤¥5ddÃõèq¶%ÓÂ잤¯b^Eƒ1IIM¥T'%•‰‰’ïö<¦œ¯>à^̾)w ©]bïR½Á¢'~”GÖx ¨ñ÷ÊlžÅ¼ØãM+Ƈ€_[„ˆ7ÈÊÊ&+Ë}Óòcb"Ù0Ò› ÒG=)§(**‚ÌÌlöì©þ¾9çœ$BBâ-ŒÊ=¼=IŒ)ý©oÛÌ_Õøû>કnÙËuÀÔž{#¦Iø à Ç"q‹8~ù¢|9µ±Æ ÿ§¥YÚ³'›½{÷Ñ¡ƒçn&ýü¼ý6¯¶Ÿ~Jã—_ªçßhŸ”“k×® -[Ö.õ*..!#c›Ûž#&&Ò’©hÞüÝ{fGå•0äÿ¿½{Šë¾ï~ÿæÂÃE„b„,É’eQUöªã4Ç+N.K®Ó\žœ¶ÎYyVn«Ééq’Sã¦'IÓ$mÚäižÄÉJšäi+y=Orìød­Ä‰-×aI–%nK€„@\$@\Ã> Íp·½gï÷k-ì={~¿/0{Ïï;ûwù[I¿’T+¦ž¯ç4ÙpJdï–ÔgQÙŸ×dÿ}À4ååå*.NœO4­Ö××§7ÞxÃî0[¥¦¦ÄtÓ RµzuâO»;wU^¾)f_¢ ü·Ã©SõR 0uÅ óx¯×£W^©Ñ+¯ÜnÏÝÿ½Ú²eêÒ‰'ÒÝ륗^²1ŒÙ]¿~]gΜÑÛo¿­3gΨ©©I¡Ðô^]Ÿþô§õÈ#˜Z÷G?úQ½óÎ;¦– ÐÒ񻮮KWÏÏ_“0IŠ$µµuhhh8²]RR¨¢¢ÙÖ©…$UUÝ­{î¹kÚþþ~ó–ùKNNVrrü¼H’rß}÷ŽòùzßûÞùyppP¯¿þº^yå=zT555Ö·¿ým=ôÐCzðÁïPÒ¹X„ÂÂu1kƒ¸ÝIòxœ<\Ø:eeEÓºº1&en/¿|L—.uZúºÙ³g»‚ÁøO`pgB Ѓ>IFnÞ¼©ãÇëµ×^ÓÏ~ö3mß¾]yyñ_`¾<šÕÐp{F0X¦ýû÷Ø•}úúúÕÖ»RÄÔí¥*.Þ0mpþrpß}{UVVdw¦K¸$e*ŸÏ§ªª*UUUÙ À¼TT”ª¤dôýfvÓñx¼JMõ›Vž•º»{åõz´q£uݻ옡*^~ù˜^}õDd{ÿþÝ*-ÝhcDæHø$ Ñœ>ݨãÇkå÷ßi¥…¥))Ù |³…4:z{ÒÙ¼¼ì˜)š1Ý}÷íÕøxìrS))‰‘˜ÎeÎ$åSŸú”êëëTèøCmØ0ýÓLÚ¾½\UU»ìÃòósÕÖÖ¡®®îȾ@ U¹¹6•^{í„Οoµ´Ž}ûviûörKë˜ÉœIÊ›o¾©ãÇkõê¹gáV¿nܸ1ï¾ÿýïëüùós哟ü¤Ö­[· çÀrÅuHL§O7êܹ۳ˆ–——èÞ{wÚ‘}’““•™™³¯§§O==æ­Ûœ——£‚‚|ÓÊs‚±±qýþïß³NŠÙÜn{Ö\™Ww¯µkת­­mÎãÊÊÊÔßß¿ þíßþM¿ûÝïôœ?ù“?áÍná: $žíÛ˵ysqÌ>Ÿ/Ñ—®[¼ ÚÕÛ{ÍÒî]ccÓ—°XêêÎéâÅöÈvee¹ ÖÚ‘94&%33S^¯WïÿûUX8™±=óÌ3ºqㆮ]»¶è òòòÔÙÙ9çqEEEºpá¢ë€åŠë(Xêë›uìØ)Kë(--Ò$ÎÄB99«bf©JKKQZZª}%€;*488³/#cy,¡± $åÉ'ŸT0ŒÙ÷ío[’ôÄO,:ˆ¤¤¤˜í‡zHÅÅ“Ÿ.üä'?‰Ü™z⃮$€óqËøø¸¶n-Y'Ål‰t¾i:w®EííW"û¶o/WEEü×çH$mm—uùrW̾ººs¦Ö±mÛ[f sÔì^O=õ”vìØ!·Û-—Ë%Iz×»Þ%ir?ìAW qpGkëeݸqûSðÂÂõ¶,šçÅÅÊÉY5mk«yk¥dd–Ý4Ä==×´ví­[·Æ²:V­Ê´¬ì;YP’òꫯêüù󪪪RNNŽ$é׿þµ†‡‡544´äÚÝn÷´3ÃÛá7[؃®$@bà: $†’’BefÆ6˜³²Òg9zù;sæ¬.^¼¤ôô4ËêØ´iã²KR$éúõy½··mPvv–™cAIÊ‹/¾(·Û­’’’H’òóŸÿ\}}}Òš5KËâZZZäv»UTT¤ŒŒÉÑÙ³g5::ª‘‘¥¥Y÷ÂÅÑ•H \GÄÐÝݧúú&Kë((È×îÝ•–Öa¦ÊÊ-Ú±£Âî0JIÉuw_ÓÈÈíõeB¡ñ;<Ãz†)å,(Iùë¿þëicR¾óïHZÚ˜”°ýèG’¤/~ñ‹Ú»w¯$éë_ÿº®\¹"—ËiÃ>t%œë(úûä÷'[:æ"--Ų²­pôhŽ­‰lïÛ·K{öl·1¢ÄÔÔtQMMM+¯¨¨@ë×çÍëØ‰‰1¥¤ø<7o¦|.Ìýa]ÝÕÁÅÖë¨1)ŸûÜç´mÛ6¥§ß¾Ýùõ¯]¡PH_øÂlŒ at%œë(8†††uõjOd;77[……+s¯ÔÔd¹ÝÏçmLNv¢¡áâ³,¸¼õVo¯Ëf×:)ííWå÷'ëÝï> 
­[KÔÑqWÒ›o¾]08¸æ¦t5îñÜɆ ë´jUlW¯›7ÇÔÔtÁ´:rsWÏ9+ÚµkƒJMõ¹ö왼SuéR§B¡qwÿèŸ-¶Þ%).—kÚ¸3Ç!|üãWyy¹òònÎyê©§466¦¯~õ«¦ÕƒÅ£+ àl\GÄ‘ššÓM'HÕêÕöL;22®ôô€ñÄ%½ðÂQù|ç¿36¦×››/¾úwî ª¼|SÌ>»þ·µ]UJJжn-‘$?~f¼··ÿÇuu-æ 1É©SõR jY{Î$¥®î‚á÷'»öí›WôÜs¿ õ÷knn­_t½ 9¸©©I7oÞTiiiä“ôÚÚZéæÍ¥ÿßÖ®];íS¾‚‚I’×ë]rùX:º’ÎÆuH éjo¿¢“'k#ûŠ‹7Ø’¤œÿùÏ«²rrÞí¯}íkêëë“Ëåšö ûB ¨··Wééé‘7Óëׯ+ ) Ñvº’ÎÆuH 99« NŸ8ÊÌn:ååÍ=^Ø0\Z½:S~¿_¡PHmm—uíÚà—L dFFFU^^¢ŠŠRËêðz}sÓÛ; ÔÔTcÆ<—$½ôÒã½½ƒ/\¾|yȲÀ–Àëõè•WjôÊ+·§n¾ÿþ{µeKIÜb8þ’ü~ŸvîœOô‹_¼8100ry±³z…-j â/ùËÓöÆÒg„øû¿ÿ{I± àÏ~ö³4€„®$€³qCKK«[,]¯{ì½.I:{öI[ ô®wí×­åêl1>’ßïÓƒî—$ ¨»»×uíÚàg–Zö¼’”žžÍy\xÕñÅ ò÷½ï}OÏ<óŒ$illL«W¯Öõë×—T6ÌAWÀÙ¸Ž‰£°p]ÌÚ nwÒ´µÈ¬eÈãq)/o<&&&ÔØØ4ÑÛÛÿÏq B’TVV4­«[¼Ç¤„B!¥¦¦ÙÙ™.IúÕ¯^ïí¬9¾Ó¼)×–™‰‰ I.mÝ:yì¹ç~k ö=ÛúìRËžóLX·n:;;çUXZZšJJJÜXœ5 òûý’¤ŒŒ /¨\˜®$€3q‹ÇãQCC³nÃË´ÿž¸Åpþüe~:ôIÒÑ£ÇeF¨®îÂçãÄ-ùùk”Ÿ¿&ÞÕFŒŒŒ)HÑã?ì’&g§¿éîïqÜ]”ŠŠÂº\Ò~´ä<`É|>¯*+·H’ cB/¶×® Nïrµs&)Ï>ký```@–׃¥£+ àL\GIJcG…­k‚†¡@ E6D>`¬©y{¼§gà'Z事1<|ÓýÜs¿ÑsÏ™7[ìb¤¦&+##`¤§§º$éùç_ õ÷5/ev*+lݺñÏóó³¿WYY¡w½kŸÝáèŸþé§Æñãg\åå›tâDÆÇ'†ëë/~ÃŒ²ãyOqF¿øÅ/üÉ^x¥sÄ]Içá: `¡ còëÿøAIÒ‰ur¹Œ¤žž‘OÄ+†Ý»·¼–ššlddìÍP$õööëÚµA—$õ÷jpp0éÚµ¡ÿjw\ѶnÝøçëÖeoçÎm®¸×îp$IŸüä]ó7ßÑO~òsIF¨·÷ºikëØž¤D/ ç¢+ à\\GİeKÁvÇ“vêT­Nªû JII޹v¼ôRÍxoïà¯ã4‹UÒž=åõ€¿ì‰'þ$)3ÓþkXoo¿¾ûÝŸé™gž5’’\ÆÀÀðåsçZ_´;®°­[ ?´~}ö÷\.¹Ž?:~ü´Ý!Eøý^×èèÍ$—+ÉÈͽhZWAÛ“$º’°xåå…w¯]»ú¼¼ã#y¿íw~õ«ÿTmm£jjN+33]†r ü—xÔ½gÏæãYY-¥¥E:qÂ9½©Ö¯_«®®n—Ûír >iw<Ñ&&&®^¾Üýa»ã˜M(4‘<44òÿµµÉ´O«IR0'º’°xᥰp]Òã¿7Éîx$é=ïÙ¯ææ‹zùåcòxÜ}}Çâ1‹U0Xü·>Ÿ¯hxx¤ÿÌ™F««[°ÑÑ1ïèèøÉúúÖŸÚK´³gÛew ñF’‚9Ñ•€Å)-ݸsÍšÌ×’“½žÖÖË¡¿û»ï…ìŽ)Úðð¨;559©¿ôÏãQ_]Ý;_”ôÅxÔ…ÄF’`—+äëî¾ögvÇ1ÃPÒ¥K]íí=Îéwˆ$À2MMíÇ$³; Ñ8¢_$„‘¤p’H.—K.×ôYK:$—Ë¥ºº:¢À¦')¼Y€uŽ9ù¹®®.f€åbÖ$ej²ár¹ôôÓOÇ%¨¥"Q°UWWÇ\‡Ÿ}öYUWWO;.|×ÅårÅ$1GމyìСC’&“èýÑ×ÏÙž€•æu'åСCª®®ÖSO=eu,Ã0tøðamÛ¶mÎç`¥9“”ð§vÑ ÊLwY¢·Ÿ}öÙH#uêÝ—Ù>á oG÷»?!e…ß\·mÛÓ{¡Ÿ,bá¢:á¯úz¦]̾›räÈóX04ª«« Ã0ŒÚÚÚ˜z%M«+º¬©ÇÏUVt|³þ9úØp}†a•••Æ­¿ÏÄÿßb©ç${öì™ö¿]¬©ÿƒ©¢ÿá¿sôvuuuäo¯[¯Éð×áÇ#Ç…·õº•dTWWGöUWWÇ]Ïdgg‡Ÿódüÿ%XÆz%ßüæ7tNM¥[תð¹vðàÁȹ~,úu¯Ρ©>|xÆë|0¼ãsÌðú믇Ë‹ûfMR4Câý†8ÓöL>|xÆFlôáÔçÝ©ž…”u§cÃÍö¦K’²0Á`pÆ×‹aÄ&!†›€†ÓàŠ¶Ødv¶äöNHR`S“Ø<_¢Ïƒðcá„b>ÂÇÎôÍr]\Hùs!IÜÉ»{ÕÖÖª®®nIæÃ]ÂÝfœ¹ 
)ëNǃA>|82…çÔ®`X˜ÚÚZI3ÿ­+**"ÿ‹èn*áÇ9¢ŠŠŠHYáçG÷ûÿ˜ž3Fwo™i›‰°œá$Eš¿$Ýî;/M&(Ñ5Ì´6°’Í” „ÕÖÖFÎ!iòÜzê©§ cö‡ ¸ÞðàÁYŸ€Õfíî5[ׂè®Uá±3u÷šÚ5`긄êêêY»‰Í´/z{®²¢»*ÌvìáÇcö×ÖÖÆüžt÷Z:MyM}MèÖÿ*ü˜­ëÉb_'³=g&t÷‚ELéîµÜÐÝ 0—9“”ðv¸?uP²¦4"£¿fjP†¿fêS=W S·g*+zõ|ê-^’”ù«­­cbÆ´¹ÕÕÕF0œ6%:é:™ÁÔÉ´È× I lD’2’À\L¹Ü¤,L0œ–ôE›i¢‚™îœD'™$IA¢#I™I àNæ“Ì×\}ÕƒÁà´ñ>Ñc‚žzê©YzìLûæÚ€³ÍkÅyˆ’ŽB’ÀQHRàTu{BžDuX“ñ¶;DB’+„“‹©³á<¥ÄK<‚š2ƒ©fþÝ`’X¡úÖ÷ƒSöœòx¢Ù&É%éˆ&‡¹îºuü!kÃZ^HR`…go}ÞúšúsôãSïP„‰6SׯðöÁ¨Çko}E—eD=¦ž]Î|…“ŽpÝSã9õstýS»~ÖôØî×Lu„ãX6wzHR`…º[_’ôè­ïS êvãÜuëK·öÍ”¨ÌGPÒcº}§¦öV¹áú¢Šèzë4Ùè_H¢þý¦>ç©Yòvʬ IDATÊ92Ãñá¿ÉÓ Œ+ºŽpÂRõ¼êùüNE’«Lm”œ²ÿ©)ÛÑ??ªÅ‰NŽ4ågi2)¸S½ IRêgÙD±IWØÓQ?{géÈ㊮#œèD'aOO©/¡¤À*áFr¸!~pÊþ;Yì”ùˆŽ'Ü5ªzåT̲¶äEŠM²o!qE×ý÷ w[h÷5G!I€• ðÃS¶¥éw94ÏÇ–*\vô݈ð×B¸Gß™¯è;#S“¶¥Ä>n[Ô>’`S»+E7èŸòXôÏÏ*VôƒènR‹]ot95ÿÁæÑI×B“”ðø˜ b»§-6®§¢¯‹ŠÇÊDÏR$)°ÒÔüÔ;)áOþ£gîÚ¦é ì:ņÿ%Ä`^§Ø™Àæ³àbøøƒ·âYÌÔÂ39Yj\ÕŠíæU­“â±;,{SG«»ÃãG¦<6×`ð;»MÓÍ´o&wŠq¶úæþÒÜ¿Çâš©Ž„$?î¤p’ŽB’ÀQHR8 I G!Ià($)…uR ¾õ­oéðáù¬Åµ2 ØÀÁHRV°7ß|S.×|Ö%°T/^Ôŋ퀄@’Öú™¤Mvá`×íà<‘$åwÞ±3Gµ;«cg †¡»¤A·¤ûíŒÀŠ4õ.Ê«’F%ý&Ìÿ¤ ·ãnJ‚1%I™LPrÿqïÞ*Û”¡¡!=óÌåñ¸u×]›\¯¼RkK+]eeÉß­Y“õ¹‡~ŸëÞ{íô¯ÿú?äõJƒƒÃ†Ëå´%Àb›7¬_³fu£Ïçõ¹ÝIvÄ0<<êöûÝkÇì¨ÀŠæ•ôÝúùeI#é·’~zk߯%=!飚¼Û²ñÖ÷¿ˆo˜X¬%')[·}zýúìoÞ{ïï¹ÞûÞ÷˜Ó¢<ùä_Ëëu«ªj«m1¬táåþè×Þ½»m‰áÕWßPCCV¯NWww¿- 7Àj%%eùùÙgÖ¬Éó~ö³ŸN²#†ööKúÖ·þI.W’$½iG V´hrÚáJúÝ,ÇÜ”ôß%ýP“]½þJ“w^Zã –fIIÊæÍ>SPýÒÒr×Þ½{tõjYq-È7¿ùφßïsÝuWIø q6™ d~î‘GÞïºçž»m‰¡«ëª~þóÿ%¯×klÞ\àêî®·%ÀJååë7çåeŸ¶3A¹ys\ßøÆ?Êãqktôæ˜1XñŽIzfžÇŽIú®&»€±ŸÛµnÝjùý>Ûb¬RZºqçúõ«jrsóÜù—Ÿ²íNááÃÿK££7\……¹ Rì °,)IIJr¹~ó›WåóÙëлÇ.ORhÿþ=:p`·­ÿ„¯~õ»òùHP°<­Z•R“’âóôôtéÉ'¿`[¡PHé驯úõ9\t–ZRva.ù|%'ÛÛ8t¹ [ë+¥¤$+?­òò²mãÍ7ßVqñZ€å–˜¤LìuĘØÃíöxNªÕ©Sö®KC7/,gn·KÛ¶mÖÖ­%¶Æñæ›oÛZ?`å°»Ÿœa„B6©¬l£­q¼ðÂlS¤‰/)ÉíjhhRGG§­q¸\ÜDÄI –dbÂ0²³Wi۶ͶÆñüó¿µµ~ÀJII†«³³[Ýݽ¶ÆA’ˆ’p¸ññ‰‰ûïß—dww¯o|c¾ë¦°4,ÏÀQHR8 I G1eLÊ™3gÕÞÞaFQ³*-ݨ²²bKë`?S’”®®nùýÉ*(È7£¸­^eYÙœÃ´Ù½ÆÆÆ5:z3²ŸŸ«ììUf‡;uª^ÇŽ²´ŽÒÒ"=ð@•¥uÀ~¦$)ëׯÕåËêêêŽìËÊJ7£h$ˆññqmÝZª½{wXVGRC¨°²µ¶^ÖÕ«=–Ö‘——cé]qæÃ”$%%%Y™™±IIgg·:;»gyÆÂåççiݺ5¦•óµ¶^ÖC‘íÂÂõ ËlŒX^.\hWoï5åæf[VÇØXȲ²˜/S’”ææ‹VNŽuÝ»ÆÇÇ-+KWRR¨ÌÌŒ˜}ÜMÌ—“³JeeE‘í´´¥¥¥Ú0mLJ^^ŽJJ6D¶4¥¦úÍ*×Ýݧúú&Kë((È×îÝ•–Ö8Y 
¦sçZÔÞ~%²oûörUTpǰ¼˜’¤¤§§©¥¥M.´EöíÚµM›73eðJÑß? ¿?ÙÒÆRZZŠee‰ ¸¸`Æ;Ö­­—M«### ¬¬Œ¹ÀB¦$)¥¥EZ»vúx3ß8³²Ò•‘A÷!'ŽÔ›››­ÂÂu6F,/gΜÕÅ‹—”žžfY›6m$IØÎ”$å­·êÔÑqU€uý¢ËË7‘¤8ØÚµk4::¦‘‘ÛÓP30[eåíØQawXÊ´1);w™É 1::ºÔÑÑeZy¹¹«éBˆïèÑ=ZÙÞ·o—öìÙncD˜Ï´$åÅ_Õ‹/¾Ù>p`¯vìØjVñp¸+WºÔÙyU7®·¬¯×´—+ªªv͸QôBºKåñ¸åv»M+€Å0¥Õwß}{µÿžiûyã\YRSSbÖoRµzu–ËËk¯Tmí9¹ÝÖ-lz×][uï½;-+€ù0%Iyùåc:{öKß8ï¾{›vï¦KƒSed¤«½ýŠNž¬ì+.Þ@’˜¬ªjcRËžiýgî»o/cRV°œœU 7OÛßÔtÁ´:22ÊËË1­< ½õVÏG¶Y'°™–¤ÔԜҙ3‘mÖIYYZZZÕØØ¢ìlëîœäç¯!IÁжsgPåå›bö±~`92%I¹çžª¬,ÙX7?œ©°p]Ì ^·;IƒÝ³444«¾¾ÙÒ:‚Á2f ØÎ”äéÓ¦vë™ÉŽÌæ`G Íjh¸Ý€ ËfœPÀ⌌Œª¼¼D¥–Õáõú,+€ù2%IQeåf••™QÜŒ’““-+KWQQª’’ Óö÷÷˜V‡ÇãUjªß´ò€DÔÖÖ¡¡¡áÈvII¡ŠŠ lŒó™ÖçÂ…Kº~ývƒtóæbä›U<îôéF?^+¿ßºd²¤dƒ¸Ç²ò§+++š6ccRË‘)IJyù&õõõÇìKIáï•fûörUUí²; `ÙêëëW[Ûå˜}S·—ª¸xôÁùÄ›)IJoïuµ·wÄì3û³´t£Êʘ-ÌÉNŸnÔ¹sïD¶ËËKX0Qww¯¼^6n´®{WVV†ee0_¦$)]]Ýòû“-íÞÅ¢€Î¶}{ù´)§}>¯MÑËרXH££7#ÛyyÙÊÍͶ1"ÌgÚ˜”±±ñ˜7Îüü\eg¯2«x8\}}³Ž;ei¥¥Ezà*Këœ,??WmmêêêŽì R•›kcPXÀ”$eýúµº|¹3æ3++ÝŒ¢‘ ÆÇǵukiÌ:)fKJJ²¬l $''+33öÚÚÓÓ§žž>ÓêÈËËaÒ€íLIRRR¦¿qvvv«³³{–g,\~~žÖ­[cZy0_këeݸ1Ù.,\¯`°Ìƈ€ååÂ…võö^³´{רXȲ²˜/S’”ææ‹VNŽuÝ»ÆÇÇ-+KWRR¨ÌÌØ·ÜMÌ—“³*fMª´´¥¥¥Ú0mLJ^^NÌb~@ ï­ ÝÝ}ª¯o²´Ž‚‚|íÞ]ii€“i:w®EííW"û¶o/WEw,Ë‹)IJzzšZZÚtáB[dß®]Û¦Íö„å«¿@~²¥%­ÃJW\\0ãëÖVó¦|ÏÈ0 1Àv¦$)¥¥EZ»vúx3ß8³²Ò•‘A÷!'ÖÕ«=‘íÜÜl®³1"`y9sæ¬.^¼¤ôô4ËêØ´i#I Àv¦$)o½U§ŽŽ« ¬ë]^¾‰$ÅÁÖ®]£ÑÑ1ŒÜž†z|œ¸€Ù*+·hÇŽ »ÃÀR¦IÙ¹3ÈLNˆÑÑÑ¥ŽŽ.ÓÊËÍ]MB¬xGÖèèÑšÈö¾}»´gÏv#À|¦%)/¾øª^|ñÕÈö{µcÇV³Š‡Ã]¹Ò¥ÎΫڸq½eux½¦½\„TUµkƵˆ¢Ò]*Ç-·ÛmZy,†)­¾ûîÛ«ýû÷LÛÏçÊ’šš³~C ªÕ«³lŒX^^{í¤jkÏÉí¶naÓ»îÚª{ïÝiYù̇)IÊË/ÓÙ³ïXúÆy÷ÝÛ´{7]œ*##]ííWtòdmd_qñ’ÀdUU»“XöLë?sß}{“²‚åä¬R0¸yÚþ¦¦ ¦Õ‘‘P^^Žiå‰è­·êÔØx>²Í:)€åÈ´$¥¦æ”ΜiŒl³NÊÊÒÒ񻮮eg[wç$? 
I V´;ƒ*/ß³õƒË‘)IÊ=÷ìPeey̾@ÀºyüáL……ëbõºÝIòx옥¡¡YõõÍ–Ö –1[Àv¦´ OŸn4µ[ÏLvì¨`¶0óxÓÆ¤ŒÇ¼qæçç*;{•YÅÃáêë›uìØ)Kë(--ÒTYZàdùù¹jkëPWWwd_ ªÜ\ƒÀ¦$)ëׯÕåË1oœYYéf1>>®­[KcÖI1[RR’ee‰ 99Y™™±×Öžž>õôô™VG^^“žlgJ’’’2ý³³³[ݳ>nYÙXº’’BefƸån`¾œœU1kR¥¥¥(--Õ¾€°€icRòòrbó ÒXxoéîîS}}“¥uäk÷îJKëœ,HÓ¹s-jo¿Ù·}{¹**¸c X^LIRÒÓÓÔÒÒ¦ Ú"ûvíÚ6m¶',_ýýòû“-m,±hVºââ‚ïX·¶š7å{FF€iˆ¶3%I)--ÒÚµÓÇ‹˜ùÆ™••®ŒŒùw …B·Ûõ€¤ÿÛ´ pGCCúzµ'²››­ÂÂu6F,/gΜÕÅ‹—”žžfY›6m$IØÎ”$å­·êÔÑqU€uý¢ËË7Í;IillSffšËï÷Ý»w¯ÿí¶¶þ}—/_šû™X¬µk×httL##·§¡·wîÄ„aký€*+·hÇŽ »ÃÀR¦IÙ¹3蘙œúû‡”––b|ðƒïwýàG‚›6y»|>_Î… FìŽm%éèèRGG—iååæ®^PB—Kî@ å=%%e--íÖ˜âäèÑ=ZÙÞ·o—öìÙncD˜Ï´$åÅ_Õ‹/¾Ù>p`¯vìØjVñóöÖ[ÍòzÝúÀÞçÊÈhãÆ‚¤³g[n’ XëÊ•.uv^ÕÆë-«ÃëÿËõôéóJMM–Ûíñø|ÞÆäd÷'.þ7˂⠪j׌kE/¤»T[n·{ÞÇ º'&B›M ™”¤Üwß^íß¿gÚ~;Þ8C¡ eee—aLèâÅV£¯oà‹¦‚Y¥¦¦Ä¬ß¤jõê,[bWzzÀxâ‰Ç’^xá¨|¾óßÓëÍÍß²% À¯½vRµµçäv[·°é]wmÕ½÷îœ×±==ýÊÉÉJÊÊšx·ÏçùÉÛo·|ȲÀ+Š)IÊË/ÓÙ³ïXúÆy÷ÝÛ´{÷»4ÔÔ4ÊãñèCzÄ%IÏ?Tcc¡a>A·^FFºÚÛ¯èäÉÚȾââ ¶$)'Ož•ÇãÖ‡?üG.Iº~}À¾F‚‚å ªj—cƤœ=Û&¿?Y=t¿žþ7Ø»wëþÁÁîʺº«ƒvÇHl¦u÷ºï¾½¶ŽI™˜0äv»•››-gò×jhh õö|ö VœœU §÷øhjº`ZåååÌyœa¸´zu¦ü~¿B¡ÚÚ.ëÚµÁ/™`£·ÞªScãùȶ]뤴·_•ߟ¬w¿û€¶n-QGÇ]Io¾ùvÁààš›ÒÕ¸ÇX^LKRjjNéÌ™ÆÈv¼×I9q¢IÉÉ^=öØC’¤ßþö˜&&B¡ºº OÇ-ˆ¬¥¥U-ÊζîÎI~þš9“”cÇÎÊãIÒ>ð°$é—¿<ªññÐp]Ý…°,0 Nvî ª¼|SÌ>»Öjk»ª””mÝZ"I:~üÌxooÿëêZÌëç X±LIRî¹g‡*+ËcöÖÍã?•aòû½Ú°¡ 2nåøñ3¡žžþJšˆ[ +\aẘA½nwRä®V|òx\ÊË[#Ç£‰‰ 566Môööÿsƒ,ÓÐЬúúfKëËæœ-¬©©]ÉÉ>=òÈ»%I'NÔËåšHêîù¤¥ÁV ÓVœ·rq±¹Ü¼9.¿ß«?þã%IÇ×Êå248¨ÏØÔ ãñxÔÐЬ††Û ¨`°lÆ ¬rþüe~:ôIÒÑ£ÇeF¨®îÂçã`¡‘‘Q•——¨¢¢Ô²:¼^ßœÇôö(55ÕØ°!Ï%I/½ôÆxoïà ¬G0Ë¢“”âââ¼›7C®#G^03žEñû}Ú¸± ²ýòË5ã½½¿dÚáøÙ±£ÂÖÁ¼†a(H‰¹›VSóöxOÏÀOÄÝ4,#mmŽl—”ª¨¨àÏ0×¹s—äóyõØcïuIÒÙ³ïHšp üEÜ‚,{‹JRŠ‹‹ó ÍiiÉFqñF—ÙA-ÔÛo7ª©é‚FGÇÔÚzIRÈ=00ð„Ýq!~ cò+|7íĉ:¹\FROÏÈ'l 0MYYÑ´óâ=&% )55ÝÈÎÎtIÒ¯~õÊxoï`Íùóæ­Ü Xñœ¤lÙ²±8//PŸžžîýøÇ?””dÝ´ÃóõÐCô•¯üw}ç;?UR’k¼·wàÞ0­·eKÁvÇ“vêT­Nªû JIIVzzzdû¥—jÆ{{M÷,'ùùk”Ÿ¿Æ¶úGFƤèñÇvIÒ¥K¿éîïá. 
ÀT JR6mÊ[“››qÚçóúSSSôãÿܪ¸lõêUè—Ïçõ\»6ò1»ãYîÊË ï^»võyy9ÆG>ò~Ûï¦ýêWÿ©ÚÚFÕÔœVffº #äø/vǘaxø¦û¹ç~£çž³÷TKMMVFFÀHOO½µÕK¡þþ¡æææÖz[,; JRü~ÿ'††Fj‡†FÔÛ;`UL‹6> ôõ ý¨­íJ“ݱ,gᥰp]Òã¿×þ[i’Þóžýjn¾¨—_>&Ç=Ñ×7pŒ»iXvïÞòZjj²‘‘°ýÀÞÞ~]»6è’¤þþA &]»6ô_íŽ °üØþ¦‡ÄRZºqçºu™o¤¤$ûFGÇBvÇ3Õðð¨;55Ymm½A>ÝE‚KÚ³§¼>ð—}ìc'ef¦Ïý ‹õööë»ßý™Ö¬É5’’\FkkÇåcÇê7ØDù©¤?•ôAI?³9,A<±À2àr…|ÝÝ×þÌî8fcJºt©«±½½‡ mÏžÍdz²[JK‹tℹ/çÓ§›tíZ¿*+7kÕª…%?ëׯUWW·Ëív¹Ÿ450n!IÁ‚45µ“tÌî8€å,,þ[ŸÏW4<<ÒæL£éåŸ>ýNjÿ ÏÐÐàPff`|¡ÏóŽŽŽŸ¬¯oý©éÁX‘ŽJ2$ý¾Ý€É~ªÉë۟ږƃžqå¾õÝqãÊHR`%"I8š“nÓ­,ÿC“ÿïÇíXAHRŽæÄ$+KxòK@ü¤$v£±ÄçÀÑHR`·ðkÆ?$)G#IÝh,ñÇyp4’ØÆákÿ„­Q0 ’Ø$ˆ?Î;€£‘¤Àn4–€øã¼8I ìFc ˆ?Î;€£‘¤Àn4–€øã¼8I ìFc ˆ?Î;€£‘¤Àn4–€øã¼8I ìFc ˆ?Î;€£‘¤Àn4–€øã¼8I ìFc ˆ?Î;€£‘¤Àn4–€øã¼8I ìFc ˆ?Î;€£‘¤ÀnáÆÒ„­Q+ I ÀÑHR`7K@|¹4yí7ć‡"IÝHR€ø _÷IPŽE’»…_ƒ$)@|ðÁÀñHR`7L@|qÎ$v£ÁÄçÀñHR`7L@|qÎ$v£ÁÄçŽG’»‘¤ñÅ9p<’Ø_œsÇ#IÝh0ñÅ9p<’Ø_œsÇ#IÝh0ñÅ9p<’Ø™†€ø"I8I ì”$É¥ÉŰ9`¥ I8I ìDc ˆ?Î;€ã‘¤ÀN4–€øã¼8I ìDc ˆ?Î;€ã‘¤ÀNáÆƒæø!I8I ìDc ˆ?Î;€ã‘¤ÀN4–€øã¼8I ìDc ˆ?Î;€ã‘¤ÀNá×% ~HRŽG’;ÑXâóàx$)°% þÂ×}fÕ8I ìD’ÄçÀñHR`'K@üqÞ$v¢±ÄçÀñHR`'K@üqÞ$v¢±ÄçÀñHR`'K@üqÞ$v¢±ÄçÀñHR`'K@üqÞ$v¢±ÄçÀñHR`'K@üqÞ$v¢±ÄçÀñHR`§pciÂÖ(€•…$àx$)°% þ8ïŽG’;ÑXâóàx$)°SøõGc ˆ’€ã‘¤ÀN4–€øã¼8I ìDc ˆ?Î;€ã‘¤ÀN4–€ø _÷™UàX$)°I œwÇ#Ih,ñÇyp<’؉ÆœwÇ#Ih,ñÇyp<’؉ÆœwÇ#Ih,ñÇyp<’؉ÆœwÇ#Ih,ñÇyp<’؉ÆœwÇ#Ih,ñÇyp<’Ø)ÜXš°5 `e!I8I ìDc ˆ?Î;€ã‘¤ÀN4–€øã¼8I ìDc ˆ?Î;€ã‘¤ÀNá×% ~HRŽG’;ÑXâ/|Ý`߯«IDATg €c‘¤ÀN$)@üqÞ$v¢±ÄçÀñHR`'K@üqÞ$v¢±ÄçÀñHR`'K@üqÞ$v¢±ÄçÀñ\v0ƒŸJúS»ƒ@Âú ¤ŸÙ;)ÜòCI†¤Úœƒ;)°}ã0 I ì~ýMسùÿÓlòÜÚ¤þIEND®B`‚magnum-6.1.0/playbooks/0000775000175100017510000000000013244017675015042 5ustar zuulzuul00000000000000magnum-6.1.0/playbooks/magnum-functional-base.yaml0000666000175100017510000000745313244017334022265 0ustar zuulzuul00000000000000- hosts: primary tasks: - shell: cmd: | set -e set -x cat << 'EOF' >>"/tmp/dg-local.conf" [[local|localrc]] # Enable Magnum Tempest plugin TEMPEST_PLUGINS='/opt/stack/new/magnum-tempest-plugin' EOF executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export 
PYTHONUNBUFFERED=true if [ "{{ neutron }}" -eq 1 ] ; then export DEVSTACK_GATE_NEUTRON=1 fi export DEVSTACK_GATE_TEMPEST=1 if [ "{{ tempest }}" -eq 0 ] ; then # Do not run any tempest tests export DEVSTACK_GATE_TEMPEST_NOTESTS=1 fi if [ "{{ branch_override }}" != "default" ] ; then export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE fi export PROJECTS="openstack/magnum $PROJECTS" export PROJECTS="openstack/python-magnumclient $PROJECTS" export PROJECTS="openstack/diskimage-builder $PROJECTS" export PROJECTS="openstack/magnum-tempest-plugin $PROJECTS" if [ "{{ multinode }}" -eq 1 ] ; then export DEVSTACK_GATE_TOPOLOGY="multinode" export DEVSTACK_SUBNODE_CONFIG+=$'\n'"disable_service tempest" fi if [ "{{ ironic }}" -eq 1 ] ; then export PROJECTS="openstack/ironic $PROJECTS" export PROJECTS="openstack/ironic-lib $PROJECTS" export PROJECTS="openstack/ironic-python-agent $PROJECTS" export PROJECTS="openstack/python-ironicclient $PROJECTS" export PROJECTS="openstack/pyghmi $PROJECTS" export PROJECTS="openstack/virtualbmc $PROJECTS" export MAGNUM_GATE_SPECIAL="-ironic" fi if [ "{{ horizon }}" -eq 0 ] ; then export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service horizon" else export DEVSTACK_GATE_HORIZON=1 fi if [ "{{ swift }}" -eq 0 ] ; then export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-account" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-container" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-object" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-proxy" fi if [ "{{ ceilometer }}" -eq 0 ] ; then export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service ceilometer-acentral" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service ceilometer-acompute" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service ceilometer-alarm-evaluator" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service ceilometer-alarm-notifier" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service ceilometer-api" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service ceilometer-collector" fi # Keep 
localrc to be able to set some vars in post_test_hook export KEEP_LOCALRC=1 function gate_hook { cd /opt/stack/new/magnum/ ./magnum/tests/contrib/gate_hook.sh {{ coe }} $MAGNUM_GATE_SPECIAL } export -f gate_hook function post_test_hook { source $BASE/new/devstack/accrc/admin/admin cd /opt/stack/new/magnum/ ./magnum/tests/contrib/post_test_hook.sh {{ coe }} $MAGNUM_GATE_SPECIAL } export -f post_test_hook cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' magnum-6.1.0/playbooks/pre/0000775000175100017510000000000013244017675015630 5ustar zuulzuul00000000000000magnum-6.1.0/playbooks/pre/prepare-workspace.yaml0000666000175100017510000000127413244017334022144 0ustar zuulzuul00000000000000- hosts: all name: magnum-prepare-workspace tasks: - name: Ensure workspace directory exists file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org \ openstack-infra/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' magnum-6.1.0/playbooks/pre/prepare-workspace-images.yaml0000666000175100017510000000052713244017334023407 0ustar zuulzuul00000000000000- hosts: all tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: /usr/local/jenkins/slave_scripts/install-distro-packages.sh chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' magnum-6.1.0/playbooks/magnum-buildimages-base.yaml0000666000175100017510000000767013244017334022411 0ustar zuulzuul00000000000000- hosts: primary tasks: - shell: cmd: | set -u set -e set -x cd ~ if 
[[ "{{ image_name }}" =~ ^(ubuntu-mesos|centos-dcos)$ ]]; then EXTRA_PROJECTS="openstack/tripleo-image-elements openstack/heat-templates" else EXTRA_PROJECTS="" fi /usr/zuul-env/bin/zuul-cloner --cache-dir /opt/git \ git://git.openstack.org \ openstack/diskimage-builder \ openstack/dib-utils \ openstack/magnum $EXTRA_PROJECTS virtualenv env ./env/bin/pip install $(pwd)/openstack/dib-utils ./env/bin/pip install $(pwd)/openstack/diskimage-builder # TODO(pabelanger): Remove once we migrated to bindep ./openstack/diskimage-builder/tests/install_test_deps.sh # activate the virtualenv so that any tools run by dib run # using the python inside it set +u source ./env/bin/activate set -u DIB_ELEMENTS=./openstack/diskimage-builder/diskimage_builder/elements if [ "{{ image_name }}" == "ubuntu-mesos" ]; then TRIPLEO_ELEMENTS=./openstack/tripleo-image-elements/elements HEAT_ELEMENTS=./openstack/heat-templates/hot/software-config/elements MESOS_ELEMENTS=./openstack/magnum/magnum/drivers/mesos_ubuntu_v1/image export ELEMENTS_PATH=$TRIPLEO_ELEMENTS:$HEAT_ELEMENTS:$MESOS_ELEMENTS $MESOS_ELEMENTS/install_imagebuild_deps.sh export DIB_RELEASE=trusty export DIB_IMAGE_SIZE=2.2 disk-image-create ubuntu vm docker mesos \ os-collect-config os-refresh-config os-apply-config \ heat-config heat-config-script -o $WORKSPACE/{{ image_name }}.qcow2 $MESOS_ELEMENTS/validate_image.sh $WORKSPACE/{{ image_name }}.qcow2 elif [ "{{ image_name }}" == "centos-dcos" ]; then DCOS_ELEMENTS=./openstack/magnum/contrib/drivers/dcos_centos_v1/image TRIPLEO_ELEMENTS=./openstack/tripleo-image-elements/elements HEAT_ELEMENTS=./openstack/heat-templates/hot/software-config/elements # Order matters, we need the docker elements from DCOS_ELEMENTS to be used first export ELEMENTS_PATH=$DCOS_ELEMENTS:$DIB_ELEMENTS:$TRIPLEO_ELEMENTS:$HEAT_ELEMENTS $DCOS_ELEMENTS/install_imagebuild_deps.sh export DIB_IMAGE_SIZE=3.0 export FS_TYPE=xfs curl -O 
https://downloads.dcos.io/dcos/stable/commit/e64024af95b62c632c90b9063ed06296fcf38ea5/dcos_generate_config.sh export DCOS_GENERATE_CONFIG_SRC=`pwd`/dcos_generate_config.sh disk-image-create \ centos7 vm docker dcos selinux-permissive \ os-collect-config os-refresh-config os-apply-config \ heat-config heat-config-script \ -o $WORKSPACE/{{ image_name }}.qcow2 #TODO: Add size validation else MAGNUM_ELEMENTS=./openstack/magnum/magnum/drivers/common/image export ELEMENTS_PATH=$DIB_ELEMENTS:$MAGNUM_ELEMENTS $MAGNUM_ELEMENTS/fedora-atomic/install_imagebuild_deps.sh export DIB_RELEASE="25" export DIB_IMAGE_SIZE=2.5 export FEDORA_ATOMIC_TREE_URL="https://kojipkgs.fedoraproject.org/atomic/25/" export FEDORA_ATOMIC_TREE_REF="8b15e9b988b4b02f4cb8b39bdd63d182ab7004a8926ecdac6314ee5c7ffa646b" disk-image-create -x -o $WORKSPACE/{{ image_name }}-dib fedora-atomic # validate image $MAGNUM_ELEMENTS/fedora-atomic/validate_atomic_image.sh $WORKSPACE/{{ image_name }}-dib.qcow2 fi set +u deactivate set -u executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' magnum-6.1.0/playbooks/post/0000775000175100017510000000000013244017675016027 5ustar zuulzuul00000000000000magnum-6.1.0/playbooks/post/upload-images.yaml0000666000175100017510000000112613244017334021434 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Ensure artifacts directory exists file: path: '{{ zuul.executor.work_root }}/artifacts' state: directory delegate_to: localhost - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.work_root }}/artifacts/images' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/*.qcow2 - --include=*/ - --exclude=* - --prune-empty-dirs magnum-6.1.0/playbooks/post/upload-logs.yaml0000666000175100017510000000063213244017334021134 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir 
}}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs magnum-6.1.0/api-ref/0000775000175100017510000000000013244017675014362 5ustar zuulzuul00000000000000magnum-6.1.0/api-ref/source/0000775000175100017510000000000013244017675015662 5ustar zuulzuul00000000000000magnum-6.1.0/api-ref/source/versions.inc0000666000175100017510000000435713244017334020230 0ustar zuulzuul00000000000000.. -*- rst -*- ============== API Versions ============== In order to bring new features to users over time, the Magnum API supports versioning. There are two kinds of versions in Magnum. - ''major versions'', which have dedicated urls - ''microversions'', which can be requested through the use of the ``OpenStack-API-Version``. Beginning with the Newton release, all API requests support the ``OpenStack-API-Version`` header. This header SHOULD be supplied with every request; in the absence of this header, each request is treated as though coming from an older pre-Newton client. This was done to preserve backwards compatibility as we introduced new features. The Version APIs work differently from other APIs as they *do not* require authentication. List API Versions ======================= .. rest_method:: GET / This fetches all the information about all known major API versions in the deployment. Links to more specific information will be provided for each API version, as well as information about supported min and max microversions. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 503 Response -------- .. 
rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - versions: version - status: version_status - min_version: version_min - max_version: version_max - id: version_id - links: links - name: name - description: description Response Example ---------------- .. literalinclude:: samples/versions-get-resp.json :language: javascript Show v1 API Version ==================================== .. rest_method:: GET /v1/ Show all the resources within the Magnum v1 API. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 503 Response -------- .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - id: version_id - links: links .. note:: The ``media-types`` parameters in the response are vestigial and provide no useful information. They will probably be deprecated and removed in the future. Response Example ---------------- .. literalinclude:: samples/versions-01-get-resp.json :language: javascript magnum-6.1.0/api-ref/source/stats.inc0000666000175100017510000000236413244017334017512 0ustar zuulzuul00000000000000.. -*- rst -*- ================= Magnum Stats API ================= An admin user can get stats for the given tenant and also overall system stats. A non-admin user can get self stats. Show stats for a tenant ======================= .. rest_method:: GET /v1/stats?project_id= Get stats based on project id. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id Response -------- .. rest_parameters:: parameters.yaml - clusters: clusters - nodes: nodes Response Example ---------------- .. literalinclude:: samples/stats-get-resp.json :language: javascript Show overall stats ================== .. rest_method:: GET /v1/stats Show overall Magnum system stats. If the requester is non-admin user show self stats. 
Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 Response -------- .. rest_parameters:: parameters.yaml - clusters: clusters - nodes: nodes Response Example ---------------- .. literalinclude:: samples/stats-get-resp.json :language: javascript magnum-6.1.0/api-ref/source/clustertemplates.inc0000666000175100017510000002033013244017334021745 0ustar zuulzuul00000000000000.. -*- rst -*- ========================== Manage Cluster Templates ========================== Lists, creates, shows details for, updates, and deletes Cluster Templates. Create new cluster template ===================================== .. rest_method:: POST /v1/clustertemplates Create new cluster template. Response Codes -------------- .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - labels: labels - fixed_subnet: fixed_subnet - master_flavor_id: master_flavor_id - no_proxy: no_proxy - https_proxy: https_proxy - http_proxy: http_proxy - tls_disabled: tls_disabled - keypair_id: keypair_id - public: public_type - docker_volume_size: docker_volume_size - server_type: server_type - external_network_id: external_network_id - image_id: image_id - volume_driver: volume_driver - registry_enabled: registry_enabled - docker_storage_driver: docker_storage_driver - name: name - network_driver: network_driver - fixed_network: fixed_network - coe: coe - flavor_id: flavor_id - master_lb_enabled: master_lb_enabled - dns_nameserver: dns_nameserver Request Example ---------------- .. literalinclude:: samples/clustertemplate-create-req.json :language: javascript Response -------- .. 
rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - insecure_registry: insecure_registry - links: links - http_proxy: http_proxy - updated_at: updated_at - floating_ip_enabled: floating_ip_enabled - fixed_subnet: fixed_subnet - master_flavor_id: master_flavor_id - uuid: clustertemplate_id - no_proxy: no_proxy - https_proxy: https_proxy - tls_disabled: tls_disabled - keypair_id: keypair_id - public: public_type - labels: labels - docker_volume_size: docker_volume_size - server_type: server_type - external_network_id: external_network_id - cluster_distro: cluster_distro - image_id: image_id - volume_driver: volume_driver - registry_enabled: registry_enabled - docker_storage_driver: docker_storage_driver - apiserver_port: apiserver_port - name: name - created_at: created_at - network_driver: network_driver - fixed_network: fixed_network - coe: coe - flavor_id: flavor_id - master_lb_enabled: master_lb_enabled - dns_nameserver: dns_nameserver Response Example ---------------- .. literalinclude:: samples/clustertemplate-create-resp.json :language: javascript List all cluster templates ========================== .. rest_method:: GET /v1/clustertemplates List all available cluster templates in Magnum. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 Response -------- .. 
rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - clustertemplates: clustertemplate_list - insecure_registry: insecure_registry - links: links - http_proxy: http_proxy - updated_at: updated_at - floating_ip_enabled: floating_ip_enabled - fixed_subnet: fixed_subnet - master_flavor_id: master_flavor_id - uuid: clustertemplate_id - no_proxy: no_proxy - https_proxy: https_proxy - tls_disabled: tls_disabled - keypair_id: keypair_id - public: public_type - labels: labels - docker_volume_size: docker_volume_size - server_type: server_type - external_network_id: external_network_id - cluster_distro: cluster_distro - image_id: image_id - volume_driver: volume_driver - registry_enabled: registry_enabled - docker_storage_driver: docker_storage_driver - apiserver_port: apiserver_port - name: name - created_at: created_at - network_driver: network_driver - fixed_network: fixed_network - coe: coe - flavor_id: flavor_id - master_lb_enabled: master_lb_enabled - dns_nameserver: dns_nameserver Response Example ---------------- .. literalinclude:: samples/clustertemplate-get-all-resp.json :language: javascript Show details of a cluster template ================================== .. rest_method:: GET /v1/clustertemplates/{clustertemplate_ident} Get all information of a cluster template in Magnum. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - clustertemplate_ident: clustertemplate_ident Response -------- .. 
rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - clustertemplates: clustertemplate_list - insecure_registry: insecure_registry - links: links - http_proxy: http_proxy - updated_at: updated_at - floating_ip_enabled: floating_ip_enabled - fixed_subnet: fixed_subnet - master_flavor_id: master_flavor_id - uuid: clustertemplate_id - no_proxy: no_proxy - https_proxy: https_proxy - tls_disabled: tls_disabled - keypair_id: keypair_id - public: public_type - labels: labels - docker_volume_size: docker_volume_size - server_type: server_type - external_network_id: external_network_id - cluster_distro: cluster_distro - image_id: image_id - volume_driver: volume_driver - registry_enabled: registry_enabled - docker_storage_driver: docker_storage_driver - apiserver_port: apiserver_port - name: name - created_at: created_at - network_driver: network_driver - fixed_network: fixed_network - coe: coe - flavor_id: flavor_id - master_lb_enabled: master_lb_enabled - dns_nameserver: dns_nameserver Response Example ---------------- .. literalinclude:: samples/clustertemplate-create-resp.json :language: javascript Delete a cluster template ========================= .. rest_method:: DELETE /v1/clustertemplates/{clustertemplate_ident} Delete a cluster template. Response Codes -------------- .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - clustertemplate_ident: clustertemplate_ident Response -------- This request does not return anything in the response body. .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id Update information of cluster template ================================================ .. rest_method:: PATCH /v1/clustertemplates/{clustertemplate_ident} Update information of one cluster template attributes using operations including: ``add``, ``replace`` or ``remove``. 
The attributes to ``add`` and ``replace`` in the form of ``key=value`` while ``remove`` only needs the keys. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - clustertemplate_ident: clustertemplate_ident - path: path - value: value - op: op Request Example ---------------- .. literalinclude:: samples/clustertemplate-update-req.json :language: javascript Response -------- Return new cluster templates with updated attributes. .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - clustertemplates: clustertemplate_list - insecure_registry: insecure_registry - links: links - http_proxy: http_proxy - updated_at: updated_at - floating_ip_enabled: floating_ip_enabled - fixed_subnet: fixed_subnet - master_flavor_id: master_flavor_id - uuid: clustertemplate_id - no_proxy: no_proxy - https_proxy: https_proxy - tls_disabled: tls_disabled - keypair_id: keypair_id - public: public_type - labels: labels - docker_volume_size: docker_volume_size - server_type: server_type - external_network_id: external_network_id - cluster_distro: cluster_distro - image_id: image_id - volume_driver: volume_driver - registry_enabled: registry_enabled - docker_storage_driver: docker_storage_driver - apiserver_port: apiserver_port - name: name - created_at: created_at - network_driver: network_driver - fixed_network: fixed_network - coe: coe - flavor_id: flavor_id - master_lb_enabled: master_lb_enabled - dns_nameserver: dns_nameserver Response Example ---------------- .. literalinclude:: samples/clustertemplate-create-resp.json :language: javascriptmagnum-6.1.0/api-ref/source/clusters.inc0000666000175100017510000001113413244017334020213 0ustar zuulzuul00000000000000.. -*- rst -*- ================ Manage Cluster ================ Lists, creates, shows details for, updates, and deletes Cluster. Create new cluster ================== .. 
rest_method:: POST /v1/clusters Create new cluster based on cluster template. Response Codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - name: name - discovery_url: discovery_url - master_count: master_count - cluster_template_id: clustertemplate_id - node_count: node_count - create_timeout: create_timeout - keypair: keypair_id - master_flavor_id: master_flavor_id - labels: labels - flavor_id: flavor_id .. note:: Request for creating cluster is asynchronous from Newton. Request Example ---------------- .. literalinclude:: samples/cluster-create-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - uuid: cluster_id Response Example ---------------- .. literalinclude:: samples/cluster-create-resp.json :language: javascript List all clusters ================= .. rest_method:: GET /v1/clusters List all clusters in Magnum. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 Response -------- .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - clusters: cluster_list - status: status - uuid: cluster_id - links: links - stack_id: stack_id - keypair: keypair_id - master_count: master_count - cluster_template_id: clustertemplate_id - node_count: node_count - create_timeout: create_timeout - name: name Response Example ---------------- .. literalinclude:: samples/cluster-get-all-resp.json :language: javascript Show details of a cluster ========================= .. rest_method:: GET /v1/clusters/{cluster_ident} Get all information of a cluster in Magnum. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 - 404 Request ------- .. 
rest_parameters:: parameters.yaml - cluster_ident: cluster_ident Response -------- .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - status: status - uuid: cluster_id - links: links - stack_id: stack_id - created_at: created_at - api_address: api_address - discovery_url: discovery_url - updated_at: updated_at - master_count: master_count - coe_version: coe_version - keypair: keypair_id - cluster_template_id: clustertemplate_id - master_addresses: master_addresses - node_count: node_count - node_addresses: node_addresses - status_reason: status_reason - create_timeout: create_timeout - name: name Response Example ---------------- .. literalinclude:: samples/cluster-get-one-resp.json :language: javascript Delete a cluster ==================== .. rest_method:: DELETE /v1/clusters/{cluster_ident} Delete a cluster. Response Codes -------------- .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - cluster_ident: cluster_ident Response -------- This request does not return anything in the response body. .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id Update information of cluster ============================= .. rest_method:: PATCH /v1/clusters/{cluster_ident} Update information of one cluster attributes using operations including: ``add``, ``replace`` or ``remove``. The attributes to ``add`` and ``replace`` in the form of ``key=value`` while ``remove`` only needs the keys. Response Codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - cluster_ident: cluster_ident - path: path - value: value - op: op .. note:: Request for updating cluster is asynchronous from Newton. Currently only attribute ``node_count`` are supported for operation ``replace`` and ``remove``. 
Request Example ---------------- .. literalinclude:: samples/cluster-update-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - uuid: cluster_id Response Example ---------------- .. literalinclude:: samples/cluster-create-resp.json :language: javascript magnum-6.1.0/api-ref/source/parameters.yaml0000666000175100017510000004111313244017334020703 0ustar zuulzuul00000000000000# Header params request_id: type: UUID in: header required: true description: | A unique ID for tracking service request. The request ID associated with the request by default appears in the service logs. # Path params bay_ident: type: string in: path required: true description: | The UUID or name of bays in Magnum. baymodel_ident: description: | The UUID or name of baymodels in Magnum. in: path required: true type: string cluster_ident: type: string in: path required: true description: | The UUID or name of clusters in Magnum. clustertemplate_ident: type: string in: path required: true description: | The UUID or name of cluster templates in Magnum. project_id: type: string in: path required: true description: | Project ID. # Body params api_address: description: | The endpoint URL of COE API exposed to end-users. in: body format: uri required: true type: string apiserver_port: type: integer in: body required: true description: | The exposed port of COE API server. bay_create_timeout: type: integer in: body required: true description: | The timeout for bay creation in minutes. The value expected is a positive integer and the default is 60 minutes. If the timeout is reached during bay creation process, the operation will be aborted and the bay status will be set to ``CREATE_FAILED``. bay_id: type: UUID in: body required: true description: | The UUID of the bay. bay_list: type: array in: body required: true description: | The list of all bays in Magnum. The list of all clusters in Magnum. 
baymodel_id: type: UUID in: body required: true description: | The UUID of the baymodel. baymodel_list: type: array in: body required: true description: | The list of all baymodels in Magnum. binary: type: string in: body required: true description: | The name of the binary form of the Magnum service. cluster_distro: type: string in: body required: true description: | Display the attribute ``os_distro`` defined as appropriate metadata in image for the bay/cluster driver. cluster_id: type: UUID in: body required: true description: | The UUID of the cluster. cluster_list: type: array in: body required: true description: | The list of all clusters in Magnum. clusters: type: integer in: body required: true description: | The number of clusters. clustertemplate_id: type: UUID in: body required: true description: | The UUID of the cluster template. clustertemplate_list: type: array in: body required: true description: | The list of all cluster templates in Magnum. coe: type: string in: body required: true description: | Specify the Container Orchestration Engine to use. Supported COEs include ``kubernetes``, ``swarm``, ``mesos``. If your environment has additional bay/cluster drivers installed, refer to the bay/cluster driver documentation for the new COE names. coe_version: type: string in: body required: true description: | Version info of chosen COE in bay/cluster for helping client in picking the right version of client. create_timeout: type: integer in: body required: true description: | The timeout for cluster creation in minutes. The value expected is a positive integer and the default is 60 minutes. If the timeout is reached during cluster creation process, the operation will be aborted and the cluster status will be set to ``CREATE_FAILED``. created_at: description: | The date and time when the resource was created. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. 
The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. in: body required: true type: string csr: description: | Certificate Signing Request (CSR) for authenticating client key. The CSR will be used by Magnum to generate a signed certificate that client will use to communicate with the Bay/Cluster. in: body required: true type: string description: description: | Descriptive text about the Magnum service. in: body required: true type: string disabled_reason: description: | The disable reason of the service, ``null`` if the service is enabled or disabled without reason provided. in: body required: true type: string discovery_url: description: | The custom discovery url for node discovery. This is used by the COE to discover the servers that have been created to host the containers. The actual discovery mechanism varies with the COE. In some cases, Magnum fills in the server info in the discovery service. In other cases, if the ``discovery_url`` is not specified, Magnum will use the public discovery service at: :: https://discovery.etcd.io In this case, Magnum will generate a unique url here for each bay and store the info for the servers. in: body format: uri required: true type: string dns_nameserver: description: | The DNS nameserver for the servers and containers in the bay/cluster to use. This is configured in the private Neutron network for the bay/cluster. The default is ``8.8.8.8``. in: body required: true type: string docker_storage_driver: description: | The name of a driver to manage the storage for the images and the container's writable layer. The default is ``devicemapper``. in: body required: true type: string docker_volume_size: description: | The size in GB for the local storage on each server for the Docker daemon to cache the images and host the containers. Cinder volumes provide the storage. The default is 25 GB. For the ``devicemapper`` storage driver, the minimum value is 3GB. 
For the ``overlay`` storage driver, the minimum value is 1GB. in: body required: true type: integer external_network_id: description: | The name or network ID of a Neutron network to provide connectivity to the external internet for the bay/cluster. This network must be an external network, i.e. its attribute ``router:external`` must be ``True``. The servers in the bay/cluster will be connected to a private network and Magnum will create a router between this private network and the external network. This will allow the servers to download images, access discovery service, etc, and the containers to install packages, etc. In the opposite direction, floating IPs will be allocated from the external network to provide access from the external internet to servers and the container services hosted in the bay/cluster. in: body required: true type: string fixed_network: description: | The name or network ID of a Neutron network to provide connectivity to the internal network for the bay/cluster. in: body required: false type: string fixed_subnet: description: | Fixed subnet that are using to allocate network address for nodes in bay/cluster. in: body required: false type: string flavor_id: description: | The nova flavor ID or name for booting the node servers. The default is ``m1.small``. in: body required: true type: string floating_ip_enabled: description: | Whether enable or not using the floating IP of cloud provider. Some cloud providers used floating IP, some used public IP, thus Magnum provide this option for specifying the choice of using floating IP. in: body required: true type: boolean host: description: | The host for the service. in: body required: true type: string http_proxy: description: | The IP address for a proxy to use when direct http access from the servers to sites on the external internet is blocked. This may happen in certain countries or enterprises, and the proxy allows the servers and containers to access these sites. 
The format is a URL including a port number. The default is ``None``. in: body required: false type: string https_proxy: description: | The IP address for a proxy to use when direct https access from the servers to sites on the external internet is blocked. This may happen in certain countries or enterprises, and the proxy allows the servers and containers to access these sites. The format is a URL including a port number. The default is ``None``. in: body required: false type: string id_s: description: | The ID of the Magnum service. in: body required: true type: string image_id: description: | The name or UUID of the base image in Glance to boot the servers for the bay/cluster. The image must have the attribute ``os_distro`` defined as appropriate for the bay/cluster driver. in: body required: true type: string insecure_registry: description: | The URL pointing to users's own private insecure docker registry to deploy and run docker containers. in: body required: true type: string keypair_id: description: | The name of the SSH keypair to configure in the bay/cluster servers for ssh access. Users will need the key to be able to ssh to the servers in the bay/cluster. The login name is specific to the bay/cluster driver, for example with fedora-atomic image, default login name is ``fedora``. in: body required: true type: string labels: description: | Arbitrary labels in the form of ``key=value`` pairs. The accepted keys and valid values are defined in the bay/cluster drivers. They are used as a way to pass additional parameters that are specific to a bay/cluster driver. in: body required: false type: array links: description: | Links to the resources in question. in: body required: true type: array master_addresses: description: | List of floating IP of all master nodes. in: body required: true type: array master_count: description: | The number of servers that will serve as master for the bay/cluster. The default is 1. 
Set to more than 1 master to enable High Availability. If the option ``master-lb-enabled`` is specified in the baymodel/cluster template, the master servers will be placed in a load balancer pool. in: body required: true type: integer master_flavor_id: description: | The flavor of the master node for this baymodel/cluster template. in: body required: false type: string master_lb_enabled: description: | Since multiple masters may exist in a bay/cluster, a Neutron load balancer is created to provide the API endpoint for the bay/cluster and to direct requests to the masters. In some cases, such as when the LBaaS service is not available, this option can be set to ``false`` to create a bay/cluster without the load balancer. In this case, one of the masters will serve as the API endpoint. The default is ``true``, i.e. to create the load balancer for the bay. in: body required: true type: boolean mservices: description: | A list of Magnum services. in: body required: true type: array name: description: | Name of the resource. in: body required: true type: string network_driver: description: | The name of a network driver for providing the networks for the containers. Note that this is different and separate from the Neutron network for the bay/cluster. The operation and networking model are specific to the particular driver. in: body required: true type: string no_proxy: description: | When a proxy server is used, some sites should not go through the proxy and should be accessed normally. In this case, users can specify these sites as a comma separated list of IPs. The default is ``None``. in: body required: false type: string node_addresses: description: | List of floating IP of all servers that serve as node. in: body required: true type: array node_count: description: | The number of servers that will serve as node in the bay/cluster. The default is 1. in: body required: true type: integer nodes: description: | The total number of nodes including master nodes. 
in: body required: true type: integer op: description: | The operation used to modify resource's attributes. Supported operations are following: ``add``, ``replace`` and ``remove``. In case of ``remove``, users only need to provide ``path`` for deleting attribute. in: body required: true type: string path: description: | Resource attribute's name. in: body required: true type: string pem: description: | CA certificate for the bay/cluster. in: body required: true type: string public_type: description: | Access to a baymodel/cluster template is normally limited to the admin, owner or users within the same tenant as the owners. Setting this flag makes the baymodel/cluster template public and accessible by other users. The default is not public. in: body required: true type: boolean registry_enabled: description: | Docker images by default are pulled from the public Docker registry, but in some cases, users may want to use a private registry. This option provides an alternative registry based on the Registry V2: Magnum will create a local registry in the bay/cluster backed by swift to host the images. The default is to use the public registry. in: body required: false type: boolean report_count: description: | The total number of report. in: body required: true type: integer server_type: description: | The servers in the bay/cluster can be ``vm`` or ``baremetal``. This parameter selects the type of server to create for the bay/cluster. The default is ``vm``. in: body required: true type: string stack_id: description: | The reference UUID of orchestration stack from Heat orchestration service. in: body required: true type: UUID state: description: | The current state of Magnum services. in: body required: true type: string status: description: | The current state of the bay/cluster. in: body required: true type: string status_reason: description: | The reason of bay/cluster current status. 
in: body required: true type: string tls_disabled: description: | Transport Layer Security (TLS) is normally enabled to secure the bay/cluster. In some cases, users may want to disable TLS in the bay/cluster, for instance during development or to troubleshoot certain problems. Specifying this parameter will disable TLS so that users can access the COE endpoints without a certificate. The default is TLS enabled. in: body required: true type: boolean updated_at: description: | The date and time when the resource was updated. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. In the previous example, the offset value is ``-05:00``. If the ``updated_at`` date and time stamp is not set, its value is ``null``. in: body required: true type: string value: description: | Resource attribute's value. in: body required: true type: string version: description: | The version. in: body required: true type: string version_id: type: string in: body required: true description: > A common name for the version in question. Informative only, it has no real semantic meaning. version_max: type: string in: body required: true description: > If this version of the API supports microversions, the maximum microversion that is supported. This will be the empty string if microversions are not supported. version_min: type: string in: body required: true description: > If this version of the API supports microversions, the minimum microversion that is supported. This will be the empty string if microversions are not supported. version_status: type: string in: body required: true description: | The status of this API version. 
This can be one of: - ``CURRENT``: this is the preferred version of the API to use - ``SUPPORTED``: this is an older, but still supported version of the API - ``DEPRECATED``: a deprecated version of the API that is slated for removal volume_driver: type: string in: body required: true description: > The name of a volume driver for managing the persistent storage for the containers. The functionality supported are specific to the driver. magnum-6.1.0/api-ref/source/bays.inc0000666000175100017510000001040613244017334017306 0ustar zuulzuul00000000000000.. -*- rst -*- ============ Manage Bay ============ Lists, creates, shows details for, updates, and deletes Bay. Create new bay ============== .. rest_method:: POST /v1/bays Create new bay based on bay model. Response Codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - name: name - discovery_url: discovery_url - master_count: master_count - baymodel_id: baymodel_id - node_count: node_count - bay_create_timeout: bay_create_timeout .. note:: Request for creating bay is asynchronous from Newton. Request Example ---------------- .. literalinclude:: samples/bay-create-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - uuid: bay_id Response Example ---------------- .. literalinclude:: samples/bay-create-resp.json :language: javascript List all bays ==================== .. rest_method:: GET /v1/bays/ List all bays in Magnum. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 Response -------- .. 
rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - bays: bay_list - status: status - uuid: bay_id - links: links - stack_id: stack_id - master_count: master_count - baymodel_id: baymodel_id - node_count: node_count - bay_create_timeout: bay_create_timeout - name: name Response Example ---------------- .. literalinclude:: samples/bay-get-all-resp.json :language: javascript Show details of a bay ============================= .. rest_method:: GET /v1/bays/{bay_ident} Get all information of a bay in Magnum. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - bay_ident: bay_ident Response -------- .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - status: status - uuid: bay_id - links: links - stack_id: stack_id - created_at: created_at - api_address: api_address - discovery_url: discovery_url - updated_at: updated_at - master_count: master_count - coe_version: coe_version - baymodel_id: baymodel_id - master_addresses: master_addresses - node_count: node_count - node_addresses: node_addresses - status_reason: status_reason - bay_create_timeout: bay_create_timeout - name: name Response Example ---------------- .. literalinclude:: samples/bay-get-one-resp.json :language: javascript Delete a bay ==================== .. rest_method:: DELETE /v1/bays/{bay_ident} Delete a bay. Response Codes -------------- .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - bay_ident: bay_ident Response -------- This request does not return anything in the response body. .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id Update information of bay ================================= .. 
rest_method:: PATCH /v1/bays/{bay_ident} Update information of one bay attributes using operations including: ``add``, ``replace`` or ``remove``. The attributes to ``add`` and ``replace`` in the form of ``key=value`` while ``remove`` only needs the keys. Response Codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - bay_ident: bay_ident - path: path - value: value - op: op .. note:: Request for updating bay is asynchronous from Newton. Currently only attribute ``node_count`` are supported for operation ``replace`` and ``remove``. Request Example ---------------- .. literalinclude:: samples/bay-update-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - uuid: bay_id Response Example ---------------- .. literalinclude:: samples/bay-create-resp.json :language: javascript magnum-6.1.0/api-ref/source/conf.py0000666000175100017510000001720413244017334017157 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Magnum documentation build configuration file # # This file is execfile()d with the current directory set to # its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. 
import os import subprocess import sys import warnings extensions = [ 'os_api_ref', ] import openstackdocstheme # noqa html_theme = 'openstackdocs' html_theme_path = [openstackdocstheme.get_html_theme_path()] html_theme_options = { "sidebar_mode": "toc", } # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../')) sys.path.insert(0, os.path.abspath('../')) sys.path.insert(0, os.path.abspath('./')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # # source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Container Infrastructure Management API Reference' copyright = u'2010-present, OpenStack Foundation' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # from magnum.version import version_info # The full version, including alpha/beta/rc tags. release = version_info.release_string() # The short X.Y version. version = version_info.version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # The reST default role (used for this markup: `text`) to use # for all documents. 
# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # Config logABug feature # source tree giturl = ( u'https://git.openstack.org/cgit/openstack/magnum/tree/api-ref/source') # html_context allows us to pass arbitrary values into the html template html_context = {'bug_tag': 'api-ref', 'giturl': giturl, 'bug_project': 'magnum'} # -- Options for man page output ---------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", "-n1"] try: html_last_updated_fmt = subprocess.check_output(git_cmd).decode('utf-8') except Exception: warnings.warn('Cannot get last updated time from git repository. ' 'Not setting "html_last_updated_fmt".') # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_use_modindex = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'magnumdoc' # -- Options for LaTeX output ------------------------------------------------- # The paper size ('letter' or 'a4'). 
# latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). # latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'Magnum.tex', u'OpenStack Container Infrastructure Management API Documentation', u'OpenStack Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # Additional stuff for the LaTeX preamble. # latex_preamble = '' # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_use_modindex = True magnum-6.1.0/api-ref/source/baymodels.inc0000666000175100017510000001753013244017334020334 0ustar zuulzuul00000000000000.. -*- rst -*- =================== Manage Baymodels =================== Lists, creates, shows details for, updates, and deletes baymodels. Create new baymodel ==================== .. rest_method:: POST /v1/baymodels/ Create new baymodel. Response Codes -------------- .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. 
rest_parameters:: parameters.yaml - labels: labels - fixed_subnet: fixed_subnet - master_flavor_id: master_flavor_id - no_proxy: no_proxy - https_proxy: https_proxy - http_proxy: http_proxy - tls_disabled: tls_disabled - keypair_id: keypair_id - public: public_type - docker_volume_size: docker_volume_size - server_type: server_type - external_network_id: external_network_id - image_id: image_id - volume_driver: volume_driver - registry_enabled: registry_enabled - docker_storage_driver: docker_storage_driver - name: name - network_driver: network_driver - fixed_network: fixed_network - coe: coe - flavor_id: flavor_id - master_lb_enabled: master_lb_enabled - dns_nameserver: dns_nameserver Request Example ---------------- .. literalinclude:: samples/baymodel-create-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - insecure_registry: insecure_registry - links: links - http_proxy: http_proxy - updated_at: updated_at - floating_ip_enabled: floating_ip_enabled - fixed_subnet: fixed_subnet - master_flavor_id: master_flavor_id - uuid: baymodel_id - no_proxy: no_proxy - https_proxy: https_proxy - tls_disabled: tls_disabled - keypair_id: keypair_id - public: public_type - labels: labels - docker_volume_size: docker_volume_size - server_type: server_type - external_network_id: external_network_id - cluster_distro: cluster_distro - image_id: image_id - volume_driver: volume_driver - registry_enabled: registry_enabled - docker_storage_driver: docker_storage_driver - apiserver_port: apiserver_port - name: name - created_at: created_at - network_driver: network_driver - fixed_network: fixed_network - coe: coe - flavor_id: flavor_id - master_lb_enabled: master_lb_enabled - dns_nameserver: dns_nameserver Response Example ---------------- .. literalinclude:: samples/baymodel-create-resp.json :language: javascript List all baymodels ================== .. 
rest_method:: GET /v1/baymodels/ List all available baymodels in Magnum. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 Response -------- .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - baymodels: baymodel_list - insecure_registry: insecure_registry - links: links - http_proxy: http_proxy - updated_at: updated_at - floating_ip_enabled: floating_ip_enabled - fixed_subnet: fixed_subnet - master_flavor_id: master_flavor_id - uuid: baymodel_id - no_proxy: no_proxy - https_proxy: https_proxy - tls_disabled: tls_disabled - keypair_id: keypair_id - public: public_type - labels: labels - docker_volume_size: docker_volume_size - server_type: server_type - external_network_id: external_network_id - cluster_distro: cluster_distro - image_id: image_id - volume_driver: volume_driver - registry_enabled: registry_enabled - docker_storage_driver: docker_storage_driver - apiserver_port: apiserver_port - name: name - created_at: created_at - network_driver: network_driver - fixed_network: fixed_network - coe: coe - flavor_id: flavor_id - master_lb_enabled: master_lb_enabled - dns_nameserver: dns_nameserver Response Example ---------------- .. literalinclude:: samples/baymodel-get-all-resp.json :language: javascript Show details of a baymodel ========================== .. rest_method:: GET /v1/baymodels/{baymodel_ident} Get all information of a baymodel in Magnum. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - baymodel_ident: baymodel_ident Response -------- .. 
rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - baymodels: baymodel_list - insecure_registry: insecure_registry - links: links - http_proxy: http_proxy - updated_at: updated_at - floating_ip_enabled: floating_ip_enabled - fixed_subnet: fixed_subnet - master_flavor_id: master_flavor_id - uuid: baymodel_id - no_proxy: no_proxy - https_proxy: https_proxy - tls_disabled: tls_disabled - keypair_id: keypair_id - public: public_type - labels: labels - docker_volume_size: docker_volume_size - server_type: server_type - external_network_id: external_network_id - cluster_distro: cluster_distro - image_id: image_id - volume_driver: volume_driver - registry_enabled: registry_enabled - docker_storage_driver: docker_storage_driver - apiserver_port: apiserver_port - name: name - created_at: created_at - network_driver: network_driver - fixed_network: fixed_network - coe: coe - flavor_id: flavor_id - master_lb_enabled: master_lb_enabled - dns_nameserver: dns_nameserver Response Example ---------------- .. literalinclude:: samples/baymodel-create-resp.json :language: javascript Delete a baymodel ================== .. rest_method:: DELETE /v1/baymodels/{baymodel_ident} Delete a baymodel. Response Codes -------------- .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - baymodel_ident: baymodel_ident Response -------- This request does not return anything in the response body. .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id Update information of baymodel =============================== .. rest_method:: PATCH /v1/baymodels/{baymodel_ident} Update information of one baymodel attributes using operations including: ``add``, ``replace`` or ``remove``. The attributes to ``add`` and ``replace`` in the form of ``key=value`` while ``remove`` only needs the keys. Response Codes -------------- .. 
rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - baymodel_ident: baymodel_ident - path: path - value: value - op: op Request Example ---------------- .. literalinclude:: samples/baymodel-update-req.json :language: javascript Response -------- Return new baymodel with updated attributes. .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - baymodels: baymodel_list - insecure_registry: insecure_registry - links: links - http_proxy: http_proxy - updated_at: updated_at - floating_ip_enabled: floating_ip_enabled - fixed_subnet: fixed_subnet - master_flavor_id: master_flavor_id - uuid: baymodel_id - no_proxy: no_proxy - https_proxy: https_proxy - tls_disabled: tls_disabled - keypair_id: keypair_id - public: public_type - labels: labels - docker_volume_size: docker_volume_size - server_type: server_type - external_network_id: external_network_id - cluster_distro: cluster_distro - image_id: image_id - volume_driver: volume_driver - registry_enabled: registry_enabled - docker_storage_driver: docker_storage_driver - apiserver_port: apiserver_port - name: name - created_at: created_at - network_driver: network_driver - fixed_network: fixed_network - coe: coe - flavor_id: flavor_id - master_lb_enabled: master_lb_enabled - dns_nameserver: dns_nameserver Response Example ---------------- .. literalinclude:: samples/baymodel-create-resp.json :language: javascriptmagnum-6.1.0/api-ref/source/index.rst0000666000175100017510000000064513244017334017522 0ustar zuulzuul00000000000000:tocdepth: 2 ======================================== Container Infrastructure Management API ======================================== .. rest_expand_all:: .. include:: versions.inc .. include:: urls.inc .. include:: bays.inc .. include:: baymodels.inc .. include:: clusters.inc .. include:: clustertemplates.inc .. include:: certificates.inc .. include:: mservices.inc .. 
include:: stats.inc .. include:: quotas.inc magnum-6.1.0/api-ref/source/certificates.inc0000666000175100017510000000521313244017334021015 0ustar zuulzuul00000000000000.. -*- rst -*- ===================================== Manage certificates for bay/cluster ===================================== Generates and show CA certificates for bay/cluster. Show details about the CA certificate for a bay/cluster ======================================================= .. rest_method:: GET /v1/certificates/{bay_uuid/cluster_uuid} Show CA certificate details that are associated with the created bay/cluster. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - bay_uuid: bay_id .. note:: After Newton, all terms related bay/baymodel will be renamed to cluster and cluster template. Response -------- .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - cluster_uuid: cluster_id - pem: pem - bay_uuid: bay_id - links: links .. note:: After Newton, all terms related bay/baymodel will be renamed to cluster and cluster template. Response Example ---------------- .. literalinclude:: samples/certificates-ca-show-resp.json :language: javascript Generate the CA certificate for a bay/cluster ============================================= .. rest_method:: POST /v1/certificates/ Sign client key and generate the CA certificate for a bay/cluster Response Codes -------------- .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - bay_uuid: bay_id - csr: csr .. note:: After Newton, all terms related bay/baymodel will be renamed to cluster and cluster template. Request Example ---------------- .. literalinclude:: samples/certificates-ca-sign-req.json :language: javascript Response -------- .. 
rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - pem: pem - bay_uuid: bay_id - links: links - csr: csr .. note:: After Newton, all terms related bay/baymodel will be renamed to cluster and cluster template. Response Example ---------------- .. literalinclude:: samples/certificates-ca-sign-resp.json :language: javascript Rotate the CA certificate for a bay/cluster =========================================== .. rest_method:: PATCH /v1/certificates/{bay_uuid/cluster_uuid} Rotate the CA certificate for a bay/cluster and invalidate all user certificates. Response Codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 Request ------- .. rest_parameters:: parameters.yaml - cluster: cluster_id magnum-6.1.0/api-ref/source/quotas.inc0000777000175100017510000000451213244017334017670 0ustar zuulzuul00000000000000.. -*- rst -*- ================= Magnum Quota API ================= Lists, creates, shows details, and updates Quotas. Set new quota ================== .. rest_method:: POST /v1/quotas Create new quota for a project. Response Codes -------------- .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request Example ---------------- .. literalinclude:: samples/quota-create-req.json :language: javascript Response Example ---------------- .. literalinclude:: samples/quota-create-resp.json :language: javascript List all quotas ================ .. rest_method:: GET /v1/quotas List all quotas in Magnum. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 Response Example ---------------- .. literalinclude:: samples/quota-get-all-resp.json :language: javascript Show details of a quota ========================= .. rest_method:: GET /v1/quotas/{project_id}/{resource} Get quota information for the given project_id and resource. Response Codes -------------- .. 
rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 - 404 Response Example ---------------- .. literalinclude:: samples/quota-get-one-resp.json :language: javascript Update a resource quota ============================= .. rest_method:: PATCH /v1/quotas/{project_id}/{resource} Update resource quota for the given project id. Response Codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request Example ---------------- .. literalinclude:: samples/quota-update-req.json :language: javascript Response Example ---------------- .. literalinclude:: samples/quota-update-resp.json :language: javascript Delete a resource quota ============================ .. rest_method:: DELETE /v1/quotas/{project_id}/{resource} Delete a resource quota for the given project id. Response Codes -------------- .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request Example ---------------- .. literalinclude:: samples/quota-delete-req.json :language: javascriptmagnum-6.1.0/api-ref/source/status.yaml0000666000175100017510000000340013244017334020060 0ustar zuulzuul00000000000000################# # Success Codes # ################# 200: default: | Request was successful. 201: default: | Resource was created and is ready to use. 202: default: | Request was accepted for processing, but the processing has not been completed. A 'location' header is included in the response which contains a link to check the progress of the request. 204: default: | The server has fulfilled the request by deleting the resource. 300: default: | There are multiple choices for resources. The request has to be more specific to successfully retrieve one of these resources. 302: default: | The response is about a redirection hint. 
The header of the response usually contains a 'location' value where requesters can check to track the real location of the resource. ################# # Error Codes # ################# 400: default: | Some content in the request was invalid. resource_signal: | The target resource doesn't support receiving a signal. 401: default: | User must authenticate before making a request. 403: default: | Policy does not allow current user to do this operation. 404: default: | The requested resource could not be found. 405: default: | Method is not valid for this endpoint. 409: default: | This operation conflicted with another operation on this resource. duplicate_zone: | There is already a zone with this name. 500: default: | Something went wrong inside the service. This should not happen usually. If it does happen, it means the server has experienced some serious problems. 503: default: | Service is not available. This is mostly caused by service configuration errors which prevents the service from successful start up. magnum-6.1.0/api-ref/source/urls.inc0000666000175100017510000000261013244017334017333 0ustar zuulzuul00000000000000.. -*- rst -*- ================= Magnum Base URLs ================= All API calls through the rest of this document require authentication with the OpenStack Identity service. They also required a ``url`` that is extracted from the Identity token of type ``container-infra``. This will be the root url that every call below will be added to build a full path. Note that if using OpenStack Identity service API v2, ``url`` can be represented via ``adminURL``, ``internalURL`` or ``publicURL`` in endpoint catalog. In Identity service API v3, ``url`` is represented with field ``interface`` including ``admin``, ``internal`` and ``public``. For instance, if the ``url`` is ``http://my-container-infra.org/magnum/v1`` then the full API call for ``/clusters`` is ``http://my-container-infra.org/magnum/v1/clusters``. 
Depending on the deployment the container infrastructure management service url might be http or https, a custom port, a custom path, and include your project id. The only way to know the urls for your deployment is by using the service catalog. The container infrastructure management URL should never be hard coded in applications, even if they are only expected to work at a single site. It should always be discovered from the Identity token. As such, for the rest of this document we will be using short hand where ``GET /clusters`` really means ``GET {your_container_infra_url}/clusters``. magnum-6.1.0/api-ref/source/samples/0000775000175100017510000000000013244017675017326 5ustar zuulzuul00000000000000magnum-6.1.0/api-ref/source/samples/mservice-get-resp.json0000666000175100017510000000047513244017334023562 0ustar zuulzuul00000000000000{ "mservices":[ { "binary":"magnum-conductor", "created_at":"2016-08-23T10:52:13+00:00", "state":"up", "report_count":2179, "updated_at":"2016-08-25T01:13:16+00:00", "host":"magnum-manager", "disabled_reason":null, "id":1 } ] }magnum-6.1.0/api-ref/source/samples/quota-create-resp.json0000666000175100017510000000026713244017334023561 0ustar zuulzuul00000000000000{ "resource": "Cluster", "created_at": "2017-01-17T17:35:48+00:00", "updated_at": null, "hard_limit": 1, "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", "id": 26 }magnum-6.1.0/api-ref/source/samples/certificates-ca-show-resp.json0000666000175100017510000000104013244017334025161 0ustar zuulzuul00000000000000{ "cluster_uuid":"0b4b766f-1500-44b3-9804-5a6e12fe6df4" "pem":"-----BEGIN CERTIFICATE-----\nMIICzDCCAbSgAwIBAgIQOOkVcEN7TNa9E80GoUs4xDANBgkqhkiG9w0BAQsFADAO\n-----END CERTIFICATE-----\n", "bay_uuid":"0b4b766f-1500-44b3-9804-5a6e12fe6df4", "links":[ { "href":"http://10.164.180.104:9511/v1/certificates/0b4b766f-1500-44b3-9804-5a6e12fe6df4", "rel":"self" }, { "href":"http://10.164.180.104:9511/certificates/0b4b766f-1500-44b3-9804-5a6e12fe6df4", "rel":"bookmark" } ] } 
magnum-6.1.0/api-ref/source/samples/baymodel-update-req.json0000666000175100017510000000026313244017334024055 0ustar zuulzuul00000000000000[ { "path":"/master_lb_enabled", "value":"True", "op":"replace" }, { "path":"/registry_enabled", "value":"True", "op":"replace" } ]magnum-6.1.0/api-ref/source/samples/baymodel-get-all-resp.json0000666000175100017510000000276013244017334024306 0ustar zuulzuul00000000000000{ "baymodels":[ { "insecure_registry":null, "links":[ { "href":"http://10.164.180.104:9511/v1/baymodels/085e1c4d-4f68-4bfd-8462-74b9e14e4f39", "rel":"self" }, { "href":"http://10.164.180.104:9511/baymodels/085e1c4d-4f68-4bfd-8462-74b9e14e4f39", "rel":"bookmark" } ], "http_proxy":"http://10.164.177.169:8080", "updated_at":null, "floating_ip_enabled":true, "fixed_subnet":null, "master_flavor_id":null, "uuid":"085e1c4d-4f68-4bfd-8462-74b9e14e4f39", "no_proxy":"10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost", "https_proxy":"http://10.164.177.169:8080", "tls_disabled":false, "keypair_id":"kp", "public":false, "labels":{ }, "docker_volume_size":3, "server_type":"vm", "external_network_id":"public", "cluster_distro":"fedora-atomic", "image_id":"fedora-atomic-latest", "volume_driver":"cinder", "registry_enabled":false, "docker_storage_driver":"devicemapper", "apiserver_port":null, "name":"k8s-bm2", "created_at":"2016-08-29T02:08:08+00:00", "network_driver":"flannel", "fixed_network":null, "coe":"kubernetes", "flavor_id":"m1.small", "master_lb_enabled":true, "dns_nameserver":"8.8.8.8" } ] }magnum-6.1.0/api-ref/source/samples/clustertemplate-create-req.json0000666000175100017510000000130313244017334025453 0ustar zuulzuul00000000000000{ "labels":{ }, "fixed_subnet":null, "master_flavor_id":null, "no_proxy":"10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost", "https_proxy":"http://10.164.177.169:8080", "tls_disabled":false, "keypair_id":"kp", "public":false, "http_proxy":"http://10.164.177.169:8080", "docker_volume_size":3, "server_type":"vm", "external_network_id":"public", 
"image_id":"fedora-atomic-latest", "volume_driver":"cinder", "registry_enabled":false, "docker_storage_driver":"devicemapper", "name":"k8s-bm2", "network_driver":"flannel", "fixed_network":null, "coe":"kubernetes", "flavor_id":"m1.small", "master_lb_enabled":true, "dns_nameserver":"8.8.8.8" }magnum-6.1.0/api-ref/source/samples/cluster-get-one-resp.json0000666000175100017510000000174313244017334024204 0ustar zuulzuul00000000000000{ "status":"CREATE_COMPLETE", "uuid":"746e779a-751a-456b-a3e9-c883d734946f", "links":[ { "href":"http://10.164.180.104:9511/v1/clusters/746e779a-751a-456b-a3e9-c883d734946f", "rel":"self" }, { "href":"http://10.164.180.104:9511/clusters/746e779a-751a-456b-a3e9-c883d734946f", "rel":"bookmark" } ], "stack_id":"9c6f1169-7300-4d08-a444-d2be38758719", "created_at":"2016-08-29T06:51:31+00:00", "api_address":"https://172.24.4.6:6443", "discovery_url":"https://discovery.etcd.io/cbeb580da58915809d59ee69348a84f3", "updated_at":"2016-08-29T06:53:24+00:00", "master_count":1, "coe_version": "v1.2.0", "keypair":"my_keypair" "cluster_template_id":"0562d357-8641-4759-8fed-8173f02c9633", "master_addresses":[ "172.24.4.6" ], "node_count":1, "node_addresses":[ "172.24.4.13" ], "status_reason":"Stack CREATE completed successfully", "create_timeout":60, "name":"k8s" } magnum-6.1.0/api-ref/source/samples/bay-update-req.json0000666000175100017510000000011713244017334023032 0ustar zuulzuul00000000000000[ { "path":"/node_count", "value":2, "op":"replace" } ]magnum-6.1.0/api-ref/source/samples/cluster-update-req.json0000666000175100017510000000011713244017334023740 0ustar zuulzuul00000000000000[ { "path":"/node_count", "value":2, "op":"replace" } ]magnum-6.1.0/api-ref/source/samples/quota-update-resp.json0000666000175100017510000000031713244017334023574 0ustar zuulzuul00000000000000{ "resource": "Cluster", "created_at": "2017-01-17T17:35:49+00:00", "updated_at": "2017-01-17T17:38:20+00:00", "hard_limit": 10, "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", "id": 26 
}magnum-6.1.0/api-ref/source/samples/baymodel-create-req.json0000666000175100017510000000130313244017334024032 0ustar zuulzuul00000000000000{ "labels":{ }, "fixed_subnet":null, "master_flavor_id":null, "no_proxy":"10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost", "https_proxy":"http://10.164.177.169:8080", "tls_disabled":false, "keypair_id":"kp", "public":false, "http_proxy":"http://10.164.177.169:8080", "docker_volume_size":3, "server_type":"vm", "external_network_id":"public", "image_id":"fedora-atomic-latest", "volume_driver":"cinder", "registry_enabled":false, "docker_storage_driver":"devicemapper", "name":"k8s-bm2", "network_driver":"flannel", "fixed_network":null, "coe":"kubernetes", "flavor_id":"m1.small", "master_lb_enabled":true, "dns_nameserver":"8.8.8.8" }magnum-6.1.0/api-ref/source/samples/quota-create-req.json0000666000175100017510000000014613244017334023373 0ustar zuulzuul00000000000000{ "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", "resource": "Cluster", "hard_limit": 10 }magnum-6.1.0/api-ref/source/samples/versions-01-get-resp.json0000666000175100017510000000333313244017334024027 0ustar zuulzuul00000000000000{ "media_types":[ { "base":"application/json", "type":"application/vnd.openstack.magnum.v1+json" } ], "links":[ { "href":"http://10.164.180.104:9511/v1/", "rel":"self" }, { "href":"http://docs.openstack.org/developer/magnum/dev/api-spec-v1.html", "type":"text/html", "rel":"describedby" } ], "mservices":[ { "href":"http://10.164.180.104:9511/v1/mservices/", "rel":"self" }, { "href":"http://10.164.180.104:9511/mservices/", "rel":"bookmark" } ], "bays":[ { "href":"http://10.164.180.104:9511/v1/bays/", "rel":"self" }, { "href":"http://10.164.180.104:9511/bays/", "rel":"bookmark" } ], "clustertemplates":[ { "href":"http://10.164.180.104:9511/v1/clustertemplates/", "rel":"self" }, { "href":"http://10.164.180.104:9511/clustertemplates/", "rel":"bookmark" } ], "certificates":[ { "href":"http://10.164.180.104:9511/v1/certificates/", "rel":"self" }, { 
"href":"http://10.164.180.104:9511/certificates/", "rel":"bookmark" } ], "clusters":[ { "href":"http://10.164.180.104:9511/v1/clusters/", "rel":"self" }, { "href":"http://10.164.180.104:9511/clusters/", "rel":"bookmark" } ], "baymodels":[ { "href":"http://10.164.180.104:9511/v1/baymodels/", "rel":"self" }, { "href":"http://10.164.180.104:9511/baymodels/", "rel":"bookmark" } ], "id":"v1" }magnum-6.1.0/api-ref/source/samples/baymodel-create-resp.json0000666000175100017510000000232413244017334024220 0ustar zuulzuul00000000000000{ "insecure_registry":null, "links":[ { "href":"http://10.164.180.104:9511/v1/baymodels/085e1c4d-4f68-4bfd-8462-74b9e14e4f39", "rel":"self" }, { "href":"http://10.164.180.104:9511/baymodels/085e1c4d-4f68-4bfd-8462-74b9e14e4f39", "rel":"bookmark" } ], "http_proxy":"http://10.164.177.169:8080", "updated_at":null, "floating_ip_enabled":true, "fixed_subnet":null, "master_flavor_id":null, "uuid":"085e1c4d-4f68-4bfd-8462-74b9e14e4f39", "no_proxy":"10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost", "https_proxy":"http://10.164.177.169:8080", "tls_disabled":false, "keypair_id":"kp", "public":false, "labels":{ }, "docker_volume_size":3, "server_type":"vm", "external_network_id":"public", "cluster_distro":"fedora-atomic", "image_id":"fedora-atomic-latest", "volume_driver":"cinder", "registry_enabled":false, "docker_storage_driver":"devicemapper", "apiserver_port":null, "name":"k8s-bm2", "created_at":"2016-08-29T02:08:08+00:00", "network_driver":"flannel", "fixed_network":null, "coe":"kubernetes", "flavor_id":"m1.small", "master_lb_enabled":true, "dns_nameserver":"8.8.8.8" }magnum-6.1.0/api-ref/source/samples/quota-get-one-resp.json0000666000175100017510000000031713244017334023650 0ustar zuulzuul00000000000000{ "resource": "Cluster", "created_at": "2017-01-17T17:35:49+00:00", "updated_at": "2017-01-17T17:38:20+00:00", "hard_limit": 10, "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", "id": 26 
}magnum-6.1.0/api-ref/source/samples/bay-get-one-resp.json0000666000175100017510000000167413244017334023301 0ustar zuulzuul00000000000000{ "status":"CREATE_COMPLETE", "uuid":"746e779a-751a-456b-a3e9-c883d734946f", "links":[ { "href":"http://10.164.180.104:9511/v1/bays/746e779a-751a-456b-a3e9-c883d734946f", "rel":"self" }, { "href":"http://10.164.180.104:9511/bays/746e779a-751a-456b-a3e9-c883d734946f", "rel":"bookmark" } ], "stack_id":"9c6f1169-7300-4d08-a444-d2be38758719", "created_at":"2016-08-29T06:51:31+00:00", "api_address":"https://172.24.4.6:6443", "discovery_url":"https://discovery.etcd.io/cbeb580da58915809d59ee69348a84f3", "updated_at":"2016-08-29T06:53:24+00:00", "master_count":1, "coe_version": "v1.2.0", "baymodel_id":"0562d357-8641-4759-8fed-8173f02c9633", "master_addresses":[ "172.24.4.6" ], "node_count":1, "node_addresses":[ "172.24.4.13" ], "status_reason":"Stack CREATE completed successfully", "bay_create_timeout":60, "name":"k8s" }magnum-6.1.0/api-ref/source/samples/quota-get-all-resp.json0000666000175100017510000000042713244017334023641 0ustar zuulzuul00000000000000{ "quotas": [ { "resource": "Cluster", "created_at": "2017-01-17T17:35:49+00:00", "updated_at": "2017-01-17T17:38:21+00:00", "hard_limit": 10, "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", "id": 26 } ] }magnum-6.1.0/api-ref/source/samples/cluster-get-all-resp.json0000666000175100017510000000135013244017334024165 0ustar zuulzuul00000000000000{ "clusters":[ { "status":"CREATE_IN_PROGRESS", "cluster_template_id":"0562d357-8641-4759-8fed-8173f02c9633", "uuid":"731387cf-a92b-4c36-981e-3271d63e5597", "links":[ { "href":"http://10.164.180.104:9511/v1/bays/731387cf-a92b-4c36-981e-3271d63e5597", "rel":"self" }, { "href":"http://10.164.180.104:9511/bays/731387cf-a92b-4c36-981e-3271d63e5597", "rel":"bookmark" } ], "stack_id":"31c1ee6c-081e-4f39-9f0f-f1d87a7defa1", "keypair":"my_keypair", "master_count":1, "create_timeout":60, "node_count":1, "name":"k8s" } ] } 
magnum-6.1.0/api-ref/source/samples/quota-update-req.json0000777000175100017510000000014613244017334023415 0ustar zuulzuul00000000000000{ "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", "resource": "Cluster", "hard_limit": 10 }magnum-6.1.0/api-ref/source/samples/certificates-ca-sign-resp.json0000666000175100017510000000117513244017334025152 0ustar zuulzuul00000000000000{ "pem":"-----BEGIN CERTIFICATE-----\nMIIDxDCCAqygAwIBAgIRALgUbIjdKUy8lqErJmCxVfkwDQYJKoZIhvcNAQELBQAw\n-----END CERTIFICATE-----\n", "bay_uuid":"0b4b766f-1500-44b3-9804-5a6e12fe6df4", "links":[ { "href":"http://10.164.180.104:9511/v1/certificates/0b4b766f-1500-44b3-9804-5a6e12fe6df4", "rel":"self" }, { "href":"http://10.164.180.104:9511/certificates/0b4b766f-1500-44b3-9804-5a6e12fe6df4", "rel":"bookmark" } ], "csr":"-----BEGIN CERTIFICATE REQUEST-----\nMIIEfzCCAmcCAQAwFDESMBAGA1UEAxMJWW91ciBOYW1lMIICIjANBgkqhkiG9w0B\n-----END CERTIFICATE REQUEST-----\n" }magnum-6.1.0/api-ref/source/samples/quota-delete-req.json0000777000175100017510000000012113244017334023366 0ustar zuulzuul00000000000000{ "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", "resource": "Cluster" }magnum-6.1.0/api-ref/source/samples/stats-get-resp.json0000666000175100017510000000004413244017334023073 0ustar zuulzuul00000000000000{ "clusters": 1, "nodes": 2 } magnum-6.1.0/api-ref/source/samples/bay-create-resp.json0000666000175100017510000000006413244017334023176 0ustar zuulzuul00000000000000{ "uuid":"746e779a-751a-456b-a3e9-c883d734946f" }magnum-6.1.0/api-ref/source/samples/cluster-create-req.json0000666000175100017510000000041613244017334023723 0ustar zuulzuul00000000000000{ "name":"k8s", "discovery_url":null, "master_count":2, "cluster_template_id":"0562d357-8641-4759-8fed-8173f02c9633", "node_count":2, "create_timeout":60, "keypair":"my_keypair", "master_flavor_id":null, "labels":{ }, "flavor_id":null } magnum-6.1.0/api-ref/source/samples/clustertemplate-update-req.json0000666000175100017510000000026313244017334025476 0ustar 
zuulzuul00000000000000[ { "path":"/master_lb_enabled", "value":"True", "op":"replace" }, { "path":"/registry_enabled", "value":"True", "op":"replace" } ]magnum-6.1.0/api-ref/source/samples/versions-get-resp.json0000666000175100017510000000064413244017334023613 0ustar zuulzuul00000000000000{ "versions":[ { "status":"CURRENT", "min_version":"1.1", "max_version":"1.4", "id":"v1", "links":[ { "href":"http://10.164.180.104:9511/v1/", "rel":"self" } ] } ], "name":"OpenStack Magnum API", "description":"Magnum is an OpenStack project which aims to provide container management." }magnum-6.1.0/api-ref/source/samples/certificates-ca-sign-req.json0000666000175100017510000000031713244017334024765 0ustar zuulzuul00000000000000{ "bay_uuid":"0b4b766f-1500-44b3-9804-5a6e12fe6df4", "csr":"-----BEGIN CERTIFICATE REQUEST-----\nMIIEfzCCAmcCAQAwFDESMBAGA1UEAxMJWW91ciBOYW1lMIICIjANBgkqhkiG9w0B\n-----END CERTIFICATE REQUEST-----\n" }magnum-6.1.0/api-ref/source/samples/clustertemplate-get-all-resp.json0000666000175100017510000000300513244017334025720 0ustar zuulzuul00000000000000{ "clustertemplates":[ { "insecure_registry":null, "links":[ { "href":"http://10.164.180.104:9511/v1/clustertemplates/0562d357-8641-4759-8fed-8173f02c9633", "rel":"self" }, { "href":"http://10.164.180.104:9511/clustertemplates/0562d357-8641-4759-8fed-8173f02c9633", "rel":"bookmark" } ], "http_proxy":"http://10.164.177.169:8080", "updated_at":null, "floating_ip_enabled":true, "fixed_subnet":null, "master_flavor_id":null, "uuid":"0562d357-8641-4759-8fed-8173f02c9633", "no_proxy":"10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost", "https_proxy":"http://10.164.177.169:8080", "tls_disabled":false, "keypair_id":"kp", "public":false, "labels":{ }, "docker_volume_size":3, "server_type":"vm", "external_network_id":"public", "cluster_distro":"fedora-atomic", "image_id":"fedora-atomic-latest", "volume_driver":"cinder", "registry_enabled":false, "docker_storage_driver":"devicemapper", "apiserver_port":null, "name":"k8s-bm", 
"created_at":"2016-08-26T09:34:41+00:00", "network_driver":"flannel", "fixed_network":null, "coe":"kubernetes", "flavor_id":"m1.small", "master_lb_enabled":false, "dns_nameserver":"8.8.8.8" } ] }magnum-6.1.0/api-ref/source/samples/clustertemplate-create-resp.json0000666000175100017510000000234213244017334025641 0ustar zuulzuul00000000000000{ "insecure_registry":null, "links":[ { "href":"http://10.164.180.104:9511/v1/clustertemplates/085e1c4d-4f68-4bfd-8462-74b9e14e4f39", "rel":"self" }, { "href":"http://10.164.180.104:9511/clustertemplates/085e1c4d-4f68-4bfd-8462-74b9e14e4f39", "rel":"bookmark" } ], "http_proxy":"http://10.164.177.169:8080", "updated_at":null, "floating_ip_enabled":true, "fixed_subnet":null, "master_flavor_id":null, "uuid":"085e1c4d-4f68-4bfd-8462-74b9e14e4f39", "no_proxy":"10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost", "https_proxy":"http://10.164.177.169:8080", "tls_disabled":false, "keypair_id":"kp", "public":false, "labels":{ }, "docker_volume_size":3, "server_type":"vm", "external_network_id":"public", "cluster_distro":"fedora-atomic", "image_id":"fedora-atomic-latest", "volume_driver":"cinder", "registry_enabled":false, "docker_storage_driver":"devicemapper", "apiserver_port":null, "name":"k8s-bm2", "created_at":"2016-08-29T02:08:08+00:00", "network_driver":"flannel", "fixed_network":null, "coe":"kubernetes", "flavor_id":"m1.small", "master_lb_enabled":true, "dns_nameserver":"8.8.8.8" }magnum-6.1.0/api-ref/source/samples/bay-get-all-resp.json0000666000175100017510000000127313244017334023263 0ustar zuulzuul00000000000000{ "bays":[ { "status":"CREATE_COMPLETE", "uuid":"746e779a-751a-456b-a3e9-c883d734946f", "links":[ { "href":"http://10.164.180.104:9511/v1/bays/746e779a-751a-456b-a3e9-c883d734946f", "rel":"self" }, { "href":"http://10.164.180.104:9511/bays/746e779a-751a-456b-a3e9-c883d734946f", "rel":"bookmark" } ], "stack_id":"9c6f1169-7300-4d08-a444-d2be38758719", "master_count":1, "baymodel_id":"0562d357-8641-4759-8fed-8173f02c9633", 
"node_count":1, "bay_create_timeout":60, "name":"k8s" } ] }magnum-6.1.0/api-ref/source/samples/bay-create-req.json0000666000175100017510000000025113244017334023012 0ustar zuulzuul00000000000000{ "name":"k8s", "discovery_url":null, "master_count":2, "baymodel_id":"0562d357-8641-4759-8fed-8173f02c9633", "node_count":2, "bay_create_timeout":60 }magnum-6.1.0/api-ref/source/samples/cluster-create-resp.json0000666000175100017510000000006413244017334024104 0ustar zuulzuul00000000000000{ "uuid":"746e779a-751a-456b-a3e9-c883d734946f" }magnum-6.1.0/api-ref/source/mservices.inc0000666000175100017510000000203513244017334020347 0ustar zuulzuul00000000000000.. -*- rst -*- ===================== Manage Magnum service ===================== List container infrastructure management services ======================================================= .. rest_method:: GET /v1/mservices Enables administrative users to list all Magnum services. Container infrastructure service information include service id, binary, host, report count, creation time, last updated time, health status, and the reason for disabling service. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 Response Parameters ------------------- .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - mservices: mservices - binary: binary - created_at: created_at - state: state - report_count: report_count - updated_at: updated_at - host: host - disabled_reason: disabled_reason - id: id_s Response Example ---------------- .. 
literalinclude:: samples/mservice-get-resp.json :language: javascript magnum-6.1.0/contrib/0000775000175100017510000000000013244017675014477 5ustar zuulzuul00000000000000magnum-6.1.0/contrib/drivers/0000775000175100017510000000000013244017675016155 5ustar zuulzuul00000000000000magnum-6.1.0/contrib/drivers/dcos_centos_v1/0000775000175100017510000000000013244017675021066 5ustar zuulzuul00000000000000magnum-6.1.0/contrib/drivers/dcos_centos_v1/version.py0000666000175100017510000000115313244017334023117 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. version = '1.0.0' driver = 'dcos_centos_v1' container_version = '1.11.2' magnum-6.1.0/contrib/drivers/dcos_centos_v1/scale_manager.py0000666000175100017510000000210713244017334024213 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from magnum.conductor.scale_manager import ScaleManager from marathon import MarathonClient class DcosScaleManager(ScaleManager): def __init__(self, context, osclient, cluster): super(DcosScaleManager, self).__init__(context, osclient, cluster) def _get_hosts_with_container(self, context, cluster): marathon_client = MarathonClient( 'http://' + cluster.api_address + '/marathon/') hosts = set() for task in marathon_client.list_tasks(): hosts.add(task.host) return hosts magnum-6.1.0/contrib/drivers/dcos_centos_v1/driver.py0000666000175100017510000000237613244017334022735 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from magnum.drivers.dcos_centos_v1 import monitor from magnum.drivers.dcos_centos_v1.scale_manager import DcosScaleManager from magnum.drivers.dcos_centos_v1 import template_def from magnum.drivers.heat import driver class Driver(driver.HeatDriver): @property def provides(self): return [ {'server_type': 'vm', 'os': 'centos', 'coe': 'dcos'}, ] def get_template_definition(self): return template_def.DcosCentosVMTemplateDefinition() def get_monitor(self, context, cluster): return monitor.DcosMonitor(context, cluster) def get_scale_manager(self, context, osclient, cluster): return DcosScaleManager(context, osclient, cluster) magnum-6.1.0/contrib/drivers/dcos_centos_v1/image/0000775000175100017510000000000013244017675022150 5ustar zuulzuul00000000000000magnum-6.1.0/contrib/drivers/dcos_centos_v1/image/docker/0000775000175100017510000000000013244017675023417 5ustar zuulzuul00000000000000magnum-6.1.0/contrib/drivers/dcos_centos_v1/image/docker/pre-install.d/0000775000175100017510000000000013244017675026073 5ustar zuulzuul00000000000000magnum-6.1.0/contrib/drivers/dcos_centos_v1/image/docker/pre-install.d/20-configure-docker-service0000777000175100017510000000163313244017334033121 0ustar zuulzuul00000000000000#!/bin/bash if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then set -x fi set -eu set -o pipefail # Configure yum to use the Docker yum repo sudo tee /etc/yum.repos.d/docker.repo <<-'EOF' [dockerrepo] name=Docker Repository baseurl=https://yum.dockerproject.org/repo/main/centos/7/ enabled=1 gpgcheck=1 gpgkey=https://yum.dockerproject.org/gpg EOF # Configure systemd to run the Docker Daemon with OverlayFS # Manage Docker on CentOS with systemd. # systemd handles starting Docker on boot and restarting it when it crashes. # # Docker 1.11.x will be installed, so issue for Docker 1.12.x on Centos7 # won't happen. 
# https://github.com/docker/docker/issues/22847 # https://github.com/docker/docker/issues/25098 # sudo mkdir -p /etc/systemd/system/docker.service.d sudo tee /etc/systemd/system/docker.service.d/override.conf <<- 'EOF' [Service] ExecStart= ExecStart=/usr/bin/docker daemon --storage-driver=overlay -H fd:// EOF magnum-6.1.0/contrib/drivers/dcos_centos_v1/image/docker/pre-install.d/10-enable-overlay0000777000175100017510000000116613244017334031142 0ustar zuulzuul00000000000000#!/bin/bash if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then set -x fi set -eu set -o pipefail # Upgrade CentOS to 7.2 sudo -E yum upgrade --assumeyes --tolerant sudo -E yum update --assumeyes # Verify that the kernel is at least 3.10 function version_gt() { test "$(echo "$@" | tr " " "\n" | sort -V | head -n 1)" != "$1"; } kernel_version=`uname -r | cut --bytes=1-4` expect_version=3.10 if version_gt $expect_version $kernel_version; then echo "Error: kernel version at least $expect_version, current version $kernel_version" exit 1 fi # Enable OverlayFS sudo tee /etc/modules-load.d/overlay.conf <<-'EOF' overlay EOF magnum-6.1.0/contrib/drivers/dcos_centos_v1/image/docker/post-install.d/0000775000175100017510000000000013244017675026272 5ustar zuulzuul00000000000000magnum-6.1.0/contrib/drivers/dcos_centos_v1/image/docker/post-install.d/60-enable-docker-service0000777000175100017510000000017213244017334032566 0ustar zuulzuul00000000000000#!/bin/bash if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then set -x fi set -eu set -o pipefail sudo systemctl enable docker magnum-6.1.0/contrib/drivers/dcos_centos_v1/image/docker/install.d/0000775000175100017510000000000013244017675025307 5ustar zuulzuul00000000000000magnum-6.1.0/contrib/drivers/dcos_centos_v1/image/docker/install.d/50-install-docker0000777000175100017510000000106313244017334030364 0ustar zuulzuul00000000000000#!/bin/bash if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then set -x fi set -eu set -o pipefail # Install the Docker engine, daemon, and service. 
# # The supported versions of Docker are: # 1.7.x # 1.8.x # 1.9.x # 1.10.x # 1.11.x # Docker 1.12.x is NOT supported. # Docker 1.9.x - 1.11.x is recommended for stability reasons. # https://github.com/docker/docker/issues/9718 # # See DC/OS installtion guide for details # https://dcos.io/docs/1.8/administration/installing/custom/system-requirements/install-docker-centos/ # sudo -E yum install -y docker-engine-1.11.2 magnum-6.1.0/contrib/drivers/dcos_centos_v1/image/docker/elements-deps0000666000175100017510000000002113244017334026072 0ustar zuulzuul00000000000000package-installs magnum-6.1.0/contrib/drivers/dcos_centos_v1/image/README.md0000666000175100017510000000526413244017334023430 0ustar zuulzuul00000000000000============= centos-dcos ============= This directory contains `[diskimage-builder](https://github.com/openstack/diskimage-builder)` elements to build an centos image which contains dcos. Pre-requisites to run diskimage-builder --------------------------------------- For diskimage-builder to work, following packages need to be present: * kpartx * qemu-utils * curl * xfsprogs * yum * yum-utils * git For Debian/Ubuntu systems, use:: apt-get install kpartx qemu-utils curl xfsprogs yum yum-utils git For CentOS and Fedora < 22, use:: yum install kpartx qemu-utils curl xfsprogs yum yum-utils git For Fedora >= 22, use:: dnf install kpartx @virtualization curl xfsprogs yum yum-utils git How to generate Centos image with DC/OS 1.8.x --------------------------------------------- 1. 
Download and export element path git clone https://git.openstack.org/openstack/magnum git clone https://git.openstack.org/openstack/diskimage-builder.git git clone https://git.openstack.org/openstack/dib-utils.git git clone https://git.openstack.org/openstack/tripleo-image-elements.git git clone https://git.openstack.org/openstack/heat-templates.git export PATH="${PWD}/diskimage-builder/bin:$PATH" export PATH="${PWD}/dib-utils/bin:$PATH" export ELEMENTS_PATH=magnum/contrib/drivers/dcos_centos_v1/image export ELEMENTS_PATH=${ELEMENTS_PATH}:diskimage-builder/elements export ELEMENTS_PATH=${ELEMENTS_PATH}:tripleo-image-elements/elements:heat-templates/hot/software-config/elements 2. Export environment path of the url to download dcos_generate_config.sh This default download url is for DC/OS 1.8.4 export DCOS_GENERATE_CONFIG_SRC=https://downloads.dcos.io/dcos/stable/commit/e64024af95b62c632c90b9063ed06296fcf38ea5/dcos_generate_config.sh Or specify local file path export DCOS_GENERATE_CONFIG_SRC=`pwd`/dcos_generate_config.sh 3. Set file system type to `xfs` Only XFS is currently supported for overlay. See https://dcos.io/docs/1.8/administration/installing/custom/system-requirements/install-docker-centos/#recommendations export FS_TYPE=xfs 4. Create image disk-image-create \ centos7 vm docker dcos selinux-permissive \ os-collect-config os-refresh-config os-apply-config \ heat-config heat-config-script \ -o centos-7-dcos.qcow2 5. 
(Optional) Create user image for bare metal node Create with elements dhcp-all-interfaces and devuser export DIB_DEV_USER_USERNAME=centos export DIB_DEV_USER_PWDLESS_SUDO=YES disk-image-create \ centos7 vm docker dcos selinux-permissive dhcp-all-interfaces devuser \ os-collect-config os-refresh-config os-apply-config \ heat-config heat-config-script \ -o centos-7-dcos-bm.qcow2 magnum-6.1.0/contrib/drivers/dcos_centos_v1/image/validate_dcos_image.sh0000777000175100017510000000200113244017334026435 0ustar zuulzuul00000000000000#!/bin/bash # # Copyright (c) 2016 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -e # check that image is valid qemu-img check -q $1 # validate estimated size FILESIZE=$(stat -c%s "$1") MIN_SIZE=1231028224 # 1.15GB MAX_SIZE=1335885824 # 1.25GB if [ $FILESIZE -lt $MIN_SIZE ] ; then echo "Error: generated image size is lower than expected." exit 1 fi if [ $FILESIZE -gt $MAX_SIZE ] ; then echo "Error: generated image size is higher than expected." 
exit 1 fi magnum-6.1.0/contrib/drivers/dcos_centos_v1/image/dcos/0000775000175100017510000000000013244017675023100 5ustar zuulzuul00000000000000magnum-6.1.0/contrib/drivers/dcos_centos_v1/image/dcos/post-install.d/0000775000175100017510000000000013244017675025753 5ustar zuulzuul00000000000000magnum-6.1.0/contrib/drivers/dcos_centos_v1/image/dcos/post-install.d/99-enable-ntp0000666000175100017510000000017013244017334030152 0ustar zuulzuul00000000000000#!/bin/bash if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then set -x fi set -eu set -o pipefail sudo systemctl enable ntpd magnum-6.1.0/contrib/drivers/dcos_centos_v1/image/dcos/post-install.d/99-add-norgoup0000777000175100017510000000024713244017334030354 0ustar zuulzuul00000000000000#!/bin/bash if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then set -x fi set -eu set -o pipefail # nogroup will be used on Mesos masters and agents. sudo groupadd nogroup magnum-6.1.0/contrib/drivers/dcos_centos_v1/image/dcos/environment.d/0000775000175100017510000000000013244017675025666 5ustar zuulzuul00000000000000magnum-6.1.0/contrib/drivers/dcos_centos_v1/image/dcos/environment.d/10-dcos-install-url0000777000175100017510000000052313244017334031220 0ustar zuulzuul00000000000000# Specify download url, default DC/OS version 1.8.4 export DCOS_GENERATE_CONFIG_SRC=${DCOS_GENERATE_CONFIG_SRC:-https://downloads.dcos.io/dcos/stable/commit/e64024af95b62c632c90b9063ed06296fcf38ea5/dcos_generate_config.sh} # or local file path # export DCOS_GENERATE_CONFIG_SRC=${DCOS_GENERATE_CONFIG_SRC:-${PWD}/dcos_generate_config.sh} magnum-6.1.0/contrib/drivers/dcos_centos_v1/image/dcos/package-installs.yaml0000666000175100017510000000004213244017334027174 0ustar zuulzuul00000000000000tar: xz: unzip: curl: ipset: ntp: magnum-6.1.0/contrib/drivers/dcos_centos_v1/image/dcos/elements-deps0000666000175100017510000000003013244017334025553 0ustar zuulzuul00000000000000package-installs docker 
magnum-6.1.0/contrib/drivers/dcos_centos_v1/image/dcos/extra-data.d/0000775000175100017510000000000013244017675025354 5ustar zuulzuul00000000000000magnum-6.1.0/contrib/drivers/dcos_centos_v1/image/dcos/extra-data.d/99-download-generate-config0000777000175100017510000000132713244017334032400 0ustar zuulzuul00000000000000#!/bin/bash if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then set -x fi set -eu set -o pipefail # This scrpit file is used to dowload dcos_generate_config.sh outside chroot. # Ihis file is essential that the size of dcos_generate_config.sh is more than # 700M, we should download it into the image in advance. sudo mkdir -p $TMP_MOUNT_PATH/opt/dcos if [ -f $DCOS_GENERATE_CONFIG_SRC ]; then # If $DCOS_GENERATE_CONFIG_SRC is a file path, copy the file sudo cp $DCOS_GENERATE_CONFIG_SRC $TMP_MOUNT_PATH/opt/dcos else # If $DCOS_GENERATE_CONFIG_SRC is a url, download it # Please make sure curl is installed on your host environment cd $TMP_MOUNT_PATH/opt/dcos sudo -E curl -O $DCOS_GENERATE_CONFIG_SRC fi magnum-6.1.0/contrib/drivers/dcos_centos_v1/image/install_imagebuild_deps.sh0000777000175100017510000000065713244017334027354 0ustar zuulzuul00000000000000#!/bin/bash # This script installs all needed dependencies to generate # images using diskimage-builder. Please note it only has been # tested on Ubuntu Xenial. set -eux set -o pipefail sudo apt update || true sudo apt install -y \ git \ qemu-utils \ python-dev \ python-yaml \ python-six \ uuid-runtime \ curl \ sudo \ kpartx \ parted \ wget \ xfsprogs \ yum \ yum-utils magnum-6.1.0/contrib/drivers/dcos_centos_v1/README.md0000666000175100017510000001010713244017334022336 0ustar zuulzuul00000000000000How to build a centos image which contains DC/OS 1.8.x ====================================================== Here is the advanced DC/OS 1.8 installation guide. 
See [Advanced DC/OS Installation Guide] (https://dcos.io/docs/1.8/administration/installing/custom/advanced/) See [Install Docker on CentOS] (https://dcos.io/docs/1.8/administration/installing/custom/system-requirements/install-docker-centos/) See [Adding agent nodes] (https://dcos.io/docs/1.8/administration/installing/custom/add-a-node/) Create a centos image using DIB following the steps outlined in DC/OS installation guide. 1. Install and configure docker in chroot. 2. Install system requirements in chroot. 3. Download `dcos_generate_config.sh` outside chroot. This file will be used to run `dcos_generate_config.sh --genconf` to generate config files on the node during magnum cluster creation. 4. Some configuration changes are required for DC/OS, i.e disabling the firewalld and adding the group named nogroup. See comments in the script file. Use the centos image to build a DC/OS cluster. Command: `magnum cluster-template-create` `magnum cluster-create` After all the instances with centos image are created. 1. Pass parameters to config.yaml with magnum cluster template properties. 2. Run `dcos_generate_config.sh --genconf` to generate config files. 3. Run `dcos_install.sh master` on master node and `dcos_install.sh slave` on slave node. If we want to scale the DC/OS cluster. Command: `magnum cluster-update` The same steps as cluster creation. 1. Create new instances, generate config files on them and install. 2. Or delete those agent nodes where containers are not running. How to use magnum dcos coe =============================================== We are assuming that magnum has been installed and the magnum path is `/opt/stack/magnum`. 1. Copy dcos magnum coe source code $ mv -r /opt/stack/magnum/contrib/drivers/dcos_centos_v1 /opt/stack/magnum/magnum/drivers/ $ mv /opt/stack/magnum/contrib/drivers/common/dcos_* /opt/stack/magnum/magnum/drivers/common/ $ cd /opt/stack/magnum $ sudo python setup.py install 2. 
Add driver in setup.cfg dcos_centos_v1 = magnum.drivers.dcos_centos_v1.driver:Driver 3. Restart your magnum services. 4. Prepare centos image with elements dcos and docker installed See how to build a centos image in /opt/stack/magnum/magnum/drivers/dcos_centos_v1/image/README.md 5. Create glance image $ openstack image create centos-7-dcos.qcow2 \ --public \ --disk-format=qcow2 \ --container-format=bare \ --property os_distro=centos \ --file=centos-7-dcos.qcow2 6. Create magnum cluster template Configure DC/OS cluster with --labels See https://dcos.io/docs/1.8/administration/installing/custom/configuration-parameters/ $ magnum cluster-template-create --name dcos-cluster-template \ --image-id centos-7-dcos.qcow2 \ --keypair-id testkey \ --external-network-id public \ --dns-nameserver 8.8.8.8 \ --flavor-id m1.medium \ --labels oauth_enabled=false \ --coe dcos Here is an example to specify the overlay network in DC/OS, 'dcos_overlay_network' should be json string format. $ magnum cluster-template-create --name dcos-cluster-template \ --image-id centos-7-dcos.qcow2 \ --keypair-id testkey \ --external-network-id public \ --dns-nameserver 8.8.8.8 \ --flavor-id m1.medium \ --labels oauth_enabled=false \ --labels dcos_overlay_enable='true' \ --labels dcos_overlay_config_attempts='6' \ --labels dcos_overlay_mtu='9001' \ --labels dcos_overlay_network='{"vtep_subnet": "44.128.0.0/20",\ "vtep_mac_oui": "70:B3:D5:00:00:00","overlays":\ [{"name": "dcos","subnet": "9.0.0.0/8","prefix": 26}]}' \ --coe dcos 7. Create magnum cluster $ magnum cluster-create --name dcos-cluster --cluster-template dcos-cluster-template --node-count 1 8. You need to wait for a while after magnum cluster creation completed to make DC/OS web interface accessible. 
magnum-6.1.0/contrib/drivers/dcos_centos_v1/monitor.py0000666000175100017510000000542213244017334023124 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_serialization import jsonutils from magnum.common import urlfetch from magnum.conductor import monitors class DcosMonitor(monitors.MonitorBase): def __init__(self, context, cluster): super(DcosMonitor, self).__init__(context, cluster) self.data = {} @property def metrics_spec(self): return { 'memory_util': { 'unit': '%', 'func': 'compute_memory_util', }, 'cpu_util': { 'unit': '%', 'func': 'compute_cpu_util', }, } # See https://github.com/dcos/adminrouter#ports-summary # Use http:///mesos/ instead of http://:5050 def _build_url(self, url, protocol='http', server_name='mesos', path='/'): return protocol + '://' + url + '/' + server_name + path def _is_leader(self, state): return state['leader'] == state['pid'] def pull_data(self): self.data['mem_total'] = 0 self.data['mem_used'] = 0 self.data['cpu_total'] = 0 self.data['cpu_used'] = 0 for master_addr in self.cluster.master_addresses: mesos_master_url = self._build_url(master_addr, server_name='mesos', path='/state') master = jsonutils.loads(urlfetch.get(mesos_master_url)) if self._is_leader(master): for slave in master['slaves']: self.data['mem_total'] += slave['resources']['mem'] self.data['mem_used'] += slave['used_resources']['mem'] self.data['cpu_total'] += slave['resources']['cpus'] self.data['cpu_used'] += slave['used_resources']['cpus'] 
break def compute_memory_util(self): if self.data['mem_total'] == 0 or self.data['mem_used'] == 0: return 0 else: return self.data['mem_used'] * 100 / self.data['mem_total'] def compute_cpu_util(self): if self.data['cpu_total'] == 0 or self.data['cpu_used'] == 0: return 0 else: return self.data['cpu_used'] * 100 / self.data['cpu_total'] magnum-6.1.0/contrib/drivers/dcos_centos_v1/template_def.py0000666000175100017510000000176013244017334024067 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os from magnum.drivers.heat import dcos_centos_template_def as dctd class DcosCentosVMTemplateDefinition(dctd.DcosCentosTemplateDefinition): """DC/OS template for Centos VM.""" @property def driver_module_path(self): return __name__[:__name__.rindex('.')] @property def template_path(self): return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates/dcoscluster.yaml') magnum-6.1.0/contrib/drivers/dcos_centos_v1/__init__.py0000666000175100017510000000000013244017334023157 0ustar zuulzuul00000000000000magnum-6.1.0/contrib/drivers/dcos_centos_v1/templates/0000775000175100017510000000000013244017675023064 5ustar zuulzuul00000000000000magnum-6.1.0/contrib/drivers/dcos_centos_v1/templates/dcosmaster.yaml0000666000175100017510000001102213244017334026102 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is a nested stack that defines a single DC/OS master, This stack is included by a ResourceGroup resource in the parent template (dcoscluster.yaml). parameters: server_image: type: string description: glance image used to boot the server master_flavor: type: string description: flavor to use when booting the server ssh_key_name: type: string description: name of ssh key to be provisioned on our server external_network: type: string description: uuid/name of a network to use for floating ip addresses fixed_network: type: string description: Network from which to allocate fixed addresses. fixed_subnet: type: string description: Subnet from which to allocate fixed addresses. secgroup_base_id: type: string description: ID of the security group for base. secgroup_dcos_id: type: string description: ID of the security group for DC/OS master. api_pool_80_id: type: string description: ID of the load balancer pool of Http. api_pool_443_id: type: string description: ID of the load balancer pool of Https. api_pool_8080_id: type: string description: ID of the load balancer pool of Marathon. 
api_pool_5050_id: type: string description: ID of the load balancer pool of Mesos master. api_pool_2181_id: type: string description: ID of the load balancer pool of Zookeeper. api_pool_8181_id: type: string description: ID of the load balancer pool of Exhibitor. resources: ###################################################################### # # DC/OS master server. # dcos_master: type: OS::Nova::Server properties: image: {get_param: server_image} flavor: {get_param: master_flavor} key_name: {get_param: ssh_key_name} user_data_format: SOFTWARE_CONFIG networks: - port: {get_resource: dcos_master_eth0} dcos_master_eth0: type: OS::Neutron::Port properties: network: {get_param: fixed_network} security_groups: - {get_param: secgroup_base_id} - {get_param: secgroup_dcos_id} fixed_ips: - subnet: {get_param: fixed_subnet} replacement_policy: AUTO dcos_master_floating: type: Magnum::Optional::DcosMaster::Neutron::FloatingIP properties: floating_network: {get_param: external_network} port_id: {get_resource: dcos_master_eth0} api_pool_80_member: type: Magnum::Optional::Neutron::LBaaS::PoolMember properties: pool: {get_param: api_pool_80_id} address: {get_attr: [dcos_master_eth0, fixed_ips, 0, ip_address]} subnet: { get_param: fixed_subnet } protocol_port: 80 api_pool_443_member: type: Magnum::Optional::Neutron::LBaaS::PoolMember properties: pool: {get_param: api_pool_443_id} address: {get_attr: [dcos_master_eth0, fixed_ips, 0, ip_address]} subnet: { get_param: fixed_subnet } protocol_port: 443 api_pool_8080_member: type: Magnum::Optional::Neutron::LBaaS::PoolMember properties: pool: {get_param: api_pool_8080_id} address: {get_attr: [dcos_master_eth0, fixed_ips, 0, ip_address]} subnet: { get_param: fixed_subnet } protocol_port: 8080 api_pool_5050_member: type: Magnum::Optional::Neutron::LBaaS::PoolMember properties: pool: {get_param: api_pool_5050_id} address: {get_attr: [dcos_master_eth0, fixed_ips, 0, ip_address]} subnet: { get_param: fixed_subnet } protocol_port: 5050 
api_pool_2181_member: type: Magnum::Optional::Neutron::LBaaS::PoolMember properties: pool: {get_param: api_pool_2181_id} address: {get_attr: [dcos_master_eth0, fixed_ips, 0, ip_address]} subnet: { get_param: fixed_subnet } protocol_port: 2181 api_pool_8181_member: type: Magnum::Optional::Neutron::LBaaS::PoolMember properties: pool: {get_param: api_pool_8181_id} address: {get_attr: [dcos_master_eth0, fixed_ips, 0, ip_address]} subnet: { get_param: fixed_subnet } protocol_port: 8181 outputs: dcos_master_ip: value: {get_attr: [dcos_master_eth0, fixed_ips, 0, ip_address]} description: > This is the "private" address of the DC/OS master node. dcos_master_external_ip: value: {get_attr: [dcos_master_floating, floating_ip_address]} description: > This is the "public" address of the DC/OS master node. dcos_server_id: value: {get_resource: dcos_master} description: > This is the logical id of the DC/OS master node. magnum-6.1.0/contrib/drivers/dcos_centos_v1/templates/secgroup.yaml0000666000175100017510000000643513244017334025601 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 parameters: resources: ###################################################################### # # security groups. we need to permit network traffic of various # sorts. # The following is a list of ports used by internal DC/OS components, # and their corresponding systemd unit. # https://dcos.io/docs/1.8/administration/installing/ports/ # # The VIP features, added in DC/OS 1.8, require that ports 32768 - 65535 # are open between all agent and master nodes for both TCP and UDP. 
# https://dcos.io/docs/1.8/administration/upgrading/ # secgroup_base: type: OS::Neutron::SecurityGroup properties: rules: - protocol: icmp - protocol: tcp port_range_min: 22 port_range_max: 22 - protocol: tcp remote_mode: remote_group_id - protocol: udp remote_mode: remote_group_id # All nodes - protocol: tcp port_range_min: 32768 port_range_max: 65535 # Master nodes - protocol: tcp port_range_min: 53 port_range_max: 53 - protocol: tcp port_range_min: 1050 port_range_max: 1050 - protocol: tcp port_range_min: 1801 port_range_max: 1801 - protocol: tcp port_range_min: 7070 port_range_max: 7070 # dcos-oauth - protocol: tcp port_range_min: 8101 port_range_max: 8101 - protocol: tcp port_range_min: 8123 port_range_max: 8123 - protocol: tcp port_range_min: 9000 port_range_max: 9000 - protocol: tcp port_range_min: 9942 port_range_max: 9942 - protocol: tcp port_range_min: 9990 port_range_max: 9990 - protocol: tcp port_range_min: 15055 port_range_max: 15055 - protocol: udp port_range_min: 53 port_range_max: 53 - protocol: udp port_range_min: 32768 port_range_max: 65535 secgroup_dcos: type: OS::Neutron::SecurityGroup properties: rules: # Admin Router is a customized Nginx that proxies all of the internal # services on port 80 and 443 (if https is configured) # See https://github.com/dcos/adminrouter # If parameter is specified to master_http_loadbalancer, the # load balancer must accept traffic on ports 8080, 5050, 80, and 443, # and forward it to the same ports on the master # Admin Router http - protocol: tcp port_range_min: 80 port_range_max: 80 # Admin Router https - protocol: tcp port_range_min: 443 port_range_max: 443 # Marathon - protocol: tcp port_range_min: 8080 port_range_max: 8080 # Mesos master - protocol: tcp port_range_min: 5050 port_range_max: 5050 # Exhibitor - protocol: tcp port_range_min: 8181 port_range_max: 8181 # Zookeeper - protocol: tcp port_range_min: 2181 port_range_max: 2181 outputs: secgroup_base_id: value: {get_resource: secgroup_base} 
secgroup_dcos_id: value: {get_resource: secgroup_dcos} magnum-6.1.0/contrib/drivers/dcos_centos_v1/templates/fragments/0000775000175100017510000000000013244017675025052 5ustar zuulzuul00000000000000magnum-6.1.0/contrib/drivers/dcos_centos_v1/templates/fragments/configure-dcos.sh0000666000175100017510000001472313244017334030316 0ustar zuulzuul00000000000000#!/bin/bash . /etc/sysconfig/heat-params GENCONF_SCRIPT_DIR=/opt/dcos sudo mkdir -p $GENCONF_SCRIPT_DIR/genconf sudo chown -R centos $GENCONF_SCRIPT_DIR/genconf # Configure ip-detect cat > $GENCONF_SCRIPT_DIR/genconf/ip-detect < $CONFIG_YAML_FILE # cluster_name echo "cluster_name: $CLUSTER_NAME" >> $CONFIG_YAML_FILE # exhibitor_storage_backend if [ "static" == "$EXHIBITOR_STORAGE_BACKEND" ]; then echo "exhibitor_storage_backend: static" >> $CONFIG_YAML_FILE elif [ "zookeeper" == "$EXHIBITOR_STORAGE_BACKEND" ]; then echo "exhibitor_storage_backend: zookeeper" >> $CONFIG_YAML_FILE echo "exhibitor_zk_hosts: $EXHIBITOR_ZK_HOSTS" >> $CONFIG_YAML_FILE echo "exhibitor_zk_path: $EXHIBITOR_ZK_PATH" >> $CONFIG_YAML_FILE elif [ "aws_s3" == "$EXHIBITOR_STORAGE_BACKEND" ]; then echo "exhibitor_storage_backend: aws_s3" >> $CONFIG_YAML_FILE echo "aws_access_key_id: $AWS_ACCESS_KEY_ID" >> $CONFIG_YAML_FILE echo "aws_region: $AWS_REGIION" >> $CONFIG_YAML_FILE echo "aws_secret_access_key: $AWS_SECRET_ACCESS_KEY" >> $CONFIG_YAML_FILE echo "exhibitor_explicit_keys: $EXHIBITOR_EXPLICIT_KEYS" >> $CONFIG_YAML_FILE echo "s3_bucket: $S3_BUCKET" >> $CONFIG_YAML_FILE echo "s3_prefix: $S3_PREFIX" >> $CONFIG_YAML_FILE elif [ "azure" == "$EXHIBITOR_STORAGE_BACKEND" ]; then echo "exhibitor_storage_backend: azure" >> $CONFIG_YAML_FILE echo "exhibitor_azure_account_name: $EXHIBITOR_AZURE_ACCOUNT_NAME" >> $CONFIG_YAML_FILE echo "exhibitor_azure_account_key: $EXHIBITOR_AZURE_ACCOUNT_KEY" >> $CONFIG_YAML_FILE echo "exhibitor_azure_prefix: $EXHIBITOR_AZURE_PREFIX" >> $CONFIG_YAML_FILE fi # master_discovery if [ "static" == "$MASTER_DISCOVERY" ]; then 
echo "master_discovery: static" >> $CONFIG_YAML_FILE echo "master_list:" >> $CONFIG_YAML_FILE for ip in $MASTER_LIST; do echo "- ${ip}" >> $CONFIG_YAML_FILE done elif [ "master_http_loadbalancer" == "$MASTER_DISCOVERY" ]; then echo "master_discovery: master_http_loadbalancer" >> $CONFIG_YAML_FILE echo "exhibitor_address: $EXHIBITOR_ADDRESS" >> $CONFIG_YAML_FILE echo "num_masters: $NUM_MASTERS" >> $CONFIG_YAML_FILE echo "master_list:" >> $CONFIG_YAML_FILE for ip in $MASTER_LIST; do echo "- ${ip}" >> $CONFIG_YAML_FILE done fi #################################################### # Networking # dcos_overlay_enable if [ "false" == "$DCOS_OVERLAY_ENABLE" ]; then echo "dcos_overlay_enable: false" >> $CONFIG_YAML_FILE elif [ "true" == "$DCOS_OVERLAY_ENABLE" ]; then echo "dcos_overlay_enable: true" >> $CONFIG_YAML_FILE echo "dcos_overlay_config_attempts: $DCOS_OVERLAY_CONFIG_ATTEMPTS" >> $CONFIG_YAML_FILE echo "dcos_overlay_mtu: $DCOS_OVERLAY_MTU" >> $CONFIG_YAML_FILE echo "dcos_overlay_network:" >> $CONFIG_YAML_FILE echo "$DCOS_OVERLAY_NETWORK" >> $CONFIG_YAML_FILE fi # dns_search if [ -n "$DNS_SEARCH" ]; then echo "dns_search: $DNS_SEARCH" >> $CONFIG_YAML_FILE fi # resolvers echo "resolvers:" >> $CONFIG_YAML_FILE for ip in $RESOLVERS; do echo "- ${ip}" >> $CONFIG_YAML_FILE done # use_proxy if [ -n "$HTTP_PROXY" ] && [ -n "$HTTPS_PROXY" ]; then echo "use_proxy: true" >> $CONFIG_YAML_FILE echo "http_proxy: $HTTP_PROXY" >> $CONFIG_YAML_FILE echo "https_proxy: $HTTPS_PROXY" >> $CONFIG_YAML_FILE if [ -n "$NO_PROXY" ]; then echo "no_proxy:" >> $CONFIG_YAML_FILE for ip in $NO_PROXY; do echo "- ${ip}" >> $CONFIG_YAML_FILE done fi fi #################################################### # Performance and Tuning # check_time if [ "false" == "$CHECK_TIME" ]; then echo "check_time: false" >> $CONFIG_YAML_FILE fi # docker_remove_delay if [ "1" != "$DOCKER_REMOVE_DELAY" ]; then echo "docker_remove_delay: $DOCKER_REMOVE_DELAY" >> $CONFIG_YAML_FILE fi # gc_delay if [ "2" != "$GC_DELAY" ]; 
then echo "gc_delay: $GC_DELAY" >> $CONFIG_YAML_FILE fi # log_directory if [ "/genconf/logs" != "$LOG_DIRECTORY" ]; then echo "log_directory: $LOG_DIRECTORY" >> $CONFIG_YAML_FILE fi # process_timeout if [ "120" != "$PROCESS_TIMEOUT" ]; then echo "process_timeout: $PROCESS_TIMEOUT" >> $CONFIG_YAML_FILE fi #################################################### # Security And Authentication # oauth_enabled if [ "false" == "$OAUTH_ENABLED" ]; then echo "oauth_enabled: false" >> $CONFIG_YAML_FILE fi # telemetry_enabled if [ "false" == "$TELEMETRY_ENABLED" ]; then echo "telemetry_enabled: false" >> $CONFIG_YAML_FILE fi #################################################### # Rexray Configuration # NOTE: This feature is considered experimental: use it at your own risk. # We might add, change, or delete any functionality as described in this document. # See https://dcos.io/docs/1.8/usage/storage/external-storage/ if [ "$VOLUME_DRIVER" == "rexray" ]; then if [ ${AUTH_URL##*/}=="v3" ]; then extra_configs="domainName: $DOMAIN_NAME" else extra_configs="" fi echo "rexray_config:" >> $CONFIG_YAML_FILE echo " rexray:" >> $CONFIG_YAML_FILE echo " modules:" >> $CONFIG_YAML_FILE echo " default-admin:" >> $CONFIG_YAML_FILE echo " host: tcp://127.0.0.1:61003" >> $CONFIG_YAML_FILE echo " storageDrivers:" >> $CONFIG_YAML_FILE echo " - openstack" >> $CONFIG_YAML_FILE echo " volume:" >> $CONFIG_YAML_FILE echo " mount:" >> $CONFIG_YAML_FILE echo " preempt: $REXRAY_PREEMPT" >> $CONFIG_YAML_FILE echo " openstack:" >> $CONFIG_YAML_FILE echo " authUrl: $AUTH_URL" >> $CONFIG_YAML_FILE echo " username: $USERNAME" >> $CONFIG_YAML_FILE echo " password: $PASSWORD" >> $CONFIG_YAML_FILE echo " tenantName: $TENANT_NAME" >> $CONFIG_YAML_FILE echo " regionName: $REGION_NAME" >> $CONFIG_YAML_FILE echo " availabilityZoneName: nova" >> $CONFIG_YAML_FILE echo " $extra_configs" >> $CONFIG_YAML_FILE fi cd $GENCONF_SCRIPT_DIR sudo bash $GENCONF_SCRIPT_DIR/dcos_generate_config.sh --genconf cd 
$GENCONF_SCRIPT_DIR/genconf/serve sudo bash $GENCONF_SCRIPT_DIR/genconf/serve/dcos_install.sh --no-block-dcos-setup $ROLES magnum-6.1.0/contrib/drivers/dcos_centos_v1/templates/fragments/write-heat-params.sh0000666000175100017510000000275313244017334030741 0ustar zuulzuul00000000000000#!/bin/sh mkdir -p /etc/sysconfig cat > /etc/sysconfig/heat-params < This is a nested stack that defines a single DC/OS slave, This stack is included by a ResourceGroup resource in the parent template (dcoscluster.yaml). parameters: server_image: type: string description: glance image used to boot the server slave_flavor: type: string description: flavor to use when booting the server ssh_key_name: type: string description: name of ssh key to be provisioned on our server external_network: type: string description: uuid/name of a network to use for floating ip addresses wait_condition_timeout: type: number description : > timeout for the Wait Conditions http_proxy: type: string description: http proxy address for docker https_proxy: type: string description: https proxy address for docker no_proxy: type: string description: no proxies for docker auth_url: type: string description: > url for DC/OS to authenticate before sending request username: type: string description: user name password: type: string description: > user password, not set in current implementation, only used to fill in for Kubernetes config file hidden: true tenant_name: type: string description: > tenant_name is used to isolate access to Compute resources volume_driver: type: string description: volume driver to use for container storage region_name: type: string description: A logically separate section of the cluster domain_name: type: string description: > domain is to define the administrative boundaries for management of Keystone entities fixed_network: type: string description: Network from which to allocate fixed addresses. fixed_subnet: type: string description: Subnet from which to allocate fixed addresses. 
secgroup_base_id: type: string description: ID of the security group for base. rexray_preempt: type: string description: > enables any host to take control of a volume irrespective of whether other hosts are using the volume ###################################################################### # # DC/OS parameters # cluster_name: type: string description: human readable name for the DC/OS cluster default: my-cluster exhibitor_storage_backend: type: string exhibitor_zk_hosts: type: string exhibitor_zk_path: type: string aws_access_key_id: type: string aws_region: type: string aws_secret_access_key: type: string exhibitor_explicit_keys: type: string s3_bucket: type: string s3_prefix: type: string exhibitor_azure_account_name: type: string exhibitor_azure_account_key: type: string exhibitor_azure_prefix: type: string master_discovery: type: string master_list: type: string exhibitor_address: type: string default: 127.0.0.1 num_masters: type: number dcos_overlay_enable: type: string dcos_overlay_config_attempts: type: string dcos_overlay_mtu: type: string dcos_overlay_network: type: string dns_search: type: string resolvers: type: string check_time: type: string docker_remove_delay: type: number gc_delay: type: number log_directory: type: string process_timeout: type: number oauth_enabled: type: string telemetry_enabled: type: string resources: slave_wait_handle: type: OS::Heat::WaitConditionHandle slave_wait_condition: type: OS::Heat::WaitCondition depends_on: dcos_slave properties: handle: {get_resource: slave_wait_handle} timeout: {get_param: wait_condition_timeout} secgroup_all_open: type: OS::Neutron::SecurityGroup properties: rules: - protocol: icmp - protocol: tcp - protocol: udp ###################################################################### # # software configs. these are components that are combined into # a multipart MIME user-data archive. 
# write_heat_params: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: template: {get_file: fragments/write-heat-params.sh} params: "$HTTP_PROXY": {get_param: http_proxy} "$HTTPS_PROXY": {get_param: https_proxy} "$NO_PROXY": {get_param: no_proxy} "$AUTH_URL": {get_param: auth_url} "$USERNAME": {get_param: username} "$PASSWORD": {get_param: password} "$TENANT_NAME": {get_param: tenant_name} "$VOLUME_DRIVER": {get_param: volume_driver} "$REGION_NAME": {get_param: region_name} "$DOMAIN_NAME": {get_param: domain_name} "$REXRAY_PREEMPT": {get_param: rexray_preempt} "$CLUSTER_NAME": {get_param: cluster_name} "$EXHIBITOR_STORAGE_BACKEND": {get_param: exhibitor_storage_backend} "$EXHIBITOR_ZK_HOSTS": {get_param: exhibitor_zk_hosts} "$EXHIBITOR_ZK_PATH": {get_param: exhibitor_zk_path} "$AWS_ACCESS_KEY_ID": {get_param: aws_access_key_id} "$AWS_REGION": {get_param: aws_region} "$AWS_SECRET_ACCESS_KEY": {get_param: aws_secret_access_key} "$EXHIBITOR_EXPLICIT_KEYS": {get_param: exhibitor_explicit_keys} "$S3_BUCKET": {get_param: s3_bucket} "$S3_PREFIX": {get_param: s3_prefix} "$EXHIBITOR_AZURE_ACCOUNT_NAME": {get_param: exhibitor_azure_account_name} "$EXHIBITOR_AZURE_ACCOUNT_KEY": {get_param: exhibitor_azure_account_key} "$EXHIBITOR_AZURE_PREFIX": {get_param: exhibitor_azure_prefix} "$MASTER_DISCOVERY": {get_param: master_discovery} "$MASTER_LIST": {get_param: master_list} "$EXHIBITOR_ADDRESS": {get_param: exhibitor_address} "$NUM_MASTERS": {get_param: num_masters} "$DCOS_OVERLAY_ENABLE": {get_param: dcos_overlay_enable} "$DCOS_OVERLAY_CONFIG_ATTEMPTS": {get_param: dcos_overlay_config_attempts} "$DCOS_OVERLAY_MTU": {get_param: dcos_overlay_mtu} "$DCOS_OVERLAY_NETWORK": {get_param: dcos_overlay_network} "$DNS_SEARCH": {get_param: dns_search} "$RESOLVERS": {get_param: resolvers} "$CHECK_TIME": {get_param: check_time} "$DOCKER_REMOVE_DELAY": {get_param: docker_remove_delay} "$GC_DELAY": {get_param: gc_delay} "$LOG_DIRECTORY": {get_param: 
log_directory} "$PROCESS_TIMEOUT": {get_param: process_timeout} "$OAUTH_ENABLED": {get_param: oauth_enabled} "$TELEMETRY_ENABLED": {get_param: telemetry_enabled} "$ROLES": slave dcos_config: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/configure-dcos.sh} slave_wc_notify: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: template: | #!/bin/bash -v wc_notify --data-binary '{"status": "SUCCESS"}' params: wc_notify: {get_attr: [slave_wait_handle, curl_cli]} dcos_slave_init: type: OS::Heat::MultipartMime properties: parts: - config: {get_resource: write_heat_params} - config: {get_resource: dcos_config} - config: {get_resource: slave_wc_notify} ###################################################################### # # a single DC/OS slave. # dcos_slave: type: OS::Nova::Server properties: image: {get_param: server_image} flavor: {get_param: slave_flavor} key_name: {get_param: ssh_key_name} user_data_format: RAW user_data: {get_resource: dcos_slave_init} networks: - port: {get_resource: dcos_slave_eth0} dcos_slave_eth0: type: OS::Neutron::Port properties: network: {get_param: fixed_network} security_groups: - get_resource: secgroup_all_open - get_param: secgroup_base_id fixed_ips: - subnet: {get_param: fixed_subnet} dcos_slave_floating: type: Magnum::Optional::DcosSlave::Neutron::FloatingIP properties: floating_network: {get_param: external_network} port_id: {get_resource: dcos_slave_eth0} outputs: dcos_slave_ip: value: {get_attr: [dcos_slave_eth0, fixed_ips, 0, ip_address]} description: > This is the "private" address of the DC/OS slave node. dcos_slave_external_ip: value: {get_attr: [dcos_slave_floating, floating_ip_address]} description: > This is the "public" address of the DC/OS slave node. 
magnum-6.1.0/contrib/drivers/dcos_centos_v1/templates/dcoscluster.yaml0000666000175100017510000004637313244017334026311 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This template will boot a DC/OS cluster with one or more masters (as specified by number_of_masters, default is 1) and one or more slaves (as specified by the number_of_slaves parameter, which defaults to 1). parameters: cluster_name: type: string description: human readable name for the DC/OS cluster default: my-cluster number_of_masters: type: number description: how many DC/OS masters to spawn initially default: 1 # In DC/OS, there are two types of slave nodes, public and private. # Public slave nodes have external access and private slave nodes don't. # Magnum only supports one type of slave nodes and I decide not to modify # cluster template properties. So I create slave nodes as private agents. number_of_slaves: type: number description: how many DC/OS agents or slaves to spawn initially default: 1 master_flavor: type: string default: m1.medium description: flavor to use when booting the master servers slave_flavor: type: string default: m1.medium description: flavor to use when booting the slave servers server_image: type: string default: centos-dcos description: glance image used to boot the server ssh_key_name: type: string description: name of ssh key to be provisioned on our server external_network: type: string description: uuid/name of a network to use for floating ip addresses default: public fixed_network: type: string description: uuid/name of an existing network to use to provision machines default: "" fixed_subnet: type: string description: uuid/name of an existing subnet to use to provision machines default: "" fixed_network_cidr: type: string description: network range for fixed ip network default: 10.0.0.0/24 dns_nameserver: type: string description: address of a dns nameserver reachable in your environment http_proxy: type: string description: http 
proxy address for docker default: "" https_proxy: type: string description: https proxy address for docker default: "" no_proxy: type: string description: no proxies for docker default: "" ###################################################################### # # Rexray Configuration # trustee_domain_id: type: string description: domain id of the trustee default: "" trustee_user_id: type: string description: user id of the trustee default: "" trustee_username: type: string description: username of the trustee default: "" trustee_password: type: string description: password of the trustee default: "" hidden: true trust_id: type: string description: id of the trust which is used by the trustee default: "" hidden: true ###################################################################### # # Rexray Configuration # volume_driver: type: string description: volume driver to use for container storage default: "" username: type: string description: user name tenant_name: type: string description: > tenant_name is used to isolate access to cloud resources domain_name: type: string description: > domain is to define the administrative boundaries for management of Keystone entities region_name: type: string description: a logically separate section of the cluster rexray_preempt: type: string description: > enables any host to take control of a volume irrespective of whether other hosts are using the volume default: "false" auth_url: type: string description: url for keystone slaves_to_remove: type: comma_delimited_list description: > List of slaves to be removed when doing an update. Individual slave may be referenced several ways: (1) The resource name (e.g.['1', '3']), (2) The private IP address ['10.0.0.4', '10.0.0.6']. Note: the list should be empty when doing a create. 
default: [] wait_condition_timeout: type: number description: > timeout for the Wait Conditions default: 6000 password: type: string description: > user password, not set in current implementation, only used to fill in for DC/OS config file default: password hidden: true ###################################################################### # # DC/OS parameters # # cluster_name exhibitor_storage_backend: type: string default: "static" exhibitor_zk_hosts: type: string default: "" exhibitor_zk_path: type: string default: "" aws_access_key_id: type: string default: "" aws_region: type: string default: "" aws_secret_access_key: type: string default: "" exhibitor_explicit_keys: type: string default: "" s3_bucket: type: string default: "" s3_prefix: type: string default: "" exhibitor_azure_account_name: type: string default: "" exhibitor_azure_account_key: type: string default: "" exhibitor_azure_prefix: type: string default: "" # master_discovery default set to "static" # If --master-lb-enabled is specified, # master_discovery will be set to "master_http_loadbalancer" master_discovery: type: string default: "static" # master_list # exhibitor_address # num_masters #################################################### # Networking dcos_overlay_enable: type: string default: "" constraints: - allowed_values: - "true" - "false" - "" dcos_overlay_config_attempts: type: string default: "" dcos_overlay_mtu: type: string default: "" dcos_overlay_network: type: string default: "" dns_search: type: string description: > This parameter specifies a space-separated list of domains that are tried when an unqualified domain is entered default: "" # resolvers # use_proxy #################################################### # Performance and Tuning check_time: type: string default: "true" constraints: - allowed_values: - "true" - "false" docker_remove_delay: type: number default: 1 gc_delay: type: number default: 2 log_directory: type: string default: "/genconf/logs" process_timeout: 
type: number default: 120 #################################################### # Security And Authentication oauth_enabled: type: string default: "true" constraints: - allowed_values: - "true" - "false" telemetry_enabled: type: string default: "true" constraints: - allowed_values: - "true" - "false" resources: ###################################################################### # # network resources. allocate a network and router for our server. # network: type: ../../common/templates/network.yaml properties: existing_network: {get_param: fixed_network} existing_subnet: {get_param: fixed_subnet} private_network_cidr: {get_param: fixed_network_cidr} dns_nameserver: {get_param: dns_nameserver} external_network: {get_param: external_network} api_lb: type: lb.yaml properties: fixed_subnet: {get_attr: [network, fixed_subnet]} external_network: {get_param: external_network} ###################################################################### # # security groups. we need to permit network traffic of various # sorts. # secgroup: type: secgroup.yaml ###################################################################### # # resources that expose the IPs of either the dcos master or a given # LBaaS pool depending on whether LBaaS is enabled for the cluster. # api_address_lb_switch: type: Magnum::ApiGatewaySwitcher properties: pool_public_ip: {get_attr: [api_lb, floating_address]} pool_private_ip: {get_attr: [api_lb, address]} master_public_ip: {get_attr: [dcos_masters, resource.0.dcos_master_external_ip]} master_private_ip: {get_attr: [dcos_masters, resource.0.dcos_master_ip]} ###################################################################### # # Master SoftwareConfig. 
# write_params_master: type: OS::Heat::SoftwareConfig properties: group: script config: {get_file: fragments/write-heat-params.sh} inputs: - name: HTTP_PROXY type: String - name: HTTPS_PROXY type: String - name: NO_PROXY type: String - name: AUTH_URL type: String - name: USERNAME type: String - name: PASSWORD type: String - name: TENANT_NAME type: String - name: VOLUME_DRIVER type: String - name: REGION_NAME type: String - name: DOMAIN_NAME type: String - name: REXRAY_PREEMPT type: String - name: CLUSTER_NAME type: String - name: EXHIBITOR_STORAGE_BACKEND type: String - name: EXHIBITOR_ZK_HOSTS type: String - name: EXHIBITOR_ZK_PATH type: String - name: AWS_ACCESS_KEY_ID type: String - name: AWS_REGION type: String - name: AWS_SECRET_ACCESS_KEY type: String - name: EXHIBITOR_EXPLICIT_KEYS type: String - name: S3_BUCKET type: String - name: S3_PREFIX type: String - name: EXHIBITOR_AZURE_ACCOUNT_NAME type: String - name: EXHIBITOR_AZURE_ACCOUNT_KEY type: String - name: EXHIBITOR_AZURE_PREFIX type: String - name: MASTER_DISCOVERY type: String - name: MASTER_LIST type: String - name: EXHIBITOR_ADDRESS type: String - name: NUM_MASTERS type: String - name: DCOS_OVERLAY_ENABLE type: String - name: DCOS_OVERLAY_CONFIG_ATTEMPTS type: String - name: DCOS_OVERLAY_MTU type: String - name: DCOS_OVERLAY_NETWORK type: String - name: DNS_SEARCH type: String - name: RESOLVERS type: String - name: CHECK_TIME type: String - name: DOCKER_REMOVE_DELAY type: String - name: GC_DELAY type: String - name: LOG_DIRECTORY type: String - name: PROCESS_TIMEOUT type: String - name: OAUTH_ENABLED type: String - name: TELEMETRY_ENABLED type: String - name: ROLES type: String ###################################################################### # # DC/OS configuration SoftwareConfig. # Configuration files are readered and injected into instance. 
# dcos_config: type: OS::Heat::SoftwareConfig properties: group: script config: {get_file: fragments/configure-dcos.sh} ###################################################################### # # Master SoftwareDeployment. # write_params_master_deployment: type: OS::Heat::SoftwareDeploymentGroup properties: config: {get_resource: write_params_master} servers: {get_attr: [dcos_masters, attributes, dcos_server_id]} input_values: HTTP_PROXY: {get_param: http_proxy} HTTPS_PROXY: {get_param: https_proxy} NO_PROXY: {get_param: no_proxy} AUTH_URL: {get_param: auth_url} USERNAME: {get_param: username} PASSWORD: {get_param: password} TENANT_NAME: {get_param: tenant_name} VOLUME_DRIVER: {get_param: volume_driver} REGION_NAME: {get_param: region_name} DOMAIN_NAME: {get_param: domain_name} REXRAY_PREEMPT: {get_param: rexray_preempt} CLUSTER_NAME: {get_param: cluster_name} EXHIBITOR_STORAGE_BACKEND: {get_param: exhibitor_storage_backend} EXHIBITOR_ZK_HOSTS: {get_param: exhibitor_zk_hosts} EXHIBITOR_ZK_PATH: {get_param: exhibitor_zk_path} AWS_ACCESS_KEY_ID: {get_param: aws_access_key_id} AWS_REGION: {get_param: aws_region} AWS_SECRET_ACCESS_KEY: {get_param: aws_secret_access_key} EXHIBITOR_EXPLICIT_KEYS: {get_param: exhibitor_explicit_keys} S3_BUCKET: {get_param: s3_bucket} S3_PREFIX: {get_param: s3_prefix} EXHIBITOR_AZURE_ACCOUNT_NAME: {get_param: exhibitor_azure_account_name} EXHIBITOR_AZURE_ACCOUNT_KEY: {get_param: exhibitor_azure_account_key} EXHIBITOR_AZURE_PREFIX: {get_param: exhibitor_azure_prefix} MASTER_DISCOVERY: {get_param: master_discovery} MASTER_LIST: {list_join: [' ', {get_attr: [dcos_masters, dcos_master_ip]}]} EXHIBITOR_ADDRESS: {get_attr: [api_lb, address]} NUM_MASTERS: {get_param: number_of_masters} DCOS_OVERLAY_ENABLE: {get_param: dcos_overlay_enable} DCOS_OVERLAY_CONFIG_ATTEMPTS: {get_param: dcos_overlay_config_attempts} DCOS_OVERLAY_MTU: {get_param: dcos_overlay_mtu} DCOS_OVERLAY_NETWORK: {get_param: dcos_overlay_network} DNS_SEARCH: {get_param: dns_search} 
RESOLVERS: {get_param: dns_nameserver} CHECK_TIME: {get_param: check_time} DOCKER_REMOVE_DELAY: {get_param: docker_remove_delay} GC_DELAY: {get_param: gc_delay} LOG_DIRECTORY: {get_param: log_directory} PROCESS_TIMEOUT: {get_param: process_timeout} OAUTH_ENABLED: {get_param: oauth_enabled} TELEMETRY_ENABLED: {get_param: telemetry_enabled} ROLES: master dcos_config_deployment: type: OS::Heat::SoftwareDeploymentGroup depends_on: - write_params_master_deployment properties: config: {get_resource: dcos_config} servers: {get_attr: [dcos_masters, attributes, dcos_server_id]} ###################################################################### # # DC/OS masters. This is a resource group that will create # masters. # dcos_masters: type: OS::Heat::ResourceGroup depends_on: - network properties: count: {get_param: number_of_masters} resource_def: type: dcosmaster.yaml properties: ssh_key_name: {get_param: ssh_key_name} server_image: {get_param: server_image} master_flavor: {get_param: master_flavor} external_network: {get_param: external_network} fixed_network: {get_attr: [network, fixed_network]} fixed_subnet: {get_attr: [network, fixed_subnet]} secgroup_base_id: {get_attr: [secgroup, secgroup_base_id]} secgroup_dcos_id: {get_attr: [secgroup, secgroup_dcos_id]} api_pool_80_id: {get_attr: [api_lb, pool_80_id]} api_pool_443_id: {get_attr: [api_lb, pool_443_id]} api_pool_8080_id: {get_attr: [api_lb, pool_8080_id]} api_pool_5050_id: {get_attr: [api_lb, pool_5050_id]} api_pool_2181_id: {get_attr: [api_lb, pool_2181_id]} api_pool_8181_id: {get_attr: [api_lb, pool_8181_id]} ###################################################################### # # DC/OS slaves. This is a resource group that will initially # create public or private slaves, # and needs to be manually scaled. 
# dcos_slaves: type: OS::Heat::ResourceGroup depends_on: - network properties: count: {get_param: number_of_slaves} removal_policies: [{resource_list: {get_param: slaves_to_remove}}] resource_def: type: dcosslave.yaml properties: ssh_key_name: {get_param: ssh_key_name} server_image: {get_param: server_image} slave_flavor: {get_param: slave_flavor} fixed_network: {get_attr: [network, fixed_network]} fixed_subnet: {get_attr: [network, fixed_subnet]} external_network: {get_param: external_network} wait_condition_timeout: {get_param: wait_condition_timeout} secgroup_base_id: {get_attr: [secgroup, secgroup_base_id]} # DC/OS params auth_url: {get_param: auth_url} username: {get_param: username} password: {get_param: password} tenant_name: {get_param: tenant_name} volume_driver: {get_param: volume_driver} region_name: {get_param: region_name} domain_name: {get_param: domain_name} rexray_preempt: {get_param: rexray_preempt} http_proxy: {get_param: http_proxy} https_proxy: {get_param: https_proxy} no_proxy: {get_param: no_proxy} cluster_name: {get_param: cluster_name} exhibitor_storage_backend: {get_param: exhibitor_storage_backend} exhibitor_zk_hosts: {get_param: exhibitor_zk_hosts} exhibitor_zk_path: {get_param: exhibitor_zk_path} aws_access_key_id: {get_param: aws_access_key_id} aws_region: {get_param: aws_region} aws_secret_access_key: {get_param: aws_secret_access_key} exhibitor_explicit_keys: {get_param: exhibitor_explicit_keys} s3_bucket: {get_param: s3_bucket} s3_prefix: {get_param: s3_prefix} exhibitor_azure_account_name: {get_param: exhibitor_azure_account_name} exhibitor_azure_account_key: {get_param: exhibitor_azure_account_key} exhibitor_azure_prefix: {get_param: exhibitor_azure_prefix} master_discovery: {get_param: master_discovery} master_list: {list_join: [' ', {get_attr: [dcos_masters, dcos_master_ip]}]} exhibitor_address: {get_attr: [api_lb, address]} num_masters: {get_param: number_of_masters} dcos_overlay_enable: {get_param: dcos_overlay_enable} 
dcos_overlay_config_attempts: {get_param: dcos_overlay_config_attempts} dcos_overlay_mtu: {get_param: dcos_overlay_mtu} dcos_overlay_network: {get_param: dcos_overlay_network} dns_search: {get_param: dns_search} resolvers: {get_param: dns_nameserver} check_time: {get_param: check_time} docker_remove_delay: {get_param: docker_remove_delay} gc_delay: {get_param: gc_delay} log_directory: {get_param: log_directory} process_timeout: {get_param: process_timeout} oauth_enabled: {get_param: oauth_enabled} telemetry_enabled: {get_param: telemetry_enabled} outputs: api_address: value: {get_attr: [api_address_lb_switch, public_ip]} description: > This is the API endpoint of the DC/OS master. Use this to access the DC/OS API from outside the cluster. dcos_master_private: value: {get_attr: [dcos_masters, dcos_master_ip]} description: > This is a list of the "private" addresses of all the DC/OS masters. dcos_master: value: {get_attr: [dcos_masters, dcos_master_external_ip]} description: > This is the "public" ip address of the DC/OS master server. Use this address to log in to the DC/OS master via ssh or to access the DC/OS API from outside the cluster. dcos_slaves_private: value: {get_attr: [dcos_slaves, dcos_slave_ip]} description: > This is a list of the "private" addresses of all the DC/OS slaves. dcos_slaves: value: {get_attr: [dcos_slaves, dcos_slave_external_ip]} description: > This is a list of the "public" addresses of all the DC/OS slaves. 
magnum-6.1.0/contrib/drivers/dcos_centos_v1/templates/lb.yaml0000666000175100017510000001176013244017334024344 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 parameters: fixed_subnet: type: string external_network: type: string resources: # Admin Router is a customized Nginx that proxies all of the internal # services on port 80 and 443 (if https is configured) # See https://dcos.io/docs/1.8/administration/installing/custom/configuration-parameters/#-a-name-master-a-master_discovery # If parameter is specified to master_http_loadbalancer, the # load balancer must accept traffic on ports 8080, 5050, 80, and 443, # and forward it to the same ports on the master # # Opening ports 2181 and 8181 are not mentioned in DC/OS document. # When I create a cluster with load balancer, slave nodes will connect to # some services in master nodes with the IP of load balancer, if the port # is not open it will fail. loadbalancer: type: Magnum::Optional::Neutron::LBaaS::LoadBalancer properties: vip_subnet: {get_param: fixed_subnet} listener_80: type: Magnum::Optional::Neutron::LBaaS::Listener properties: loadbalancer: {get_resource: loadbalancer} protocol: HTTP protocol_port: 80 pool_80: type: Magnum::Optional::Neutron::LBaaS::Pool properties: lb_algorithm: ROUND_ROBIN listener: {get_resource: listener_80} protocol: HTTP monitor_80: type: Magnum::Optional::Neutron::LBaaS::HealthMonitor properties: type: TCP delay: 5 max_retries: 5 timeout: 5 pool: { get_resource: pool_80 } listener_443: depends_on: monitor_80 type: Magnum::Optional::Neutron::LBaaS::Listener properties: loadbalancer: {get_resource: loadbalancer} protocol: HTTPS protocol_port: 443 pool_443: type: Magnum::Optional::Neutron::LBaaS::Pool properties: lb_algorithm: ROUND_ROBIN listener: {get_resource: listener_443} protocol: HTTPS monitor_443: type: Magnum::Optional::Neutron::LBaaS::HealthMonitor properties: type: TCP delay: 5 max_retries: 5 timeout: 5 pool: { get_resource: pool_443 } listener_8080: 
depends_on: monitor_443 type: Magnum::Optional::Neutron::LBaaS::Listener properties: loadbalancer: {get_resource: loadbalancer} protocol: TCP protocol_port: 8080 pool_8080: type: Magnum::Optional::Neutron::LBaaS::Pool properties: lb_algorithm: ROUND_ROBIN listener: {get_resource: listener_8080} protocol: TCP monitor_8080: type: Magnum::Optional::Neutron::LBaaS::HealthMonitor properties: type: TCP delay: 5 max_retries: 5 timeout: 5 pool: { get_resource: pool_8080 } listener_5050: depends_on: monitor_8080 type: Magnum::Optional::Neutron::LBaaS::Listener properties: loadbalancer: {get_resource: loadbalancer} protocol: TCP protocol_port: 5050 pool_5050: type: Magnum::Optional::Neutron::LBaaS::Pool properties: lb_algorithm: ROUND_ROBIN listener: {get_resource: listener_5050} protocol: TCP monitor_5050: type: Magnum::Optional::Neutron::LBaaS::HealthMonitor properties: type: TCP delay: 5 max_retries: 5 timeout: 5 pool: { get_resource: pool_5050 } listener_2181: depends_on: monitor_5050 type: Magnum::Optional::Neutron::LBaaS::Listener properties: loadbalancer: {get_resource: loadbalancer} protocol: TCP protocol_port: 2181 pool_2181: type: Magnum::Optional::Neutron::LBaaS::Pool properties: lb_algorithm: ROUND_ROBIN listener: {get_resource: listener_2181} protocol: TCP monitor_2181: type: Magnum::Optional::Neutron::LBaaS::HealthMonitor properties: type: TCP delay: 5 max_retries: 5 timeout: 5 pool: { get_resource: pool_2181 } listener_8181: depends_on: monitor_2181 type: Magnum::Optional::Neutron::LBaaS::Listener properties: loadbalancer: {get_resource: loadbalancer} protocol: TCP protocol_port: 8181 pool_8181: type: Magnum::Optional::Neutron::LBaaS::Pool properties: lb_algorithm: ROUND_ROBIN listener: {get_resource: listener_8181} protocol: TCP monitor_8181: type: Magnum::Optional::Neutron::LBaaS::HealthMonitor properties: type: TCP delay: 5 max_retries: 5 timeout: 5 pool: { get_resource: pool_8181 } floating: type: Magnum::Optional::Neutron::LBaaS::FloatingIP properties: 
floating_network: {get_param: external_network} port_id: {get_attr: [loadbalancer, vip_port_id]} outputs: pool_80_id: value: {get_resource: pool_80} pool_443_id: value: {get_resource: pool_443} pool_8080_id: value: {get_resource: pool_8080} pool_5050_id: value: {get_resource: pool_5050} pool_2181_id: value: {get_resource: pool_2181} pool_8181_id: value: {get_resource: pool_8181} address: value: {get_attr: [loadbalancer, vip_address]} floating_address: value: {get_attr: [floating, floating_ip_address]} magnum-6.1.0/contrib/drivers/heat/0000775000175100017510000000000013244017675017076 5ustar zuulzuul00000000000000magnum-6.1.0/contrib/drivers/heat/dcos_centos_template_def.py0000666000175100017510000001504413244017334024462 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from oslo_serialization import jsonutils from magnum.drivers.heat import template_def LOG = logging.getLogger(__name__) class ServerAddressOutputMapping(template_def.OutputMapping): public_ip_output_key = None private_ip_output_key = None def __init__(self, dummy_arg, cluster_attr=None): self.cluster_attr = cluster_attr self.heat_output = self.public_ip_output_key def set_output(self, stack, cluster_template, cluster): if not cluster_template.floating_ip_enabled: self.heat_output = self.private_ip_output_key LOG.debug("Using heat_output: %s", self.heat_output) super(ServerAddressOutputMapping, self).set_output(stack, cluster_template, cluster) class MasterAddressOutputMapping(ServerAddressOutputMapping): public_ip_output_key = 'dcos_master' private_ip_output_key = 'dcos_master_private' class NodeAddressOutputMapping(ServerAddressOutputMapping): public_ip_output_key = 'dcos_slaves' private_ip_output_key = 'dcos_slaves_private' class DcosCentosTemplateDefinition(template_def.BaseTemplateDefinition): """DC/OS template for Centos.""" def __init__(self): super(DcosCentosTemplateDefinition, self).__init__() self.add_parameter('external_network', cluster_template_attr='external_network_id', required=True) self.add_parameter('number_of_slaves', cluster_attr='node_count') self.add_parameter('master_flavor', cluster_template_attr='master_flavor_id') self.add_parameter('slave_flavor', cluster_template_attr='flavor_id') self.add_parameter('cluster_name', cluster_attr='name') self.add_parameter('volume_driver', cluster_template_attr='volume_driver') self.add_output('api_address', cluster_attr='api_address') self.add_output('dcos_master_private', cluster_attr=None) self.add_output('dcos_slaves_private', cluster_attr=None) self.add_output('dcos_slaves', cluster_attr='node_addresses', mapping_type=NodeAddressOutputMapping) self.add_output('dcos_master', cluster_attr='master_addresses', mapping_type=MasterAddressOutputMapping) def 
get_params(self, context, cluster_template, cluster, **kwargs): extra_params = kwargs.pop('extra_params', {}) # HACK(apmelton) - This uses the user's bearer token, ideally # it should be replaced with an actual trust token with only # access to do what the template needs it to do. osc = self.get_osc(context) extra_params['auth_url'] = context.auth_url extra_params['username'] = context.user_name extra_params['tenant_name'] = context.tenant extra_params['domain_name'] = context.domain_name extra_params['region_name'] = osc.cinder_region_name() # Mesos related label parameters are deleted # Because they are not optional in DC/OS configuration label_list = ['rexray_preempt', 'exhibitor_storage_backend', 'exhibitor_zk_hosts', 'exhibitor_zk_path', 'aws_access_key_id', 'aws_region', 'aws_secret_access_key', 'exhibitor_explicit_keys', 's3_bucket', 's3_prefix', 'exhibitor_azure_account_name', 'exhibitor_azure_account_key', 'exhibitor_azure_prefix', 'dcos_overlay_enable', 'dcos_overlay_config_attempts', 'dcos_overlay_mtu', 'dcos_overlay_network', 'dns_search', 'check_time', 'docker_remove_delay', 'gc_delay', 'log_directory', 'process_timeout', 'oauth_enabled', 'telemetry_enabled'] for label in label_list: extra_params[label] = cluster.labels.get(label) # By default, master_discovery is set to 'static' # If --master-lb-enabled is specified, # master_discovery will be set to 'master_http_loadbalancer' if cluster_template.master_lb_enabled: extra_params['master_discovery'] = 'master_http_loadbalancer' if 'true' == extra_params['dcos_overlay_enable']: overlay_obj = jsonutils.loads(extra_params['dcos_overlay_network']) extra_params['dcos_overlay_network'] = ''' vtep_subnet: %s vtep_mac_oui: %s overlays:''' % (overlay_obj['vtep_subnet'], overlay_obj['vtep_mac_oui']) for item in overlay_obj['overlays']: extra_params['dcos_overlay_network'] += ''' - name: %s subnet: %s prefix: %s''' % (item['name'], item['subnet'], item['prefix']) scale_mgr = kwargs.pop('scale_manager', None) if 
scale_mgr: hosts = self.get_output('dcos_slaves_private') extra_params['slaves_to_remove'] = ( scale_mgr.get_removal_nodes(hosts)) return super(DcosCentosTemplateDefinition, self).get_params(context, cluster_template, cluster, extra_params=extra_params, **kwargs) def get_env_files(self, cluster_template, cluster): env_files = [] template_def.add_priv_net_env_file(env_files, cluster_template) template_def.add_lb_env_file(env_files, cluster_template) template_def.add_fip_env_file(env_files, cluster_template) return env_files magnum-6.1.0/contrib/drivers/k8s_opensuse_v1/0000775000175100017510000000000013244017675021211 5ustar zuulzuul00000000000000magnum-6.1.0/contrib/drivers/k8s_opensuse_v1/version.py0000666000175100017510000000125313244017334023243 0ustar zuulzuul00000000000000# Copyright 2016 - SUSE Linux GmbH # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. version = '1.0.0' driver = 'k8s_opensuse_v1' container_version = '1.12.3' magnum-6.1.0/contrib/drivers/k8s_opensuse_v1/driver.py0000666000175100017510000000165013244017334023052 0ustar zuulzuul00000000000000# Copyright 2016 Rackspace Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.drivers.common import driver from magnum.drivers.k8s_opensuse_v1 import template_def class Driver(driver.Driver): provides = [ {'server_type': 'vm', 'os': 'opensuse', 'coe': 'kubernetes'}, ] def get_template_definition(self): return template_def.JeOSK8sTemplateDefinition() magnum-6.1.0/contrib/drivers/k8s_opensuse_v1/image/0000775000175100017510000000000013244017675022273 5ustar zuulzuul00000000000000magnum-6.1.0/contrib/drivers/k8s_opensuse_v1/image/config.sh0000666000175100017510000000624213244017334024072 0ustar zuulzuul00000000000000#!/bin/bash #================ # FILE : config.sh #---------------- # PROJECT : openSUSE KIWI Image System # COPYRIGHT : (c) 2006 SUSE LINUX Products GmbH. All rights reserved # : # AUTHOR : Marcus Schaefer # : # BELONGS TO : Operating System images # : # DESCRIPTION : configuration script for SUSE based # : operating systems # : # : # STATUS : BETA #---------------- #====================================== # Functions... #-------------------------------------- test -f /.kconfig && . /.kconfig test -f /.profile && . /.profile mkdir /var/lib/misc/reconfig_system #====================================== # Greeting... #-------------------------------------- echo "Configure image: [$name]..." 
#====================================== # add missing fonts #-------------------------------------- CONSOLE_FONT="lat9w-16.psfu" #====================================== # prepare for setting root pw, timezone #-------------------------------------- echo ** "reset machine settings" sed -i 's/^root:[^:]*:/root:*:/' /etc/shadow rm /etc/machine-id rm /etc/localtime rm /var/lib/zypp/AnonymousUniqueId rm /var/lib/systemd/random-seed #====================================== # SuSEconfig #-------------------------------------- echo "** Running suseConfig..." suseConfig echo "** Running ldconfig..." /sbin/ldconfig #====================================== # Setup baseproduct link #-------------------------------------- suseSetupProduct #====================================== # Specify default runlevel #-------------------------------------- baseSetRunlevel 3 #====================================== # Add missing gpg keys to rpm #-------------------------------------- suseImportBuildKey #====================================== # Firewall Configuration #-------------------------------------- echo '** Configuring firewall...' chkconfig SuSEfirewall2_init on chkconfig SuSEfirewall2_setup on #====================================== # Enable sshd #-------------------------------------- chkconfig sshd on #====================================== # Remove doc files #-------------------------------------- baseStripDocs #====================================== # remove rpms defined in config.xml in the image type=delete section #-------------------------------------- baseStripRPM #====================================== # Sysconfig Update #-------------------------------------- echo '** Update sysconfig entries...' 
baseUpdateSysConfig /etc/sysconfig/SuSEfirewall2 FW_CONFIGURATIONS_EXT sshd baseUpdateSysConfig /etc/sysconfig/console CONSOLE_FONT "$CONSOLE_FONT" # baseUpdateSysConfig /etc/sysconfig/snapper SNAPPER_CONFIGS root if [[ "${kiwi_iname}" != *"OpenStack"* ]]; then baseUpdateSysConfig /etc/sysconfig/network/dhcp DHCLIENT_SET_HOSTNAME yes fi # true #====================================== # SSL Certificates Configuration #-------------------------------------- echo '** Rehashing SSL Certificates...' update-ca-certificates if [ ! -s /var/log/zypper.log ]; then > /var/log/zypper.log fi # only for debugging #systemctl enable debug-shell.service baseCleanMount exit 0 magnum-6.1.0/contrib/drivers/k8s_opensuse_v1/image/README.md0000666000175100017510000000244213244017334023546 0ustar zuulzuul00000000000000Build openSUSE Leap 42.1 image for OpenStack Magnum =================================================== This instruction describes how to build manually openSUSE Leap 42.1 image for OpenStack Magnum with Kubernetes packages. Link to the image: http://download.opensuse.org/repositories/Cloud:/Images:/Leap_42.1/images/openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s.x86_64.qcow2 ## Requirements Please install openSUSE (https://www.opensuse.org/) on physical or virtual machine. ## Install packages Install `kiwi` package on openSUSE node, where do you want to build your image `zypper install kiwi` Create destination directory, where image will be build `mkdir /tmp/openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s` ## Build image Run in current directory with `openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s` kiwi template `kiwi --verbose 3 --logfile terminal --build . --destdir /tmp/openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s` ## Get image After `kiwi` will finish, image can be found in `/tmp/openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s` directory with name `openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s.x86_64-1.1.1.qcow2`. 
Full path `/tmp/openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s/openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s.x86_64-1.1.1.qcow2` Have fun !!! ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000magnum-6.1.0/contrib/drivers/k8s_opensuse_v1/image/openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s.kiwimagnum-6.1.0/contrib/drivers/k8s_opensuse_v1/image/openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s.0000666000175100017510000001424113244017334032326 0ustar zuulzuul00000000000000 SUSE Containers Team docker-devel@suse.de Kubernetes openSUSE Leap 42.1 image for OpenStack Magnum 1.1.1 zypper openSUSE openSUSE true magnum-6.1.0/contrib/drivers/k8s_opensuse_v1/image/images.sh0000666000175100017510000000242313244017334024067 0ustar zuulzuul00000000000000#!/bin/bash #================ # FILE : image.sh #---------------- # PROJECT : openSUSE KIWI Image System # COPYRIGHT : (c) 2006 SUSE LINUX Products GmbH. All rights reserved # : # AUTHOR : Marcus Schaefer # : # BELONGS TO : Operating System images # : # DESCRIPTION : configuration script for SUSE based # : operating systems # : # : # STATUS : BETA #---------------- test -f /.kconfig && . /.kconfig test -f /.profile && . /.profile if [[ "${kiwi_iname}" = *"OpenStack"* ]]; then # disable jeos-firstboot service # We need to install it because it provides files required in the # overlay for the image. However, the service itself is something that # requires interaction on boot, which is not good for OpenStack, and the # interaction actually doesn't bring any benefit in OpenStack. 
systemctl mask jeos-firstboot.service # enable cloud-init services suseInsertService cloud-init-local suseInsertService cloud-init suseInsertService cloud-config suseInsertService cloud-final echo '*** adjusting cloud.cfg for openstack' sed -i -e '/mount_default_fields/{adatasource_list: [ NoCloud, OpenStack, None ] }' /etc/cloud/cloud.cfg fi magnum-6.1.0/contrib/drivers/k8s_opensuse_v1/README.md0000666000175100017510000000073613244017334022470 0ustar zuulzuul00000000000000# Magnum openSUSE K8s driver This is openSUSE Kubernetes driver for Magnum, which allow to deploy Kubernetes cluster on openSUSE. ## Installation ### 1. Install the openSUSE K8s driver in Magnum - To install the driver, from this directory run: `python ./setup.py install` ### 2. Enable driver in magnum.conf enabled_definitions = ...,magnum_vm_opensuse_k8s ### 2. Restart Magnum Both Magnum services has to restarted `magnum-api` and `magnum-conductor` magnum-6.1.0/contrib/drivers/k8s_opensuse_v1/template_def.py0000666000175100017510000000514313244017334024211 0ustar zuulzuul00000000000000# Copyright 2016 Rackspace Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import magnum.conf from magnum.drivers.common import k8s_template_def from magnum.drivers.common import template_def CONF = magnum.conf.CONF class JeOSK8sTemplateDefinition(k8s_template_def.K8sTemplateDefinition): """Kubernetes template for openSUSE/SLES JeOS VM.""" def __init__(self): super(JeOSK8sTemplateDefinition, self).__init__() self.add_parameter('docker_volume_size', cluster_template_attr='docker_volume_size') self.add_output('kube_minions', cluster_attr='node_addresses') self.add_output('kube_masters', cluster_attr='master_addresses') def get_params(self, context, cluster_template, cluster, **kwargs): extra_params = kwargs.pop('extra_params', {}) extra_params['username'] = context.user_name extra_params['tenant_name'] = context.tenant return super(JeOSK8sTemplateDefinition, self).get_params(context, cluster_template, cluster, extra_params=extra_params, **kwargs) def get_env_files(self, cluster_template, cluster): env_files = [] if cluster_template.master_lb_enabled: env_files.append( template_def.COMMON_ENV_PATH + 'with_master_lb.yaml') else: env_files.append( template_def.COMMON_ENV_PATH + 'no_master_lb.yaml') if cluster_template.floating_ip_enabled: env_files.append( template_def.COMMON_ENV_PATH + 'enable_floating_ip.yaml') else: env_files.append( template_def.COMMON_ENV_PATH + 'disable_floating_ip.yaml') return env_files @property def driver_module_path(self): return __name__[:__name__.rindex('.')] @property def template_path(self): return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates/kubecluster.yaml') magnum-6.1.0/contrib/drivers/k8s_opensuse_v1/__init__.py0000666000175100017510000000000013244017334023302 0ustar zuulzuul00000000000000magnum-6.1.0/contrib/drivers/k8s_opensuse_v1/templates/0000775000175100017510000000000013244017675023207 5ustar zuulzuul00000000000000magnum-6.1.0/contrib/drivers/k8s_opensuse_v1/templates/kubeminion.yaml0000666000175100017510000002633313244017334026234 0ustar 
zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is a nested stack that defines a single Kubernetes minion, This stack is included by an AutoScalingGroup resource in the parent template (kubecluster.yaml). parameters: server_image: type: string description: glance image used to boot the server minion_flavor: type: string default: m1.small description: flavor to use when booting the server ssh_key_name: type: string description: name of ssh key to be provisioned on our server default: lars external_network: type: string description: uuid/name of a network to use for floating ip addresses kube_allow_priv: type: string description: > whether or not kubernetes should permit privileged containers. default: "false" constraints: - allowed_values: ["true", "false"] docker_volume_size: type: number description: > size of a cinder volume to allocate to docker for container/image storage default: 0 tls_disabled: type: boolean description: whether or not to enable TLS default: False kubernetes_port: type: number description: > The port which are used by kube-apiserver to provide Kubernetes service. default: 6443 cluster_uuid: type: string description: identifier for the cluster this template is generating magnum_url: type: string description: endpoint to retrieve TLS certs from kube_version: type: string description: version of kubernetes used for kubernetes cluster kube_master_ip: type: string description: IP address of the Kubernetes master server. etcd_server_ip: type: string description: IP address of the Etcd server. fixed_network: type: string description: Network from which to allocate fixed addresses. fixed_subnet: type: string description: Subnet from which to allocate fixed addresses. 
network_driver: type: string description: network driver to use for instantiating container networks flannel_network_cidr: type: string description: network range for flannel overlay network wait_condition_timeout: type: number description : > timeout for the Wait Conditions http_proxy: type: string description: http proxy address for docker https_proxy: type: string description: https proxy address for docker no_proxy: type: string description: no proxies for docker registry_enabled: type: boolean description: > Indicates whether the docker registry is enabled. default: false registry_port: type: number description: port of registry service default: 5000 registry_username: type: string description: username used by docker registry default: "username" registry_password: type: string description: password used by docker registry default: "password" registry_domain: type: string description: domain used by docker registry default: "domain" registry_trust_id: type: string description: trust_id used by docker registry default: "trust_id" registry_auth_url: type: string description: auth_url for keystone default: "auth_url" registry_region: type: string description: region of swift service default: "region" registry_container: type: string description: > name of swift container which docker registry stores images in default: "container" registry_insecure: type: boolean description: > indicates whether to skip TLS verification between registry and backend storage default: true registry_chunksize: type: number description: > size fo the data segments for the swift dynamic large objects default: 5242880 secgroup_kube_minion_id: type: string description: ID of the security group for kubernetes minion. kube_minion_id: type: string description: ID of for kubernetes minion. 
auth_url: type: string description: > url for kubernetes to authenticate before sending request to neutron trustee_domain_id: type: string description: domain id of the trustee trustee_user_id: type: string description: user id of the trustee trustee_username: type: string description: username of the trustee trustee_password: type: string description: password of the trustee hidden: true trust_id: type: string description: id of the trust which is used by the trustee hidden: true resources: minion_wait_handle: type: OS::Heat::WaitConditionHandle minion_wait_condition: type: OS::Heat::WaitCondition depends_on: kube-minion properties: handle: {get_resource: minion_wait_handle} timeout: {get_param: wait_condition_timeout} ###################################################################### # # software configs. these are components that are combined into # a multipart MIME user-data archive. # write_heat_params: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: template: {get_file: fragments/write-heat-params-minion.yaml} params: "$KUBE_ALLOW_PRIV": {get_param: kube_allow_priv} "$KUBE_MASTER_IP": {get_param: kube_master_ip} "$KUBE_NODE_IP": {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]} "$KUBE_API_PORT": {get_param: kubernetes_port} "$ETCD_SERVER_IP": {get_param: etcd_server_ip} "$DOCKER_VOLUME": {get_resource: docker_volume} "$NETWORK_DRIVER": {get_param: network_driver} "$REGISTRY_ENABLED": {get_param: registry_enabled} "$REGISTRY_PORT": {get_param: registry_port} "$REGISTRY_AUTH_URL": {get_param: registry_auth_url} "$REGISTRY_REGION": {get_param: registry_region} "$REGISTRY_USERNAME": {get_param: registry_username} "$REGISTRY_PASSWORD": {get_param: registry_password} "$REGISTRY_DOMAIN": {get_param: registry_domain} "$REGISTRY_TRUST_ID": {get_param: registry_trust_id} "$REGISTRY_CONTAINER": {get_param: registry_container} "$REGISTRY_INSECURE": {get_param: registry_insecure} "$REGISTRY_CHUNKSIZE": {get_param: 
registry_chunksize} "$TLS_DISABLED": {get_param: tls_disabled} "$KUBE_VERSION": {get_param: kube_version} "$CLUSTER_UUID": {get_param: cluster_uuid} "$MAGNUM_URL": {get_param: magnum_url} "$HTTP_PROXY": {get_param: http_proxy} "$HTTPS_PROXY": {get_param: https_proxy} "$NO_PROXY": {get_param: no_proxy} "$AUTH_URL": {get_param: auth_url} "$TRUSTEE_DOMAIN_ID": {get_param: trustee_domain_id} "$TRUSTEE_USER_ID": {get_param: trustee_user_id} "$TRUSTEE_USERNAME": {get_param: trustee_username} "$TRUSTEE_PASSWORD": {get_param: trustee_password} "$TRUST_ID": {get_param: trust_id} write_kubeconfig: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/write-kubeconfig.yaml} make_cert: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/make-cert-client.sh} configure_flanneld: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/configure-flanneld-minion.sh} configure_docker: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/configure-docker.sh} create_kubernetes_user: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/create-kubernetes-user.yaml} configure_kubernetes: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/configure-kubernetes-minion.sh} add_proxy: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/add-proxy.sh} minion_wc_notify: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: template: | #!/bin/bash -v wc_notify --data-binary '{"status": "SUCCESS"}' params: wc_notify: {get_attr: [minion_wait_handle, curl_cli]} kube_minion_init: type: OS::Heat::MultipartMime properties: parts: - config: {get_resource: write_heat_params} - config: {get_resource: write_kubeconfig} - config: {get_resource: make_cert} - config: {get_resource: configure_flanneld} - config: {get_resource: 
configure_docker} - config: {get_resource: create_kubernetes_user} - config: {get_resource: configure_kubernetes} - config: {get_resource: add_proxy} - config: {get_resource: minion_wc_notify} ###################################################################### # # a single kubernetes minion. # Important: the name for the heat resource kube-minion below must # not contain "_" (underscore) because it will be used in the # hostname. Because DNS domain name does not allow "_", the "_" # will be converted to a "-" and this will make the hostname different # from the Nova instance name. This in turn will break the load # balancer feature in Kubernetes. # kube-minion: type: OS::Nova::Server properties: name: {get_param: kube_minion_id} image: {get_param: server_image} flavor: {get_param: minion_flavor} key_name: {get_param: ssh_key_name} user_data_format: RAW user_data: {get_resource: kube_minion_init} networks: - port: {get_resource: kube_minion_eth0} kube_minion_eth0: type: OS::Neutron::Port properties: network: {get_param: fixed_network} security_groups: - get_param: secgroup_kube_minion_id fixed_ips: - subnet: {get_param: fixed_subnet} allowed_address_pairs: - ip_address: {get_param: flannel_network_cidr} replacement_policy: AUTO kube_minion_floating: type: Magnum::Optional::KubeMinion::Neutron::FloatingIP properties: floating_network: {get_param: external_network} port_id: {get_resource: kube_minion_eth0} ###################################################################### # # docker storage. This allocates a cinder volume and attaches it # to the minion. 
# docker_volume: type: OS::Cinder::Volume properties: size: {get_param: docker_volume_size} docker_volume_attach: type: OS::Cinder::VolumeAttachment properties: instance_uuid: {get_resource: kube-minion} volume_id: {get_resource: docker_volume} mountpoint: /dev/vdb outputs: kube_minion_ip: value: {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]} description: > This is the "public" IP address of the Kubernetes minion node. kube_minion_external_ip: value: {get_attr: [kube_minion_floating, floating_ip_address]} description: > This is the "public" IP address of the Kubernetes minion node. OS::stack_id: value: {get_param: "OS::stack_id"} description: > This is a id of the stack which creates from this template. magnum-6.1.0/contrib/drivers/k8s_opensuse_v1/templates/kubecluster.yaml0000666000175100017510000004717413244017334026432 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This template will boot a Kubernetes cluster with one or more minions (as specified by the number_of_minions parameter, which defaults to 1). 
parameters: ssh_key_name: type: string description: name of ssh key to be provisioned on our server external_network: type: string description: uuid/name of a network to use for floating ip addresses default: public server_image: type: string description: glance image used to boot the server master_flavor: type: string default: m1.small description: flavor to use when booting the server for master nodes minion_flavor: type: string default: m1.small description: flavor to use when booting the server for minions dns_nameserver: type: string description: address of a DNS nameserver reachable in your environment default: 8.8.8.8 number_of_masters: type: number description: how many kubernetes masters to spawn default: 1 number_of_minions: type: number description: how many kubernetes minions to spawn default: 1 fixed_network_cidr: type: string description: network range for fixed ip network default: 10.0.0.0/24 portal_network_cidr: type: string description: > address range used by kubernetes for service portals default: 10.254.0.0/16 network_driver: type: string description: network driver to use for instantiating container networks default: flannel flannel_network_cidr: type: string description: network range for flannel overlay network default: 10.100.0.0/16 flannel_network_subnetlen: type: number description: size of subnet assigned to each minion default: 24 flannel_network_subnet_min: type: string description: minimum subnet default: 10.100.50.0 flannel_network_subnet_max: type: string description: maximum subnet default: 10.100.199.0 flannel_backend: type: string description: > specify the backend for flannel, default udp backend default: "udp" constraints: - allowed_values: ["udp", "vxlan", "host-gw"] system_pods_initial_delay: type: number description: > health check, time to wait for system pods (podmaster, scheduler) to boot (in seconds) default: 30 system_pods_timeout: type: number description: > health check, timeout for system pods (podmaster, scheduler) 
to answer. (in seconds) default: 5 kube_allow_priv: type: string description: > whether or not kubernetes should permit privileged containers. default: "true" constraints: - allowed_values: ["true", "false"] docker_volume_size: type: number description: > size of a cinder volume to allocate to docker for container/image storage default: 0 wait_condition_timeout: type: number description: > timeout for the Wait Conditions default: 2400 minions_to_remove: type: comma_delimited_list description: > List of minions to be removed when doing an update. Individual minion may be referenced several ways: (1) The resource name (e.g. ['1', '3']), (2) The private IP address ['10.0.0.4', '10.0.0.6']. Note: the list should be empty when doing an create. default: [] discovery_url: type: string description: > Discovery URL used for bootstrapping the etcd cluster. registry_enabled: type: boolean description: > Indicates whether the docker registry is enabled. default: false registry_port: type: number description: port of registry service default: 5000 registry_username: type: string description: username used by docker registry default: "username" registry_password: type: string description: password used by docker registry default: "password" hidden: true registry_domain: type: string description: domain used by docker registry default: "domain" registry_trust_id: type: string description: trust_id used by docker registry default: "trust_id" hidden: true registry_auth_url: type: string description: auth_url for keystone default: "auth_url" registry_region: type: string description: region of swift service default: "region" registry_container: type: string description: > name of swift container which docker registry stores images in default: "container" registry_insecure: type: boolean description: > indicates whether to skip TLS verification between registry and backend storage default: true registry_chunksize: type: number description: > size fo the data segments for the swift 
dynamic large objects default: 5242880 auth_url: type: string description: > url for kubernetes to authenticate before sending request to neutron must be v2 since kubernetes backend only suppor v2 at this point kube_version: type: string description: version of kubernetes used for kubernetes cluster default: v1.3.7 volume_driver: type: string description: volume driver to use for container storage default: "" username: type: string description: > user account password: type: string description: > user password, not set in current implementation, only used to fill in for Kubernetes config file default: ChangeMe hidden: true tenant_name: type: string description: > tenant name loadbalancing_protocol: type: string description: > The protocol which is used for load balancing. If you want to change tls_disabled option to 'True', please change this to "HTTP". default: TCP constraints: - allowed_values: ["TCP", "HTTP"] tls_disabled: type: boolean description: whether or not to disable TLS default: False kubernetes_port: type: number description: > The port which are used by kube-apiserver to provide Kubernetes service. 
default: 6443 cluster_uuid: type: string description: identifier for the cluster this template is generating magnum_url: type: string description: endpoint to retrieve TLS certs from http_proxy: type: string description: http proxy address for docker default: "" https_proxy: type: string description: https proxy address for docker default: "" no_proxy: type: string description: no proxies for docker default: "" trustee_domain_id: type: string description: domain id of the trustee default: "" trustee_user_id: type: string description: user id of the trustee default: "" trustee_username: type: string description: username of the trustee default: "" trustee_password: type: string description: password of the trustee default: "" hidden: true trust_id: type: string description: id of the trust which is used by the trustee default: "" hidden: true domain_name: type: string description: domain_name default: "" resources: ###################################################################### # # network resources. allocate a network and router for our server. # Important: the Load Balancer feature in Kubernetes requires that # the name for the fixed_network must be "private" for the # address lookup in Kubernetes to work properly # fixed_network: type: OS::Neutron::Net properties: name: private fixed_subnet: type: OS::Neutron::Subnet properties: cidr: {get_param: fixed_network_cidr} network: {get_resource: fixed_network} dns_nameservers: - {get_param: dns_nameserver} extrouter: type: OS::Neutron::Router properties: external_gateway_info: network: {get_param: external_network} extrouter_inside: type: OS::Neutron::RouterInterface properties: router_id: {get_resource: extrouter} subnet: {get_resource: fixed_subnet} ###################################################################### # # security groups. we need to permit network traffic of various # sorts. 
# secgroup_base: type: OS::Neutron::SecurityGroup properties: rules: - protocol: icmp - protocol: tcp port_range_min: 22 port_range_max: 22 secgroup_kube_master: type: OS::Neutron::SecurityGroup properties: rules: - protocol: tcp port_range_min: 7080 port_range_max: 7080 - protocol: tcp port_range_min: 8080 port_range_max: 8080 - protocol: tcp port_range_min: 2379 port_range_max: 2379 - protocol: tcp port_range_min: 2380 port_range_max: 2380 - protocol: tcp port_range_min: 6443 port_range_max: 6443 - protocol: tcp port_range_min: 10250 port_range_max: 10250 - protocol: tcp port_range_min: 30000 port_range_max: 32767 - protocol: udp port_range_min: 8285 port_range_max: 8285 - protocol: udp port_range_min: 8472 port_range_max: 8472 secgroup_kube_minion: type: OS::Neutron::SecurityGroup properties: rules: - protocol: icmp - protocol: tcp - protocol: udp ###################################################################### # # load balancers. # api_loadbalancer: type: Magnum::Optional::Neutron::LBaaS::LoadBalancer properties: vip_subnet: {get_resource: fixed_subnet} api_listener: type: Magnum::Optional::Neutron::LBaaS::Listener properties: loadbalancer: {get_resource: api_loadbalancer} protocol: {get_param: loadbalancing_protocol} protocol_port: {get_param: kubernetes_port} api_pool: type: Magnum::Optional::Neutron::LBaaS::Pool properties: lb_algorithm: ROUND_ROBIN listener: {get_resource: api_listener} protocol: {get_param: loadbalancing_protocol} api_monitor: type: Magnum::Optional::Neutron::LBaaS::HealthMonitor properties: type: TCP delay: 5 max_retries: 5 timeout: 5 pool: { get_resource: api_pool } api_pool_floating: type: Magnum::Optional::Neutron::FloatingIP depends_on: - extrouter_inside properties: floating_network: {get_param: external_network} port_id: {get_attr: [api_loadbalancer, vip_port_id]} etcd_loadbalancer: type: Magnum::Optional::Neutron::LBaaS::LoadBalancer properties: vip_subnet: {get_resource: fixed_subnet} etcd_listener: type: 
Magnum::Optional::Neutron::LBaaS::Listener properties: loadbalancer: {get_resource: etcd_loadbalancer} protocol: HTTP protocol_port: 2379 etcd_pool: type: Magnum::Optional::Neutron::LBaaS::Pool properties: lb_algorithm: ROUND_ROBIN listener: {get_resource: etcd_listener} protocol: HTTP etcd_monitor: type: Magnum::Optional::Neutron::LBaaS::HealthMonitor properties: type: TCP delay: 5 max_retries: 5 timeout: 5 pool: { get_resource: etcd_pool } ###################################################################### # # resources that expose the IPs of either the kube master or a given # LBaaS pool depending on whether LBaaS is enabled for the cluster. # api_address_lb_switch: type: Magnum::ApiGatewaySwitcher properties: pool_public_ip: {get_attr: [api_pool_floating, floating_ip_address]} pool_private_ip: {get_attr: [api_loadbalancer, vip_address]} master_public_ip: {get_attr: [kube_masters, resource.0.kube_master_external_ip]} master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]} etcd_address_lb_switch: type: Magnum::ApiGatewaySwitcher properties: pool_private_ip: {get_attr: [etcd_loadbalancer, vip_address]} master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]} ###################################################################### # # resources that expose the IPs of either floating ip or a given # fixed ip depending on whether FloatingIP is enabled for the cluster. # api_address_floating_switch: type: Magnum::FloatingIPAddressSwitcher properties: public_ip: {get_attr: [api_address_lb_switch, public_ip]} private_ip: {get_attr: [api_address_lb_switch, private_ip]} ###################################################################### # # kubernetes masters. This is a resource group that will create # masters. 
# kube_masters: type: OS::Heat::ResourceGroup depends_on: - extrouter_inside properties: count: {get_param: number_of_masters} resource_def: type: kubemaster.yaml properties: api_public_address: {get_attr: [api_pool_floating, floating_ip_address]} api_private_address: {get_attr: [api_loadbalancer, vip_address]} ssh_key_name: {get_param: ssh_key_name} server_image: {get_param: server_image} master_flavor: {get_param: master_flavor} external_network: {get_param: external_network} kube_allow_priv: {get_param: kube_allow_priv} wait_condition_timeout: {get_param: wait_condition_timeout} network_driver: {get_param: network_driver} flannel_backend: {get_param: flannel_backend} flannel_network_cidr: {get_param: flannel_network_cidr} flannel_network_subnetlen: {get_param: flannel_network_subnetlen} flannel_network_subnet_min: {get_param: flannel_network_subnet_min} flannel_network_subnet_max: {get_param: flannel_network_subnet_max} system_pods_initial_delay: {get_param: system_pods_initial_delay} system_pods_timeout: {get_param: system_pods_timeout} portal_network_cidr: {get_param: portal_network_cidr} discovery_url: {get_param: discovery_url} cluster_uuid: {get_param: cluster_uuid} magnum_url: {get_param: magnum_url} http_proxy: {get_param: http_proxy} https_proxy: {get_param: https_proxy} no_proxy: {get_param: no_proxy} kube_version: {get_param: kube_version} fixed_network: {get_resource: fixed_network} fixed_subnet: {get_resource: fixed_subnet} api_pool_id: {get_resource: api_pool} etcd_pool_id: {get_resource: etcd_pool} auth_url: {get_param: auth_url} username: {get_param: username} password: {get_param: password} tenant_name: {get_param: tenant_name} kubernetes_port: {get_param: kubernetes_port} tls_disabled: {get_param: tls_disabled} secgroup_base_id: {get_resource: secgroup_base} secgroup_kube_master_id: {get_resource: secgroup_kube_master} kube_master_id: 'kube-master%index%' kube_master_ports: { get_attr: [kube_master_ports, refs] } kube_master_ips: {get_attr: 
[kube_master_ports, fixed_ip]} kube_master_ips_list: { list_join: ["|", {get_attr: [kube_master_ports, fixed_ip]} ] } kube_minion_ips_list: { list_join: ["|", {get_attr: [kube_minion_ports, fixed_ip]} ] } trustee_user_id: {get_param: trustee_user_id} trustee_password: {get_param: trustee_password} trust_id: {get_param: trust_id} domain_name: {get_param: domain_name} ###################################################################### # # kubernetes minions. This is an resource group that will initially # create minions, and needs to be manually scaled. # kube_minions: type: OS::Heat::ResourceGroup depends_on: - extrouter_inside - kube_masters properties: count: {get_param: number_of_minions} removal_policies: [{resource_list: {get_param: minions_to_remove}}] resource_def: type: kubeminion.yaml properties: ssh_key_name: {get_param: ssh_key_name} server_image: {get_param: server_image} minion_flavor: {get_param: minion_flavor} fixed_network: {get_resource: fixed_network} fixed_subnet: {get_resource: fixed_subnet} network_driver: {get_param: network_driver} flannel_network_cidr: {get_param: flannel_network_cidr} kube_master_ip: {get_attr: [api_address_lb_switch, private_ip]} etcd_server_ip: {get_attr: [etcd_address_lb_switch, private_ip]} external_network: {get_param: external_network} kube_allow_priv: {get_param: kube_allow_priv} docker_volume_size: {get_param: docker_volume_size} wait_condition_timeout: {get_param: wait_condition_timeout} registry_enabled: {get_param: registry_enabled} registry_port: {get_param: registry_port} registry_username: {get_param: registry_username} registry_password: {get_param: registry_password} registry_domain: {get_param: registry_domain} registry_trust_id: {get_param: registry_trust_id} registry_auth_url: {get_param: registry_auth_url} registry_region: {get_param: registry_region} registry_container: {get_param: registry_container} registry_insecure: {get_param: registry_insecure} registry_chunksize: {get_param: registry_chunksize} 
cluster_uuid: {get_param: cluster_uuid} magnum_url: {get_param: magnum_url} http_proxy: {get_param: http_proxy} https_proxy: {get_param: https_proxy} no_proxy: {get_param: no_proxy} kube_version: {get_param: kube_version} kubernetes_port: {get_param: kubernetes_port} tls_disabled: {get_param: tls_disabled} secgroup_kube_minion_id: {get_resource: secgroup_kube_minion} kube_minion_id: 'kube-minion%index%' kube_minion_ports: { get_attr: [kube_minion_ports, refs] } kube_minion_ips: {get_attr: [kube_minion_ports, fixed_ip]} kube_master_ips_list: { list_join: ["|", {get_attr: [kube_master_ports, fixed_ip]} ] } kube_minion_ips_list: { list_join: ["|", {get_attr: [kube_minion_ports, fixed_ip]} ] } auth_url: {get_param: auth_url} trustee_user_id: {get_param: trustee_user_id} trustee_username: {get_param: trustee_username} trustee_password: {get_param: trustee_password} trustee_domain_id: {get_param: trustee_domain_id} trust_id: {get_param: trust_id} outputs: api_address: value: str_replace: template: api_ip_address params: api_ip_address: {get_attr: [api_address_floating_switch, ip_address]} description: > This is the API endpoint of the Kubernetes cluster. Use this to access the Kubernetes API. registry_address: value: str_replace: template: localhost:port params: port: {get_param: registry_port} description: This is the url of docker registry server where you can store docker images. kube_masters: value: {get_attr: [kube_masters, kube_master_external_ip]} description: > This is a list of the "public" IP addresses of all the Kubernetes masters. Use these IP addresses to log in to the Kubernetes masters via ssh. kube_minions: value: {get_attr: [kube_minions, kube_minion_ip]} description: > This is a list of the "private" IP addresses of all the Kubernetes minions. kube_minions_external: value: {get_attr: [kube_minions, kube_minion_external_ip]} description: > This is a list of the "public" IP addresses of all the Kubernetes minions. 
Use these IP addresses to log in to the Kubernetes minions via ssh. magnum-6.1.0/contrib/drivers/k8s_opensuse_v1/templates/kubemaster.yaml0000666000175100017510000002572513244017334026242 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is a nested stack that defines a single Kubernetes master, This stack is included by an ResourceGroup resource in the parent template (kubecluster.yaml). parameters: server_image: type: string description: glance image used to boot the server master_flavor: type: string default: m1.small description: flavor to use when booting the server ssh_key_name: type: string description: name of ssh key to be provisioned on our server default: lars external_network: type: string description: uuid/name of a network to use for floating ip addresses portal_network_cidr: type: string description: > address range used by kubernetes for service portals kube_allow_priv: type: string description: > whether or not kubernetes should permit privileged containers. default: "false" constraints: - allowed_values: ["true", "false"] flannel_network_cidr: type: string description: network range for flannel overlay network flannel_network_subnetlen: type: number description: size of subnet assigned to each master flannel_network_subnet_min: type: string description: minimum subnet flannel_network_subnet_max: type: string description: maximum subnet flannel_backend: type: string description: > specify the backend for flannel, default udp backend constraints: - allowed_values: ["udp", "vxlan", "host-gw"] system_pods_initial_delay: type: number description: > health check, time to wait for system pods (podmaster, scheduler) to boot (in seconds) default: 30 system_pods_timeout: type: number description: > health check, timeout for system pods (podmaster, scheduler) to answer. (in seconds) default: 5 discovery_url: type: string description: > Discovery URL used for bootstrapping the etcd cluster. 
tls_disabled: type: boolean description: whether or not to enable TLS kubernetes_port: type: number description: > The port which are used by kube-apiserver to provide Kubernetes service. kube_version: type: string description: version of kubernetes used for kubernetes cluster cluster_uuid: type: string description: identifier for the cluster this template is generating magnum_url: type: string description: endpoint to retrieve TLS certs from api_public_address: type: string description: Public IP address of the Kubernetes master server. default: "" api_private_address: type: string description: Private IP address of the Kubernetes master server. default: "" http_proxy: type: string description: http proxy address for docker https_proxy: type: string description: https proxy address for docker no_proxy: type: string description: no proxies for docker fixed_network: type: string description: Network from which to allocate fixed addresses. fixed_subnet: type: string description: Subnet from which to allocate fixed addresses. network_driver: type: string description: network driver to use for instantiating container networks wait_condition_timeout: type: number description : > timeout for the Wait Conditions secgroup_base_id: type: string description: ID of the security group for base. secgroup_kube_master_id: type: string description: ID of the security group for kubernetes master. api_pool_id: type: string description: ID of the load balancer pool of k8s API server. etcd_pool_id: type: string description: ID of the load balancer pool of etcd server. auth_url: type: string description: > url for kubernetes to authenticate username: type: string description: > user account password: type: string description: > user password tenant_name: type: string description: > tenant name kube_master_id: type: string description: ID of for kubernetes master. 
trustee_user_id: type: string description: user id of the trustee trustee_password: type: string description: password of the trustee hidden: true trust_id: type: string description: id of the trust which is used by the trustee hidden: true domain_name: type: string description: domain name resources: master_wait_handle: type: OS::Heat::WaitConditionHandle master_wait_condition: type: OS::Heat::WaitCondition depends_on: kube_master properties: handle: {get_resource: master_wait_handle} timeout: {get_param: wait_condition_timeout} ###################################################################### # # software configs. these are components that are combined into # a multipart MIME user-data archive. # write_heat_params: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: template: {get_file: fragments/write-heat-params-master.yaml} params: "$KUBE_NODE_PUBLIC_IP": {get_attr: [kube_master_floating, floating_ip_address]} "$KUBE_NODE_IP": {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} "$KUBE_API_PORT": {get_param: kubernetes_port} "$KUBE_ALLOW_PRIV": {get_param: kube_allow_priv} "$KUBE_MASTER_IPS": {get_param: kube_master_ips_list} "$KUBE_MINION_IPS": {get_param: kube_minion_ips_list} "$KUBE_NODE_PUBLIC_IP": {get_attr: [kube_master_floating, floating_ip_address]} "$KUBE_NODE_IP": { "Fn::Select": [ { get_param: kube_master_index }, { get_param: kube_master_ips} ] } "$KUBE_NODE_NAME": {get_param: kube_master_id} "$NETWORK_DRIVER": {get_param: network_driver} "$FLANNEL_NETWORK_CIDR": {get_param: flannel_network_cidr} "$FLANNEL_NETWORK_SUBNETLEN": {get_param: flannel_network_subnetlen} "$FLANNEL_NETWORK_SUBNET_MIN": {get_param: flannel_network_subnet_min} "$FLANNEL_NETWORK_SUBNET_MAX": {get_param: flannel_network_subnet_max} "$FLANNEL_BACKEND": {get_param: flannel_backend} "$SYSTEM_PODS_INITIAL_DELAY": {get_param: system_pods_initial_delay} "$SYSTEM_PODS_TIMEOUT": {get_param: system_pods_timeout} "$PORTAL_NETWORK_CIDR": {get_param: 
portal_network_cidr} "$ETCD_DISCOVERY_URL": {get_param: discovery_url} "$AUTH_URL": {get_param: auth_url} "$USERNAME": {get_param: username} "$PASSWORD": {get_param: password} "$TENANT_NAME": {get_param: tenant_name} "$CLUSTER_SUBNET": {get_param: fixed_subnet} "$TLS_DISABLED": {get_param: tls_disabled} "$KUBE_VERSION": {get_param: kube_version} "$CLUSTER_UUID": {get_param: cluster_uuid} "$MAGNUM_URL": {get_param: magnum_url} "$HTTP_PROXY": {get_param: http_proxy} "$HTTPS_PROXY": {get_param: https_proxy} "$NO_PROXY": {get_param: no_proxy} "$TRUSTEE_USER_ID": {get_param: trustee_user_id} "$TRUSTEE_PASSWORD": {get_param: trustee_password} "$TRUST_ID": {get_param: trust_id} "$DOMAIN_NAME": {get_param: domain_name} make_cert: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/make-cert.sh} configure_etcd: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/configure-etcd.sh} configure_flanneld: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/configure-flanneld-master.sh} create_kubernetes_user: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/create-kubernetes-user.yaml} configure_kubernetes: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/configure-kubernetes-master.sh} add_proxy: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/add-proxy.sh} master_wc_notify: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: template: | #!/bin/bash -v wc_notify --data-binary '{"status": "SUCCESS"}' params: wc_notify: {get_attr: [master_wait_handle, curl_cli]} kube_master_init: type: OS::Heat::MultipartMime properties: parts: - config: {get_resource: write_heat_params} - config: {get_resource: make_cert} - config: {get_resource: configure_etcd} - config: {get_resource: configure_flanneld} - config: {get_resource: 
create_kubernetes_user} - config: {get_resource: configure_kubernetes} - config: {get_resource: add_proxy} - config: {get_resource: master_wc_notify} ###################################################################### # # a single kubernetes master. # kube_master: type: OS::Nova::Server properties: name: {get_param: kube_master_id} image: {get_param: server_image} flavor: {get_param: master_flavor} key_name: {get_param: ssh_key_name} user_data_format: RAW user_data: {get_resource: kube_master_init} config_drive: true networks: - port: {get_resource: kube_master_eth0} kube_master_eth0: type: OS::Neutron::Port properties: network: {get_param: fixed_network} security_groups: - {get_param: secgroup_base_id} - {get_param: secgroup_kube_master_id} fixed_ips: - subnet: {get_param: fixed_subnet} allowed_address_pairs: - ip_address: {get_param: flannel_network_cidr} replacement_policy: AUTO kube_master_floating: type: Magnum::Optional::KubeMaster::Neutron::FloatingIP properties: floating_network: {get_param: external_network} port_id: {get_resource: kube_master_eth0} api_pool_member: type: Magnum::Optional::Neutron::LBaaS::PoolMember properties: pool: {get_param: api_pool_id} address: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} subnet: { get_param: fixed_subnet } protocol_port: {get_param: kubernetes_port} etcd_pool_member: type: Magnum::Optional::Neutron::LBaaS::PoolMember properties: pool: {get_param: etcd_pool_id} address: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} subnet: { get_param: fixed_subnet } protocol_port: 2379 outputs: kube_master_ip: value: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} description: > This is the "private" IP address of the Kubernetes master node. kube_master_external_ip: value: {get_attr: [kube_master_floating, floating_ip_address]} description: > This is the "public" IP address of the Kubernetes master node. 
magnum-6.1.0/contrib/drivers/k8s_opensuse_v1/templates/fragments/0000775000175100017510000000000013244017675025175 5ustar zuulzuul00000000000000magnum-6.1.0/contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-kubernetes-master.sh0000666000175100017510000000652413244017334033151 0ustar zuulzuul00000000000000#!/bin/sh . /etc/sysconfig/heat-params echo "configuring kubernetes (master)" # Generate ServiceAccount key if needed SERVICE_ACCOUNT_KEY="/var/lib/kubernetes/serviceaccount.key" if [[ ! -f "${SERVICE_ACCOUNT_KEY}" ]]; then mkdir -p "$(dirname ${SERVICE_ACCOUNT_KEY})" openssl genrsa -out "${SERVICE_ACCOUNT_KEY}" 2048 2>/dev/null fi # Setting correct permissions for Kubernetes files chown -R kube:kube /var/lib/kubernetes KUBE_API_ARGS="--service-account-key-file=$SERVICE_ACCOUNT_KEY --runtime_config=api/all=true" if [ "$TLS_DISABLED" == "True" ]; then sed -i ' /^# KUBE_API_PORT=/ s|.*|KUBE_API_PORT="--port=8080 --insecure-port='"$KUBE_API_PORT"'"| ' /etc/kubernetes/apiserver else # insecure port is used internaly sed -i ' /^# KUBE_API_PORT=/ s|.*|KUBE_API_PORT="--port=8080 --insecure-port=8080 --secure-port='"$KUBE_API_PORT"'"| ' /etc/kubernetes/apiserver KUBE_API_ARGS="$KUBE_API_ARGS --tls_cert_file=/etc/kubernetes/ssl/server.crt" KUBE_API_ARGS="$KUBE_API_ARGS --tls_private_key_file=/etc/kubernetes/ssl/server.key" KUBE_API_ARGS="$KUBE_API_ARGS --client_ca_file=/etc/kubernetes/ssl/ca.crt" fi sed -i ' /^KUBE_ALLOW_PRIV=/ s|=.*|="--allow-privileged='"$KUBE_ALLOW_PRIV"'"| ' /etc/kubernetes/config sed -i ' /^KUBE_API_ADDRESS=/ s|=.*|="--advertise-address='"$KUBE_NODE_IP"' --insecure-bind-address=0.0.0.0 --bind_address=0.0.0.0"| /^KUBE_SERVICE_ADDRESSES=/ s|=.*|="--service-cluster-ip-range='"$PORTAL_NETWORK_CIDR"'"| /^KUBE_API_ARGS=/ s|=.*|="--service-account-key-file='"$SERVICE_ACCOUNT_KEY"' --runtime-config=api\/all=true"| /^KUBE_ETCD_SERVERS=/ s/=.*/="--etcd-servers=http:\/\/127.0.0.1:2379"/ /^KUBE_ADMISSION_CONTROL=/ 
s/=.*/="--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota"/ ' /etc/kubernetes/apiserver cat >> /etc/kubernetes/apiserver <> /etc/kubernetes/controller-manager < /etc/sysconfig/kubernetes_openstack_config <> /etc/sysconfig/flanneld < $FLANNEL_JSON < $CA_CERT # Create config for client's csr cat > ${cert_dir}/client.conf < ${CLIENT_CERT} chmod 700 ${cert_dir} chmod 600 ${cert_dir}/* chown -R kube:kube ${cert_dir} sed -i ' s|CA_CERT|'"$CA_CERT"'| s|CLIENT_CERT|'"$CLIENT_CERT"'| s|CLIENT_KEY|'"$CLIENT_KEY"'| s|KUBE_MASTER_URI|'"$KUBE_MASTER_URI"'| ' /etc/kubernetes/kubeconfig.yaml magnum-6.1.0/contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-etcd.sh0000666000175100017510000000130313244017334030416 0ustar zuulzuul00000000000000#!/bin/sh . /etc/sysconfig/heat-params myip="$KUBE_NODE_IP" sed -i ' /ETCD_NAME=/c ETCD_NAME="'$myip'" /ETCD_DATA_DIR=/c ETCD_DATA_DIR="/var/lib/etcd/default.etcd" /ETCD_LISTEN_CLIENT_URLS=/c ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379" /ETCD_LISTEN_PEER_URLS=/c ETCD_LISTEN_PEER_URLS="http://'$myip':2380" /ETCD_ADVERTISE_CLIENT_URLS=/c ETCD_ADVERTISE_CLIENT_URLS="http://'$myip':2379" /ETCD_INITIAL_ADVERTISE_PEER_URLS=/c ETCD_INITIAL_ADVERTISE_PEER_URLS="http://'$myip':2380" /ETCD_DISCOVERY=/c ETCD_DISCOVERY="'$ETCD_DISCOVERY_URL'" ' /etc/sysconfig/etcd echo "activating etcd service" systemctl enable etcd echo "starting etcd service" systemctl --no-block start etcd magnum-6.1.0/contrib/drivers/k8s_opensuse_v1/templates/fragments/make-cert.sh0000666000175100017510000000722713244017334027403 0ustar zuulzuul00000000000000#!/bin/sh # Copyright 2014 The Kubernetes Authors All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. . /etc/sysconfig/heat-params set -o errexit set -o nounset set -o pipefail if [ "$TLS_DISABLED" == "True" ]; then exit 0 fi if [[ -z "${KUBE_NODE_PUBLIC_IP}" ]]; then KUBE_NODE_PUBLIC_IP=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4) fi if [[ -z "${KUBE_NODE_IP}" ]]; then KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4) fi sans="IP:${KUBE_NODE_PUBLIC_IP},IP:${KUBE_NODE_IP}" if [ "${KUBE_NODE_PUBLIC_IP}" != "${KUBE_API_PUBLIC_ADDRESS}" ] \ && [ -n "${KUBE_API_PUBLIC_ADDRESS}" ]; then sans="${sans},IP:${KUBE_API_PUBLIC_ADDRESS}" fi if [ "${KUBE_NODE_IP}" != "${KUBE_API_PRIVATE_ADDRESS}" ] \ && [ -n "${KUBE_API_PRIVATE_ADDRESS}" ]; then sans="${sans},IP:${KUBE_API_PRIVATE_ADDRESS}" fi MASTER_HOSTNAME=${MASTER_HOSTNAME:-} if [[ -n "${MASTER_HOSTNAME}" ]]; then sans="${sans},DNS:${MASTER_HOSTNAME}" fi sans="${sans},IP:127.0.0.1" cert_dir=/etc/kubernetes/ssl mkdir -p "$cert_dir" CA_CERT=$cert_dir/ca.crt SERVER_CERT=$cert_dir/server.crt SERVER_CSR=$cert_dir/server.csr SERVER_KEY=$cert_dir/server.key #Get a token by user credentials and trust auth_json=$(cat << EOF { "auth": { "identity": { "methods": [ "password" ], "password": { "user": { "id": "$TRUSTEE_USER_ID", "password": "$TRUSTEE_PASSWORD" } } }, "scope": { "OS-TRUST:trust": { "id": "$TRUST_ID" } } } } EOF ) #trust is introduced in Keystone v3 version AUTH_URL=${AUTH_URL/v2.0/v3} content_type='Content-Type: application/json' url="$AUTH_URL/auth/tokens" USER_TOKEN=`curl -s -i -X POST -H "$content_type" -d "$auth_json" $url \ | grep X-Subject-Token | awk '{print $2}' | tr -d 
'[[:space:]]'` # Get CA certificate for this cluster curl -X GET \ -H "X-Auth-Token: $USER_TOKEN" \ $MAGNUM_URL/certificates/$CLUSTER_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > ${CA_CERT} # Create config for server's csr cat > ${cert_dir}/server.conf < ${SERVER_CERT} chmod 700 ${cert_dir} chmod 600 ${cert_dir}/* chown -R kube:kube ${cert_dir} magnum-6.1.0/contrib/drivers/k8s_opensuse_v1/templates/fragments/add-proxy.sh0000666000175100017510000000173213244017334027435 0ustar zuulzuul00000000000000#!/bin/sh . /etc/sysconfig/heat-params DOCKER_PROXY_CONF=/etc/systemd/system/docker.service.d/proxy.conf BASH_RC=/etc/bashrc mkdir -p /etc/systemd/system/docker.service.d if [ -n "$HTTP_PROXY" ]; then cat < $DOCKER_PROXY_CONF [Service] Environment=HTTP_PROXY=$HTTP_PROXY EOF systemctl daemon-reload systemctl --no-block restart docker.service if [ -f "$BASH_RC" ]; then echo "declare -x http_proxy=$HTTP_PROXY" >> $BASH_RC else echo "File $BASH_RC does not exist, not setting http_proxy" fi fi if [ -n "$HTTPS_PROXY" ]; then if [ -f "$BASH_RC" ]; then echo "declare -x https_proxy=$HTTPS_PROXY" >> $BASH_RC else echo "File $BASH_RC does not exist, not setting https_proxy" fi fi if [ -n "$NO_PROXY" ]; then if [ -f "$BASH_RC" ]; then echo "declare -x no_proxy=$NO_PROXY" >> $BASH_RC else echo "File $BASH_RC does not exist, not setting no_proxy" fi fi magnum-6.1.0/contrib/drivers/k8s_opensuse_v1/templates/fragments/create-kubernetes-user.yaml0000666000175100017510000000033413244017334032437 0ustar zuulzuul00000000000000#cloud-config system_info: default_user: name: kubernetes lock_passwd: true gecos: Kubernetes Interactive User groups: [wheel, systemd-journal] sudo: ["ALL=(ALL) NOPASSWD:ALL"] shell: /bin/bash magnum-6.1.0/contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-docker.sh0000666000175100017510000000324213244017334030752 0ustar zuulzuul00000000000000#!/bin/sh . 
/etc/sysconfig/heat-params echo "stopping docker" systemctl stop docker ip link del docker0 if [ "$NETWORK_DRIVER" == "flannel" ]; then FLANNEL_ENV=/run/flannel/subnet.env attempts=60 while [[ ! -f $FLANNEL_ENV && $attempts != 0 ]]; do echo "waiting for file $FLANNEL_ENV" sleep 1 let attempts-- done source $FLANNEL_ENV if ! [ "\$FLANNEL_SUBNET" ] && [ "\$FLANNEL_MTU" ] ; then echo "ERROR: missing required environment variables." >&2 exit 1 fi if `grep -q DOCKER_NETWORK_OPTIONS /etc/sysconfig/docker`; then sed -i ' /^DOCKER_NETWORK_OPTIONS=/ s|=.*|="--bip='"$FLANNEL_SUBNET"' --mtu='"$FLANNEL_MTU"'"| ' /etc/sysconfig/docker else echo "DOCKER_NETWORK_OPTIONS=\"--bip=$FLANNEL_SUBNET --mtu=$FLANNEL_MTU\"" >> /etc/sysconfig/docker fi sed -i ' /^DOCKER_OPTS=/ s/=.*/="--storage-driver=btrfs"/ ' /etc/sysconfig/docker fi DOCKER_DEV=/dev/disk/by-id/virtio-${DOCKER_VOLUME:0:20} attempts=60 while [[ ! -b $DOCKER_DEV && $attempts != 0 ]]; do echo "waiting for disk $DOCKER_DEV" sleep 0.5 udevadm trigger let attempts-- done if ! [ -b $DOCKER_DEV ]; then echo "ERROR: device $DOCKER_DEV does not exist" >&2 exit 1 fi mkfs.btrfs $DOCKER_DEV mount $DOCKER_DEV /var/lib/docker # update /etc/fstab with DOCKER_DEV if ! `grep -q /var/lib/docker /etc/fstab`; then grep /var/lib/docker /etc/mtab | head -1 >> /etc/fstab fi # make sure we pick up any modified unit files systemctl daemon-reload echo "activating docker service" systemctl enable docker echo "starting docker service" systemctl --no-block start docker magnum-6.1.0/contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-kubernetes-minion.sh0000666000175100017510000000254013244017334033141 0ustar zuulzuul00000000000000#!/bin/sh . 
/etc/sysconfig/heat-params echo "configuring kubernetes (minion)" myip="$KUBE_NODE_IP" ETCD_SERVER_IP=${ETCD_SERVER_IP:-$KUBE_MASTER_IP} if [ "$TLS_DISABLED" == "True" ]; then KUBE_PROTOCOL="http" KUBE_CONFIG="" else KUBE_PROTOCOL="https" KUBE_CONFIG="--kubeconfig=/etc/kubernetes/kubeconfig.yaml" fi KUBE_MASTER_URI="$KUBE_PROTOCOL://$KUBE_MASTER_IP:$KUBE_API_PORT" sed -i ' /^KUBE_ALLOW_PRIV=/ s/=.*/="--allow-privileged='"$KUBE_ALLOW_PRIV"'"/ /^KUBE_ETCD_SERVERS=/ s|=.*|="--etcd-servers=http://'"$ETCD_SERVER_IP"':2379"| /^KUBE_MASTER=/ s|=.*|="--master='"$KUBE_MASTER_URI"'"| ' /etc/kubernetes/config sed -i ' /^KUBELET_ADDRESS=/ s/=.*/="--address=0.0.0.0"/ /^KUBELET_HOSTNAME=/ s/=.*/="--hostname-override='"$myip"'"/ /^KUBELET_API_SERVER=/ s|=.*|="--api-servers='"$KUBE_MASTER_URI"'"| /^KUBELET_ARGS=/ s|=.*|="--node-ip='"$myip"' --container-runtime=docker --config=/etc/kubernetes/manifests '"$KUBE_CONFIG"'"| ' /etc/kubernetes/kubelet sed -i ' /^KUBE_PROXY_ARGS=/ s|=.*|="--proxy-mode=iptables '"$KUBE_CONFIG"'"| ' /etc/kubernetes/proxy cat >> /etc/environment <> /etc/sysconfig/flanneld < Ready You can log into your minions using the `minion` user as well. You can get a list of minion addresses by running: $ heat output-show my-kube-cluster kube_minions [ "192.168.200.182" ] You can get the docker registry v2 address: $ heat output-show my-kube-cluster registry_address localhost:5000 ## Testing The templates install an example Pod and Service description into `/etc/kubernetes/examples`. You can deploy this with the following commands: $ kubectl create -f /etc/kubernetes/examples/web.service $ kubectl create -f /etc/kubernetes/examples/web.pod This will deploy a minimal webserver and a service. You can use `kubectl get pods` and `kubectl get services` to see the results of these commands. ## License Copyright 2016 SUSE Linux GmbH Licensed under the Apache License, Version 2.0 (the "License"); you may not use these files except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ## Contributing Please submit bugs and pull requests via the Gerrit repository at https://review.openstack.org/. For more information, please refer to the following resources: * **Documentation:** http://docs.openstack.org/developer/magnum * **Source:** http://git.openstack.org/cgit/openstack/magnum magnum-6.1.0/contrib/drivers/k8s_opensuse_v1/templates/COPYING0000666000175100017510000002613613244017334024244 0ustar zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. 
Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. magnum-6.1.0/contrib/drivers/k8s_opensuse_v1/setup.py0000666000175100017510000000221313244017334022713 0ustar zuulzuul00000000000000#!/usr/bin/env python # Copyright (c) 2016 SUSE Linux GmbH # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import setuptools setuptools.setup( name="k8s_opensuse_v1", version="1.0", packages=['k8s_opensuse_v1'], package_data={ 'k8s_opensuse_v1': ['templates/*', 'templates/fragments/*'] }, author="SUSE Linux GmbH", author_email="opensuse-cloud@opensuse.org", description="Magnum openSUSE Kubernetes driver", license="Apache", keywords="magnum opensuse driver", entry_points={ 'magnum.template_definitions': [ 'k8s_opensuse_v1 = k8s_opensuse_v1:JeOSK8sTemplateDefinition' ] } ) magnum-6.1.0/contrib/templates/0000775000175100017510000000000013244017675016475 5ustar zuulzuul00000000000000magnum-6.1.0/contrib/templates/example/0000775000175100017510000000000013244017675020130 5ustar zuulzuul00000000000000magnum-6.1.0/contrib/templates/example/README.rst0000666000175100017510000000064113244017334021612 0ustar zuulzuul00000000000000======================== Example Cluster Template ======================== The purpose of this example template is to demonstrate working with cluster templates using magnum service. The Heat template used in this example (example.yaml) provisions a single server instance and does not produce a usable cluster. See ``_ for instructions. magnum-6.1.0/contrib/templates/example/example_template/0000775000175100017510000000000013244017675023456 5ustar zuulzuul00000000000000magnum-6.1.0/contrib/templates/example/example_template/example.yaml0000666000175100017510000000161413244017334025771 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is just an example Heat template. It only provisions a single server instance and does not produce a usable cluster. 
parameters: # # REQUIRED PARAMETERS # ssh_key_name: type: string description: name of ssh key to be provisioned on our server # # OPTIONAL PARAMETERS # server_image: type: string default: centos-atomic description: glance image used to boot the server server_flavor: type: string default: m1.small description: flavor to use when booting the server resources: example_server: type: "OS::Nova::Server" properties: image: get_param: server_image flavor: get_param: server_flavor key_name: get_param: ssh_key_name outputs: server_address: value: {get_attr: [example_server, accessIPv4]} node_addresses: value: [] magnum-6.1.0/contrib/templates/example/example_template/__init__.py0000666000175100017510000000231213244017334025557 0ustar zuulzuul00000000000000# Copyright (c) 2015 Rackspace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import os from magnum.drivers.common import template_def class ExampleTemplate(template_def.BaseTemplateDefinition): provides = [ {'server_type': 'vm', 'os': 'example', 'coe': 'example_coe'}, {'server_type': 'vm', 'os': 'example2', 'coe': 'example_coe'}, ] def __init__(self): super(ExampleTemplate, self).__init__() self.add_output('server_address', bay_attr='api_address') self.add_output('node_addresses', bay_attr='node_addresses') def template_path(self): return os.path.join(os.path.dirname(__file__), 'example.yaml') magnum-6.1.0/contrib/templates/example/setup.py0000666000175100017510000000215613244017334021640 0ustar zuulzuul00000000000000#!/usr/bin/env python # Copyright (c) 2015 Rackspace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import setuptools setuptools.setup( name="ExampleTemplate", version="0.1", packages=['example_template'], install_requires=['magnum'], package_data={ 'example_template': ['example.yaml'] }, author="Me", author_email="me@example.com", description="This is an Example Template", license="Apache", keywords="magnum template example", entry_points={ 'magnum.template_definitions': [ 'example_template = example_template:ExampleTemplate' ] } ) magnum-6.1.0/etc/0000775000175100017510000000000013244017675013612 5ustar zuulzuul00000000000000magnum-6.1.0/etc/magnum/0000775000175100017510000000000013244017675015076 5ustar zuulzuul00000000000000magnum-6.1.0/etc/magnum/api-paste.ini0000666000175100017510000000154413244017334017460 0ustar zuulzuul00000000000000[pipeline:main] pipeline = cors healthcheck http_proxy_to_wsgi request_id osprofiler authtoken api_v1 [app:api_v1] paste.app_factory = magnum.api.app:app_factory [filter:authtoken] acl_public_routes = /, /v1 paste.filter_factory = magnum.api.middleware.auth_token:AuthTokenMiddleware.factory [filter:osprofiler] paste.filter_factory = magnum.common.profiler:WsgiMiddleware.factory [filter:request_id] paste.filter_factory = oslo_middleware:RequestId.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = magnum [filter:healthcheck] paste.filter_factory = oslo_middleware:Healthcheck.factory backends = disable_by_file disable_by_file_path = /etc/magnum/healthcheck_disable [filter:http_proxy_to_wsgi] paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory oslo_config_project = magnum magnum-6.1.0/etc/magnum/magnum-config-generator.conf0000666000175100017510000000055313244017334022455 0ustar zuulzuul00000000000000[DEFAULT] output_file = etc/magnum/magnum.conf.sample wrap_width = 79 namespace = magnum.conf namespace = oslo.concurrency namespace = oslo.db namespace = oslo.log namespace = oslo.messaging namespace = oslo.middleware.cors namespace = oslo.policy 
namespace = oslo.service.periodic_task namespace = oslo.service.service namespace = keystonemiddleware.auth_token magnum-6.1.0/etc/magnum/README-magnum.conf.txt0000666000175100017510000000020113244017334020765 0ustar zuulzuul00000000000000To generate the sample magnum.conf file, run the following command from the top level of the magnum directory: tox -egenconfig magnum-6.1.0/etc/magnum/magnum-policy-generator.conf0000666000175100017510000000011013244017334022474 0ustar zuulzuul00000000000000[DEFAULT] output_file = etc/magnum/policy.yaml.sample namespace = magnummagnum-6.1.0/.testr.conf0000666000175100017510000000054213244017334015120 0ustar zuulzuul00000000000000[DEFAULT] test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-45} \ ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./magnum/tests/unit} $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list magnum-6.1.0/magnum/0000775000175100017510000000000013244017675014323 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/version.py0000666000175100017510000000127313244017334016357 0ustar zuulzuul00000000000000# Copyright 2013 - Noorul Islam K M # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import pbr.version version_info = pbr.version.VersionInfo('magnum') version_string = version_info.version_string magnum-6.1.0/magnum/conductor/0000775000175100017510000000000013244017675016323 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/conductor/tasks/0000775000175100017510000000000013244017675017450 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/conductor/tasks/heat_tasks.py0000666000175100017510000000333113244017334022142 0ustar zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.conductor import tasks class CreateStack(tasks.OSBaseTask): """CreateStack Task This task interfaces with Heat API and creates a stack based on parameters provided to the Task. """ def execute(self, stack_name, parameters, template, files): stack = self.os_client.stacks.create(stack_name=stack_name, parameters=parameters, template=template, files=files) return stack class UpdateStack(tasks.OSBaseTask): """UpdateStack Task This task interfaces with Heat API and update a stack based on parameters provided to the Task. """ def execute(self, stack_id, parameters, template, files): self.os_client.stacks.update(stack_id, parameters=parameters, template=template, files=files) class DeleteStack(tasks.OSBaseTask): """DeleteStack Task This task interfaces with Heat API and delete a stack based on parameters provided to the Task. 
""" def execute(self, stack_id): self.os_client.stacks.delete(stack_id) magnum-6.1.0/magnum/conductor/tasks/__init__.py0000666000175100017510000000146213244017334021556 0ustar zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import taskflow.task as task class OSBaseTask(task.Task): def __init__(self, os_client, name=None, **kwargs): self.os_client = os_client super(OSBaseTask, self).__init__(name=name, **kwargs) magnum-6.1.0/magnum/conductor/scale_manager.py0000777000175100017510000000644613244017334021465 0ustar zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc from oslo_log import log as logging from magnum.common import exception from magnum.drivers.common.driver import Driver from magnum.i18n import _ from magnum import objects LOG = logging.getLogger(__name__) def get_scale_manager(context, osclient, cluster): cluster_driver = Driver.get_driver_for_cluster(context, cluster) manager = cluster_driver.get_scale_manager(context, osclient, cluster) if not manager: LOG.warning( "Currently only kubernetes and mesos cluster scale manager " "are available") return manager class ScaleManager(object): def __init__(self, context, osclient, cluster): self.context = context self.osclient = osclient self.old_cluster = objects.Cluster.get_by_uuid(context, cluster.uuid) self.new_cluster = cluster def get_removal_nodes(self, hosts_output): if not self._is_scale_down(): return list() cluster = self.new_cluster stack = self.osclient.heat().stacks.get(cluster.stack_id) hosts = hosts_output.get_output_value(stack) if hosts is None: raise exception.MagnumException(_( "Output key '%(output_key)s' is missing from stack " "%(stack_id)s") % {'output_key': hosts_output.heat_output, 'stack_id': stack.id}) hosts_with_container = self._get_hosts_with_container(self.context, cluster) hosts_no_container = list(set(hosts) - hosts_with_container) LOG.debug('List of hosts that has no container: %s', str(hosts_no_container)) num_of_removal = self._get_num_of_removal() if len(hosts_no_container) < num_of_removal: LOG.warning( "About to remove %(num_removal)d nodes, which is larger than " "the number of empty nodes (%(num_empty)d). 
%(num_non_empty)d " "non-empty nodes will be removed.", { 'num_removal': num_of_removal, 'num_empty': len(hosts_no_container), 'num_non_empty': num_of_removal - len(hosts_no_container)}) hosts_to_remove = hosts_no_container[0:num_of_removal] LOG.info('Require removal of hosts: %s', hosts_to_remove) return hosts_to_remove def _is_scale_down(self): return self.new_cluster.node_count < self.old_cluster.node_count def _get_num_of_removal(self): return self.old_cluster.node_count - self.new_cluster.node_count @abc.abstractmethod def _get_hosts_with_container(self, context, cluster): """Return the hosts with container running on them.""" pass magnum-6.1.0/magnum/conductor/handlers/0000775000175100017510000000000013244017675020123 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/conductor/handlers/cluster_conductor.py0000777000175100017510000001643213244017334024241 0ustar zuulzuul00000000000000# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from heatclient import exc from oslo_log import log as logging from pycadf import cadftaxonomy as taxonomy import six from magnum.common import clients from magnum.common import exception from magnum.common import profiler from magnum.conductor.handlers.common import cert_manager from magnum.conductor.handlers.common import trust_manager from magnum.conductor import scale_manager from magnum.conductor import utils as conductor_utils import magnum.conf from magnum.drivers.common import driver from magnum.i18n import _ from magnum import objects from magnum.objects import fields CONF = magnum.conf.CONF LOG = logging.getLogger(__name__) @profiler.trace_cls("rpc") class Handler(object): def __init__(self): super(Handler, self).__init__() # Cluster Operations def cluster_create(self, context, cluster, create_timeout): LOG.debug('cluster_heat cluster_create') osc = clients.OpenStackClients(context) cluster.status = fields.ClusterStatus.CREATE_IN_PROGRESS cluster.status_reason = None cluster.create() try: # Create trustee/trust and set them to cluster trust_manager.create_trustee_and_trust(osc, cluster) # Generate certificate and set the cert reference to cluster cert_manager.generate_certificates_to_cluster(cluster, context=context) conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_CREATE, taxonomy.OUTCOME_PENDING) # Get driver cluster_driver = driver.Driver.get_driver_for_cluster(context, cluster) # Create cluster cluster_driver.create_cluster(context, cluster, create_timeout) cluster.save() except Exception as e: cluster.status = fields.ClusterStatus.CREATE_FAILED cluster.status_reason = six.text_type(e) cluster.save() conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_CREATE, taxonomy.OUTCOME_FAILURE) if isinstance(e, exc.HTTPBadRequest): e = exception.InvalidParameterValue(message=six.text_type(e)) raise e raise return cluster def cluster_update(self, context, cluster, rollback=False): LOG.debug('cluster_heat 
cluster_update') osc = clients.OpenStackClients(context) allow_update_status = ( fields.ClusterStatus.CREATE_COMPLETE, fields.ClusterStatus.UPDATE_COMPLETE, fields.ClusterStatus.RESUME_COMPLETE, fields.ClusterStatus.RESTORE_COMPLETE, fields.ClusterStatus.ROLLBACK_COMPLETE, fields.ClusterStatus.SNAPSHOT_COMPLETE, fields.ClusterStatus.CHECK_COMPLETE, fields.ClusterStatus.ADOPT_COMPLETE ) if cluster.status not in allow_update_status: conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE) operation = _('Updating a cluster when status is ' '"%s"') % cluster.status raise exception.NotSupported(operation=operation) delta = cluster.obj_what_changed() if not delta: return cluster manager = scale_manager.get_scale_manager(context, osc, cluster) # Get driver ct = conductor_utils.retrieve_cluster_template(context, cluster) cluster_driver = driver.Driver.get_driver(ct.server_type, ct.cluster_distro, ct.coe) # Update cluster try: conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_PENDING) cluster_driver.update_cluster(context, cluster, manager, rollback) cluster.status = fields.ClusterStatus.UPDATE_IN_PROGRESS cluster.status_reason = None except Exception as e: cluster.status = fields.ClusterStatus.UPDATE_FAILED cluster.status_reason = six.text_type(e) cluster.save() conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE) if isinstance(e, exc.HTTPBadRequest): e = exception.InvalidParameterValue(message=six.text_type(e)) raise e raise cluster.save() return cluster def cluster_delete(self, context, uuid): LOG.debug('cluster_conductor cluster_delete') osc = clients.OpenStackClients(context) cluster = objects.Cluster.get_by_uuid(context, uuid) ct = conductor_utils.retrieve_cluster_template(context, cluster) cluster_driver = driver.Driver.get_driver(ct.server_type, ct.cluster_distro, ct.coe) try: 
conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_PENDING) cluster_driver.delete_cluster(context, cluster) cluster.status = fields.ClusterStatus.DELETE_IN_PROGRESS cluster.status_reason = None except exc.HTTPNotFound: LOG.info('The cluster %s was not found during cluster' ' deletion.', cluster.id) try: trust_manager.delete_trustee_and_trust(osc, context, cluster) cert_manager.delete_certificates_from_cluster(cluster, context=context) cluster.destroy() except exception.ClusterNotFound: LOG.info('The cluster %s has been deleted by others.', uuid) conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_SUCCESS) return None except exc.HTTPConflict: conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE) raise exception.OperationInProgress(cluster_name=cluster.name) except Exception as unexp: conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE) cluster.status = fields.ClusterStatus.DELETE_FAILED cluster.status_reason = six.text_type(unexp) cluster.save() raise cluster.save() return None magnum-6.1.0/magnum/conductor/handlers/common/0000775000175100017510000000000013244017675021413 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/conductor/handlers/common/cert_manager.py0000777000175100017510000001451413244017334024416 0ustar zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import tempfile from oslo_log import log as logging import six from magnum.common import cert_manager from magnum.common import exception from magnum.common import short_id from magnum.common.x509 import operations as x509 CONDUCTOR_CLIENT_NAME = six.u('Magnum-Conductor') LOG = logging.getLogger(__name__) def _generate_ca_cert(issuer_name, context=None): """Generate and store ca_cert :param issuer_name: CA subject name :returns: CA cert uuid and CA cert, CA private key password """ ca_password = short_id.generate_id() ca_cert = x509.generate_ca_certificate(issuer_name, encryption_password=ca_password) ca_cert_ref = cert_manager.get_backend().CertManager.store_cert( certificate=ca_cert['certificate'], private_key=ca_cert['private_key'], private_key_passphrase=ca_password, name=issuer_name, context=context, ) LOG.debug('CA cert is created: %s', ca_cert_ref) return ca_cert_ref, ca_cert, ca_password def _generate_client_cert(issuer_name, ca_cert, ca_password, context=None): """Generate and store magnum_client_cert :param issuer_name: CA subject name :param ca_cert: CA certificate :param ca_password: CA private key password :returns: Magnum client cert uuid """ client_password = short_id.generate_id() # TODO(strigazi): set subject name and organization per driver # For RBAC kubernetes cluster we need the client to have: # subject_name: admin # organization_name system:masters # Non kubernetes drivers are not using the certificates fields # for authorization subject_name = 'admin' organization_name = 'system:masters' client_cert = x509.generate_client_certificate( issuer_name, subject_name, organization_name, ca_cert['private_key'], encryption_password=client_password, ca_key_password=ca_password, ) magnum_cert_ref = cert_manager.get_backend().CertManager.store_cert( certificate=client_cert['certificate'], private_key=client_cert['private_key'], 
private_key_passphrase=client_password, name=CONDUCTOR_CLIENT_NAME, context=context ) LOG.debug('Magnum client cert is created: %s', magnum_cert_ref) return magnum_cert_ref def _get_issuer_name(cluster): issuer_name = cluster.name # When user create a Cluster without name, the cluster.name is None. # We should use cluster.uuid as issuer name. if issuer_name is None: issuer_name = cluster.uuid return issuer_name def generate_certificates_to_cluster(cluster, context=None): """Generate ca_cert and magnum client cert and set to cluster :param cluster: The cluster to set CA cert and magnum client cert :returns: CA cert uuid and magnum client cert uuid """ try: issuer_name = _get_issuer_name(cluster) LOG.debug('Start to generate certificates: %s', issuer_name) ca_cert_ref, ca_cert, ca_password = _generate_ca_cert(issuer_name, context=context) magnum_cert_ref = _generate_client_cert(issuer_name, ca_cert, ca_password, context=context) cluster.ca_cert_ref = ca_cert_ref cluster.magnum_cert_ref = magnum_cert_ref except Exception: LOG.exception('Failed to generate certificates for Cluster: %s', cluster.uuid) raise exception.CertificatesToClusterFailed(cluster_uuid=cluster.uuid) def get_cluster_ca_certificate(cluster, context=None): ca_cert = cert_manager.get_backend().CertManager.get_cert( cluster.ca_cert_ref, resource_ref=cluster.uuid, context=context ) return ca_cert def get_cluster_magnum_cert(cluster, context=None): magnum_cert = cert_manager.get_backend().CertManager.get_cert( cluster.magnum_cert_ref, resource_ref=cluster.uuid, context=context ) return magnum_cert def create_client_files(cluster, context=None): ca_cert = get_cluster_ca_certificate(cluster, context) magnum_cert = get_cluster_magnum_cert(cluster, context) ca_cert_file = tempfile.NamedTemporaryFile() ca_cert_file.write(ca_cert.get_certificate()) ca_cert_file.flush() magnum_key_file = tempfile.NamedTemporaryFile() magnum_key_file.write(magnum_cert.get_decrypted_private_key()) magnum_key_file.flush() 
magnum_cert_file = tempfile.NamedTemporaryFile() magnum_cert_file.write(magnum_cert.get_certificate()) magnum_cert_file.flush() return ca_cert_file, magnum_key_file, magnum_cert_file def sign_node_certificate(cluster, csr, context=None): ca_cert = cert_manager.get_backend().CertManager.get_cert( cluster.ca_cert_ref, resource_ref=cluster.uuid, context=context ) node_cert = x509.sign(csr, _get_issuer_name(cluster), ca_cert.get_private_key(), ca_cert.get_private_key_passphrase()) return node_cert def delete_certificates_from_cluster(cluster, context=None): """Delete ca cert and magnum client cert from cluster :param cluster: The cluster which has certs """ for cert_ref in ['ca_cert_ref', 'magnum_cert_ref']: try: cert_ref = getattr(cluster, cert_ref, None) if cert_ref: cert_manager.get_backend().CertManager.delete_cert( cert_ref, resource_ref=cluster.uuid, context=context) except Exception: LOG.warning("Deleting certs is failed for Cluster %s", cluster.uuid) magnum-6.1.0/magnum/conductor/handlers/common/trust_manager.py0000777000175100017510000000403713244017334024641 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from magnum.common import exception from magnum.common import utils LOG = logging.getLogger(__name__) def create_trustee_and_trust(osc, cluster): try: password = utils.generate_password(length=18) trustee = osc.keystone().create_trustee( "%s_%s" % (cluster.uuid, cluster.project_id), password, ) cluster.trustee_username = trustee.name cluster.trustee_user_id = trustee.id cluster.trustee_password = password trust = osc.keystone().create_trust( cluster.trustee_user_id) cluster.trust_id = trust.id except Exception: LOG.exception( 'Failed to create trustee and trust for Cluster: %s', cluster.uuid) raise exception.TrusteeOrTrustToClusterFailed( cluster_uuid=cluster.uuid) def delete_trustee_and_trust(osc, context, cluster): try: kst = osc.keystone() # The cluster which is upgraded from Liberty doesn't have trust_id if cluster.trust_id: kst.delete_trust(context, cluster) except Exception: # Exceptions are already logged by keystone().delete_trust pass try: # The cluster which is upgraded from Liberty doesn't have # trustee_user_id if cluster.trustee_user_id: osc.keystone().delete_trustee(cluster.trustee_user_id) except Exception: # Exceptions are already logged by keystone().delete_trustee pass magnum-6.1.0/magnum/conductor/handlers/common/__init__.py0000666000175100017510000000000013244017334023504 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/conductor/handlers/federation_conductor.py0000666000175100017510000000220313244017334024664 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from magnum.common import profiler import magnum.conf CONF = magnum.conf.CONF @profiler.trace_cls("rpc") class Handler(object): def __init__(self): super(Handler, self).__init__() def federation_create(self, context, federation, create_timeout): raise NotImplementedError("This feature is not yet implemented.") def federation_update(self, context, federation, rollback=False): raise NotImplementedError("This feature is not yet implemented.") def federation_delete(self, context, uuid): raise NotImplementedError("This feature is not yet implemented.") magnum-6.1.0/magnum/conductor/handlers/indirection_api.py0000666000175100017510000000617013244017334023633 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import oslo_messaging as messaging from magnum.common import profiler from magnum.objects import base @profiler.trace_cls("rpc") class Handler(object): "Indirection API callbacks" def _object_dispatch(self, target, method, context, args, kwargs): """Dispatch a call to an object method. This ensures that object methods get called and any exception that is raised gets wrapped in an ExpectedException for forwarding back to the caller (without spamming the conductor logs). 
""" try: # NOTE(danms): Keep the getattr inside the try block since # a missing method is really a client problem return getattr(target, method)(context, *args, **kwargs) except Exception: raise messaging.ExpectedException() def object_class_action(self, context, objname, objmethod, objver, args, kwargs): """Perform a classmethod action on an object.""" objclass = base.MagnumObject.obj_class_from_name(objname, objver) result = self._object_dispatch(objclass, objmethod, context, args, kwargs) # NOTE(danms): The RPC layer will convert to primitives for us, # but in this case, we need to honor the version the client is # asking for, so we do it before returning here. return (result.obj_to_primitive(target_version=objver) if isinstance(result, base.MagnumObject) else result) def object_action(self, context, objinst, objmethod, args, kwargs): """Perform an action on an object.""" old_objinst = objinst.obj_clone() result = self._object_dispatch(objinst, objmethod, context, args, kwargs) updates = dict() # NOTE(danms): Diff the object with the one passed to us and # generate a list of changes to forward back for name, field in objinst.fields.items(): if not objinst.obj_attr_is_set(name): # Avoid demand-loading anything continue if (not old_objinst.obj_attr_is_set(name) or getattr(old_objinst, name) != getattr(objinst, name)): updates[name] = field.to_primitive(objinst, name, getattr(objinst, name)) updates['obj_what_changed'] = objinst.obj_what_changed() return updates, result def object_backport(self, context, objinst, target_version): return objinst.obj_to_primitive(target_version=target_version) magnum-6.1.0/magnum/conductor/handlers/__init__.py0000666000175100017510000000000013244017334022214 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/conductor/handlers/ca_conductor.py0000666000175100017510000000421413244017334023133 0ustar zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from magnum.common import profiler from magnum.conductor.handlers.common import cert_manager from magnum.drivers.common import driver from magnum import objects LOG = logging.getLogger(__name__) @profiler.trace_cls("rpc") class Handler(object): """Magnum CA RPC handler. These are the backend operations. They are executed by the backend service. API calls via AMQP (within the ReST API) trigger the handlers to be called. """ def __init__(self): super(Handler, self).__init__() def sign_certificate(self, context, cluster, certificate): LOG.debug("Creating self signed x509 certificate") signed_cert = cert_manager.sign_node_certificate(cluster, certificate.csr, context=context) certificate.pem = signed_cert return certificate def get_ca_certificate(self, context, cluster): ca_cert = cert_manager.get_cluster_ca_certificate(cluster, context=context) certificate = objects.Certificate.from_object_cluster(cluster) certificate.pem = ca_cert.get_certificate() return certificate def rotate_ca_certificate(self, context, cluster): cluster_driver = driver.Driver.get_driver_for_cluster(context, cluster) cluster_driver.rotate_ca_certificate(context, cluster) magnum-6.1.0/magnum/conductor/handlers/conductor_listener.py0000666000175100017510000000200213244017334024366 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the 
License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from magnum.common import profiler @profiler.trace_cls("rpc") class Handler(object): '''Listen on an AMQP queue named for the conductor. Allows individual conductors to communicate with each other for multi-conductor support. ''' def ping_conductor(self, context): '''Respond to conductor. Respond affirmatively to confirm that the conductor performing the action is still alive. ''' return True magnum-6.1.0/magnum/conductor/k8s_api.py0000777000175100017510000000477713244017334020247 0ustar zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile from kubernetes import client as k8s_config from kubernetes.client import api_client from kubernetes.client.apis import core_v1_api from oslo_log import log as logging from magnum.conductor.handlers.common.cert_manager import create_client_files LOG = logging.getLogger(__name__) class K8sAPI(core_v1_api.CoreV1Api): def _create_temp_file_with_content(self, content): """Creates temp file and write content to the file. 
:param content: file content :returns: temp file """ try: tmp = tempfile.NamedTemporaryFile(delete=True) tmp.write(content) tmp.flush() except Exception as err: LOG.error("Error while creating temp file: %s", err) raise return tmp def __init__(self, context, cluster): self.ca_file = None self.cert_file = None self.key_file = None if cluster.magnum_cert_ref: (self.ca_file, self.key_file, self.cert_file) = create_client_files(cluster, context) config = k8s_config.Configuration() config.host = cluster.api_address config.ssl_ca_cert = self.ca_file.name config.cert_file = self.cert_file.name config.key_file = self.key_file.name # build a connection with Kubernetes master client = api_client.ApiClient(configuration=config) super(K8sAPI, self).__init__(client) def __del__(self): if self.ca_file: self.ca_file.close() if self.cert_file: self.cert_file.close() if self.key_file: self.key_file.close() def create_k8s_api(context, cluster): """Create a kubernetes API client Creates connection with Kubernetes master and creates ApivApi instance to call Kubernetes APIs. :param context: The security context :param cluster: Cluster object """ return K8sAPI(context, cluster) magnum-6.1.0/magnum/conductor/monitors.py0000666000175100017510000000347113244017334020546 0ustar zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import abc from oslo_log import log import six from magnum.common import profiler import magnum.conf from magnum.drivers.common.driver import Driver LOG = log.getLogger(__name__) CONF = magnum.conf.CONF @profiler.trace_cls("rpc") @six.add_metaclass(abc.ABCMeta) class MonitorBase(object): def __init__(self, context, cluster): self.context = context self.cluster = cluster @abc.abstractproperty def metrics_spec(self): """Metric specification.""" @abc.abstractmethod def pull_data(self): """Pull data for monitoring.""" def get_metric_names(self): return self.metrics_spec.keys() def get_metric_unit(self, metric_name): return self.metrics_spec[metric_name]['unit'] def compute_metric_value(self, metric_name): func_name = self.metrics_spec[metric_name]['func'] func = getattr(self, func_name) return func() def create_monitor(context, cluster): cluster_driver = Driver.get_driver_for_cluster(context, cluster) monitor = cluster_driver.get_monitor(context, cluster) if monitor: return monitor LOG.debug("Cannot create monitor with cluster type '%s'", cluster.cluster_template.coe) return None magnum-6.1.0/magnum/conductor/api.py0000666000175100017510000001063613244017334017446 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""API for interfacing with Magnum Backend.""" from magnum.common import profiler from magnum.common import rpc_service import magnum.conf CONF = magnum.conf.CONF # The Backend API class serves as a AMQP client for communicating # on a topic exchange specific to the conductors. This allows the ReST # API to trigger operations on the conductors @profiler.trace_cls("rpc") class API(rpc_service.API): def __init__(self, transport=None, context=None, topic=None): super(API, self).__init__(transport, context, topic=CONF.conductor.topic) # Cluster Operations def cluster_create(self, cluster, create_timeout): return self._call('cluster_create', cluster=cluster, create_timeout=create_timeout) def cluster_create_async(self, cluster, create_timeout): self._cast('cluster_create', cluster=cluster, create_timeout=create_timeout) def cluster_delete(self, uuid): return self._call('cluster_delete', uuid=uuid) def cluster_delete_async(self, uuid): self._cast('cluster_delete', uuid=uuid) def cluster_update(self, cluster): return self._call('cluster_update', cluster=cluster) def cluster_update_async(self, cluster, rollback=False): self._cast('cluster_update', cluster=cluster, rollback=rollback) # Federation Operations def federation_create(self, federation, create_timeout): return self._call('federation_create', federation=federation, create_timeout=create_timeout) def federation_create_async(self, federation, create_timeout): self._cast('federation_create', federation=federation, create_timeout=create_timeout) def federation_delete(self, uuid): return self._call('federation_delete', uuid=uuid) def federation_delete_async(self, uuid): self._cast('federation_delete', uuid=uuid) def federation_update(self, federation): return self._call('federation_update', federation=federation) def federation_update_async(self, federation, rollback=False): self._cast('federation_update', federation=federation, rollback=rollback) # CA operations def sign_certificate(self, cluster, certificate): return 
self._call('sign_certificate', cluster=cluster, certificate=certificate) def get_ca_certificate(self, cluster): return self._call('get_ca_certificate', cluster=cluster) def rotate_ca_certificate(self, cluster): return self._call('rotate_ca_certificate', cluster=cluster) # Versioned Objects indirection API def object_class_action(self, context, objname, objmethod, objver, args, kwargs): "Indirection API callback" return self._client.call(context, 'object_class_action', objname=objname, objmethod=objmethod, objver=objver, args=args, kwargs=kwargs) def object_action(self, context, objinst, objmethod, args, kwargs): "Indirection API callback" return self._client.call(context, 'object_action', objinst=objinst, objmethod=objmethod, args=args, kwargs=kwargs) def object_backport(self, context, objinst, target_version): "Indirection API callback" return self._client.call(context, 'object_backport', objinst=objinst, target_version=target_version) @profiler.trace_cls("rpc") class ListenerAPI(rpc_service.API): def __init__(self, context=None, topic=None, server=None, timeout=None): super(ListenerAPI, self).__init__(context=context, topic=topic, server=server, timeout=timeout) def ping_conductor(self): return self._call('ping_conductor') magnum-6.1.0/magnum/conductor/__init__.py0000666000175100017510000000000013244017334020414 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/conductor/utils.py0000666000175100017510000000657713244017334020046 0ustar zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. from oslo_utils import uuidutils from pycadf import cadftaxonomy as taxonomy from pycadf import cadftype from pycadf import eventfactory from pycadf import resource from magnum.common import clients from magnum.common import rpc from magnum.objects import cluster from magnum.objects import cluster_template def retrieve_cluster(context, cluster_ident): if not uuidutils.is_uuid_like(cluster_ident): return cluster.Cluster.get_by_name(context, cluster_ident) else: return cluster.Cluster.get_by_uuid(context, cluster_ident) def retrieve_cluster_template(context, cluster): return cluster_template.ClusterTemplate.get_by_uuid( context, cluster.cluster_template_id) def retrieve_cluster_uuid(context, cluster_ident): if not uuidutils.is_uuid_like(cluster_ident): cluster_obj = cluster.Cluster.get_by_name(context, cluster_ident) return cluster_obj.uuid else: return cluster_ident def object_has_stack(context, cluster_uuid): osc = clients.OpenStackClients(context) obj = retrieve_cluster(context, cluster_uuid) stack = osc.heat().stacks.get(obj.stack_id) if (stack.stack_status == 'DELETE_COMPLETE' or stack.stack_status == 'DELETE_IN_PROGRESS'): return False return True def _get_request_audit_info(context): """Collect audit information about the request used for CADF. :param context: Request context :returns: Auditing data about the request :rtype: :class:'pycadf.Resource' """ user_id = None project_id = None domain_id = None if context: user_id = context.user_id project_id = context.project_id domain_id = context.domain_id initiator = resource.Resource(typeURI=taxonomy.ACCOUNT_USER) if user_id: initiator.user_id = user_id if project_id: initiator.project_id = project_id if domain_id: initiator.domain_id = domain_id return initiator def notify_about_cluster_operation(context, action, outcome): """Send a notification about cluster operation. 
:param action: CADF action being audited :param outcome: CADF outcome """ notifier = rpc.get_notifier() event = eventfactory.EventFactory().new_event( eventType=cadftype.EVENTTYPE_ACTIVITY, outcome=outcome, action=action, initiator=_get_request_audit_info(context), target=resource.Resource(typeURI='service/magnum/cluster'), observer=resource.Resource(typeURI='service/magnum/cluster')) service = 'magnum' event_type = '%(service)s.cluster.%(action)s' % { 'service': service, 'action': action} payload = event.as_dict() if outcome == taxonomy.OUTCOME_FAILURE: method = notifier.error else: method = notifier.info method(context, event_type, payload) magnum-6.1.0/magnum/i18n.py0000666000175100017510000000176113244017334015453 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module. See http://docs.openstack.org/developer/oslo.i18n/usage.html . 
""" import oslo_i18n DOMAIN = 'magnum' _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary def translate(value, user_locale): return oslo_i18n.translate(value, user_locale) def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) magnum-6.1.0/magnum/objects/0000775000175100017510000000000013244017675015754 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/objects/certificate.py0000666000175100017510000000302513244017334020602 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_versionedobjects import fields from magnum.objects import base @base.MagnumObjectRegistry.register class Certificate(base.MagnumPersistentObject, base.MagnumObject): # Version 1.0: Initial version # Version 1.1: Rename bay_uuid to cluster_uuid VERSION = '1.1' fields = { 'project_id': fields.StringField(nullable=True), 'user_id': fields.StringField(nullable=True), 'cluster_uuid': fields.StringField(nullable=True), 'csr': fields.StringField(nullable=True), 'pem': fields.StringField(nullable=True), } @classmethod def from_object_cluster(cls, cluster): return cls(project_id=cluster.project_id, user_id=cluster.user_id, cluster_uuid=cluster.uuid) @classmethod def from_db_cluster(cls, cluster): return cls(project_id=cluster['project_id'], user_id=cluster['user_id'], cluster_uuid=cluster['uuid']) magnum-6.1.0/magnum/objects/federation.py0000666000175100017510000002123613244017334020444 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import strutils from oslo_utils import uuidutils from oslo_versionedobjects import fields from magnum.common import exception from magnum.db import api as dbapi from magnum.objects import base from magnum.objects import fields as m_fields @base.MagnumObjectRegistry.register class Federation(base.MagnumPersistentObject, base.MagnumObject, base.MagnumObjectDictCompat): """Represents a Federation object. 
Version 1.0: Initial Version """ VERSION = '1.0' dbapi = dbapi.get_instance() fields = { 'id': fields.IntegerField(), 'uuid': fields.UUIDField(nullable=True), 'name': fields.StringField(nullable=True), 'project_id': fields.StringField(nullable=True), 'hostcluster_id': fields.StringField(nullable=True), 'member_ids': fields.ListOfStringsField(nullable=True), 'status': m_fields.FederationStatusField(nullable=True), 'status_reason': fields.StringField(nullable=True), 'properties': fields.DictOfStringsField(nullable=True) } @staticmethod def _from_db_object(federation, db_federation): """Converts a database entity to a formal object.""" for field in federation.fields: federation[field] = db_federation[field] federation.obj_reset_changes() return federation @staticmethod def _from_db_object_list(db_objects, cls, context): """Converts a list of database entities to a list of formal objects.""" return [Federation._from_db_object(cls(context), obj) for obj in db_objects] @base.remotable_classmethod def get(cls, context, federation_id): """Find a federation based on its id or uuid and return it. :param federation_id: the id *or* uuid of a federation. :param context: Security context :returns: a :class:`Federation` object. """ if strutils.is_int_like(federation_id): return cls.get_by_id(context, federation_id) elif uuidutils.is_uuid_like(federation_id): return cls.get_by_uuid(context, federation_id) else: raise exception.InvalidIdentity(identity=federation_id) @base.remotable_classmethod def get_by_id(cls, context, federation_id): """Find a federation based on its integer id and return it. :param federation_id: the id of a federation. :param context: Security context :returns: a :class:`Federation` object. 
""" db_federation = cls.dbapi.get_federation_by_id(context, federation_id) federation = Federation._from_db_object(cls(context), db_federation) return federation @base.remotable_classmethod def get_by_uuid(cls, context, uuid): """Find a federation based on uuid and return it. :param uuid: the uuid of a federation. :param context: Security context :returns: a :class:`Federation` object. """ db_federation = cls.dbapi.get_federation_by_uuid(context, uuid) federation = Federation._from_db_object(cls(context), db_federation) return federation @base.remotable_classmethod def get_count_all(cls, context, filters=None): """Get count of matching federation. :param context: The security context :param filters: filter dict, can includes 'name', 'project_id', 'hostcluster_id', 'member_ids', 'status' (should be a status list). :returns: Count of matching federation. """ return cls.dbapi.get_federation_count_all(context, filters=filters) @base.remotable_classmethod def get_by_name(cls, context, name): """Find a federation based on name and return a Federation object. :param name: the logical name of a federation. :param context: Security context :returns: a :class:`Federation` object. """ db_federation = cls.dbapi.get_federation_by_name(context, name) federation = Federation._from_db_object(cls(context), db_federation) return federation @base.remotable_classmethod def list(cls, context, limit=None, marker=None, sort_key=None, sort_dir=None, filters=None): """Return a list of Federation objects. :param context: Security context. :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :param filters: filter dict, can includes 'name', 'project_id', 'hostcluster_id', 'member_ids', 'status' (should be a status list). :returns: a list of :class:`Federation` object. 
""" db_federation = cls.dbapi.get_federation_list(context, limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir, filters=filters) return Federation._from_db_object_list(db_federation, cls, context) @base.remotable def create(self, context=None): """Create a Federation record in the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Federation(context) """ values = self.obj_get_changes() db_federation = self.dbapi.create_federation(values) self._from_db_object(self, db_federation) @base.remotable def destroy(self, context=None): """Delete the Federation from the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Federation(context) """ self.dbapi.destroy_federation(self.uuid) self.obj_reset_changes() @base.remotable def save(self, context=None): """Save updates to this Federation. Updates will be made column by column based on the result of self.what_changed(). :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Federation(context) """ updates = self.obj_get_changes() self.dbapi.update_federation(self.uuid, updates) self.obj_reset_changes() @base.remotable def refresh(self, context=None): """Load updates for this Federation. Loads a Federation with the same uuid from the database and checks for updated attributes. Updates are applied from the loaded Federation column by column, if there are any updates. :param context: Security context. 
NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Federation(context) """ current = self.__class__.get_by_uuid(self._context, uuid=self.uuid) for field in self.fields: if self.obj_attr_is_set(field) and self[field] != current[field]: self[field] = current[field] magnum-6.1.0/magnum/objects/stats.py0000666000175100017510000000254013244017334017457 0ustar zuulzuul00000000000000# coding=utf-8 # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fields from magnum.db import api as dbapi from magnum.objects import base @base.MagnumObjectRegistry.register class Stats(base.MagnumObject, base.MagnumObjectDictCompat): # Version 1.0: Initial version VERSION = '1.0' dbapi = dbapi.get_instance() fields = { 'clusters': fields.IntegerField(), 'nodes': fields.IntegerField(nullable=True) } @base.remotable_classmethod def get_cluster_stats(cls, context, project_id=None): """Return cluster stats for the given project. 
:param context: The security context :param project_id: project id """ clusters, nodes = cls.dbapi.get_cluster_stats(context, project_id) return cls(clusters=clusters, nodes=nodes) magnum-6.1.0/magnum/objects/x509keypair.py0000666000175100017510000001767313244017334020430 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import strutils from oslo_utils import uuidutils from oslo_versionedobjects import fields from magnum.common import exception from magnum.db import api as dbapi from magnum.objects import base @base.MagnumObjectRegistry.register class X509KeyPair(base.MagnumPersistentObject, base.MagnumObject): # Version 1.0: Initial version # Version 1.1: Added new method get_x509keypair_by_bay_uuid # Version 1.2: Remove bay_uuid, name, ca_cert and add intermediates # and private_key_passphrase VERSION = '1.2' dbapi = dbapi.get_instance() fields = { 'id': fields.IntegerField(), 'uuid': fields.UUIDField(nullable=True), 'certificate': fields.StringField(nullable=True), 'private_key': fields.StringField(nullable=True), 'intermediates': fields.StringField(nullable=True), 'private_key_passphrase': fields.StringField(nullable=True), 'project_id': fields.StringField(nullable=True), 'user_id': fields.StringField(nullable=True), } @staticmethod def _from_db_object(x509keypair, db_x509keypair): """Converts a database entity to a formal object.""" for field in x509keypair.fields: setattr(x509keypair, field, db_x509keypair[field]) 
x509keypair.obj_reset_changes() return x509keypair @staticmethod def _from_db_object_list(db_objects, cls, context): """Converts a list of database entities to a list of formal objects.""" return [X509KeyPair._from_db_object(cls(context), obj) for obj in db_objects] @base.remotable_classmethod def get(cls, context, x509keypair_id): """Find a X509KeyPair based on its id or uuid. Find X509KeyPair by id or uuid and return a X509KeyPair object. :param x509keypair_id: the id *or* uuid of a x509keypair. :param context: Security context :returns: a :class:`X509KeyPair` object. """ if strutils.is_int_like(x509keypair_id): return cls.get_by_id(context, x509keypair_id) elif uuidutils.is_uuid_like(x509keypair_id): return cls.get_by_uuid(context, x509keypair_id) else: raise exception.InvalidIdentity(identity=x509keypair_id) @base.remotable_classmethod def get_by_id(cls, context, x509keypair_id): """Find a X509KeyPair based on its integer id. Find X509KeyPair by id and return a X509KeyPair object. :param x509keypair_id: the id of a x509keypair. :param context: Security context :returns: a :class:`X509KeyPair` object. """ db_x509keypair = cls.dbapi.get_x509keypair_by_id(context, x509keypair_id) x509keypair = X509KeyPair._from_db_object(cls(context), db_x509keypair) return x509keypair @base.remotable_classmethod def get_by_uuid(cls, context, uuid): """Find a x509keypair based on uuid and return a :class:`X509KeyPair` object. :param uuid: the uuid of a x509keypair. :param context: Security context :returns: a :class:`X509KeyPair` object. """ db_x509keypair = cls.dbapi.get_x509keypair_by_uuid(context, uuid) x509keypair = X509KeyPair._from_db_object(cls(context), db_x509keypair) return x509keypair @base.remotable_classmethod def list(cls, context, limit=None, marker=None, sort_key=None, sort_dir=None, filters=None): """Return a list of X509KeyPair objects. :param context: Security context. :param limit: maximum number of resources to return in a single result. 
:param marker: pagination marker for large data sets. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :param filters: filter dict, can include 'x509keypairmodel_id', 'project_id', 'user_id'. :returns: a list of :class:`X509KeyPair` object. """ db_x509keypairs = cls.dbapi.get_x509keypair_list(context, limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir, filters=filters) return X509KeyPair._from_db_object_list(db_x509keypairs, cls, context) @base.remotable def create(self, context=None): """Create a X509KeyPair record in the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: X509KeyPair(context) """ values = self.obj_get_changes() db_x509keypair = self.dbapi.create_x509keypair(values) self._from_db_object(self, db_x509keypair) @base.remotable def destroy(self, context=None): """Delete the X509KeyPair from the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: X509KeyPair(context) """ self.dbapi.destroy_x509keypair(self.uuid) self.obj_reset_changes() @base.remotable def save(self, context=None): """Save updates to this X509KeyPair. Updates will be made column by column based on the result of self.what_changed(). :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. 
A context should be set when instantiating the object, e.g.: X509KeyPair(context) """ updates = self.obj_get_changes() self.dbapi.update_x509keypair(self.uuid, updates) self.obj_reset_changes() @base.remotable def refresh(self, context=None): """Loads updates for this X509KeyPair. Loads a x509keypair with the same uuid from the database and checks for updated attributes. Updates are applied from the loaded x509keypair column by column, if there are any updates. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: X509KeyPair(context) """ current = self.__class__.get_by_uuid(self._context, uuid=self.uuid) for field in self.fields: if self.obj_attr_is_set(field) and \ getattr(self, field) != getattr(current, field): setattr(self, field, getattr(current, field)) magnum-6.1.0/magnum/objects/cluster_template.py0000666000175100017510000002522513244017334021702 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_utils import strutils
from oslo_utils import uuidutils
from oslo_versionedobjects import fields

from magnum.common import exception
from magnum.db import api as dbapi
from magnum.objects import base
from magnum.objects import fields as m_fields


@base.MagnumObjectRegistry.register
class ClusterTemplate(base.MagnumPersistentObject, base.MagnumObject,
                      base.MagnumObjectDictCompat):
    """Versioned object wrapping the cluster_template database record."""

    # Version 1.0: Initial version
    # Version 1.1: Add 'registry_enabled' field
    # Version 1.2: Added 'network_driver' field
    # Version 1.3: Added 'labels' attribute
    # Version 1.4: Added 'insecure' attribute
    # Version 1.5: Changed type of 'coe' from StringField to BayTypeField
    # Version 1.6: Change 'insecure' to 'tls_disabled'
    # Version 1.7: Added 'public' field
    # Version 1.8: Added 'server_type' field
    # Version 1.9: Added 'volume_driver' field
    # Version 1.10: Removed 'ssh_authorized_key' field
    # Version 1.11: Added 'insecure_registry' field
    # Version 1.12: Added 'docker_storage_driver' field
    # Version 1.13: Added 'master_lb_enabled' field
    # Version 1.14: Added 'fixed_subnet' field
    # Version 1.15: Added 'floating_ip_enabled' field
    # Version 1.16: Renamed the class from "BayModel' to 'ClusterTemplate'
    # Version 1.17: 'coe' field type change to ClusterTypeField
    # Version 1.18: DockerStorageDriver is a StringField (was an Enum)
    VERSION = '1.18'

    # Shared handle to the DB API backend.
    dbapi = dbapi.get_instance()

    fields = {
        'id': fields.IntegerField(),
        'uuid': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'name': fields.StringField(nullable=True),
        'image_id': fields.StringField(nullable=True),
        'flavor_id': fields.StringField(nullable=True),
        'master_flavor_id': fields.StringField(nullable=True),
        'keypair_id': fields.StringField(nullable=True),
        'dns_nameserver': fields.StringField(nullable=True),
        'external_network_id': fields.StringField(nullable=True),
        'fixed_network': fields.StringField(nullable=True),
        'fixed_subnet': fields.StringField(nullable=True),
        'network_driver': fields.StringField(nullable=True),
        'volume_driver': fields.StringField(nullable=True),
        'apiserver_port': fields.IntegerField(nullable=True),
        'docker_volume_size': fields.IntegerField(nullable=True),
        'docker_storage_driver': fields.StringField(nullable=True),
        'cluster_distro': fields.StringField(nullable=True),
        'coe': m_fields.ClusterTypeField(nullable=True),
        'http_proxy': fields.StringField(nullable=True),
        'https_proxy': fields.StringField(nullable=True),
        'no_proxy': fields.StringField(nullable=True),
        'registry_enabled': fields.BooleanField(default=False),
        'labels': fields.DictOfStringsField(nullable=True),
        'tls_disabled': fields.BooleanField(default=False),
        'public': fields.BooleanField(default=False),
        'server_type': fields.StringField(nullable=True),
        'insecure_registry': fields.StringField(nullable=True),
        'master_lb_enabled': fields.BooleanField(default=False),
        'floating_ip_enabled': fields.BooleanField(default=True),
    }

    @staticmethod
    def _from_db_object(cluster_template, db_cluster_template):
        """Converts a database entity to a formal object."""
        for field in cluster_template.fields:
            cluster_template[field] = db_cluster_template[field]

        # Clear change tracking: a freshly loaded object has no pending edits.
        cluster_template.obj_reset_changes()
        return cluster_template

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Converts a list of database entities to a list of formal objects."""
        return [ClusterTemplate._from_db_object(cls(context), obj)
                for obj in db_objects]

    @base.remotable_classmethod
    def get(cls, context, cluster_template_id):
        """Find and return ClusterTemplate object based on its id or uuid.

        :param cluster_template_id: the id *or* uuid of a ClusterTemplate.
        :param context: Security context
        :returns: a :class:`ClusterTemplate` object.
        """
        # Dispatch on identifier shape: integer id vs. uuid string.
        if strutils.is_int_like(cluster_template_id):
            return cls.get_by_id(context, cluster_template_id)
        elif uuidutils.is_uuid_like(cluster_template_id):
            return cls.get_by_uuid(context, cluster_template_id)
        else:
            raise exception.InvalidIdentity(identity=cluster_template_id)

    @base.remotable_classmethod
    def get_by_id(cls, context, cluster_template_id):
        """Find and return ClusterTemplate object based on its integer id.

        :param cluster_template_id: the id of a ClusterTemplate.
        :param context: Security context
        :returns: a :class:`ClusterTemplate` object.
        """
        db_cluster_template = cls.dbapi.get_cluster_template_by_id(
            context, cluster_template_id)
        cluster_template = ClusterTemplate._from_db_object(cls(context),
                                                           db_cluster_template)
        return cluster_template

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        """Find and return ClusterTemplate object based on uuid.

        :param uuid: the uuid of a ClusterTemplate.
        :param context: Security context
        :returns: a :class:`ClusterTemplate` object.
        """
        db_cluster_template = cls.dbapi.get_cluster_template_by_uuid(
            context, uuid)
        cluster_template = ClusterTemplate._from_db_object(cls(context),
                                                           db_cluster_template)
        return cluster_template

    @base.remotable_classmethod
    def get_by_name(cls, context, name):
        """Find and return ClusterTemplate object based on name.

        :param name: the name of a ClusterTemplate.
        :param context: Security context
        :returns: a :class:`ClusterTemplate` object.
        """
        db_cluster_template = cls.dbapi.get_cluster_template_by_name(context,
                                                                     name)
        cluster_template = ClusterTemplate._from_db_object(cls(context),
                                                           db_cluster_template)
        return cluster_template

    @base.remotable_classmethod
    def list(cls, context, limit=None, marker=None, sort_key=None,
             sort_dir=None):
        """Return a list of ClusterTemplate objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single
                      result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :returns: a list of :class:`ClusterTemplate` object.

        """
        db_cluster_templates = cls.dbapi.get_cluster_template_list(
            context, limit=limit, marker=marker, sort_key=sort_key,
            sort_dir=sort_dir)
        return ClusterTemplate._from_db_object_list(db_cluster_templates, cls,
                                                    context)

    @base.remotable
    def create(self, context=None):
        """Create a ClusterTemplate record in the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: ClusterTemplate(context)
        """
        # Persist only the fields that were actually modified on this object.
        values = self.obj_get_changes()
        db_cluster_template = self.dbapi.create_cluster_template(values)
        self._from_db_object(self, db_cluster_template)

    @base.remotable
    def destroy(self, context=None):
        """Delete the ClusterTemplate from the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: ClusterTemplate(context)
        """
        self.dbapi.destroy_cluster_template(self.uuid)
        self.obj_reset_changes()

    @base.remotable
    def save(self, context=None):
        """Save updates to this ClusterTemplate.

        Updates will be made column by column based on the result
        of self.what_changed().

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: ClusterTemplate(context)
        """
        updates = self.obj_get_changes()
        self.dbapi.update_cluster_template(self.uuid, updates)
        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context=None):
        """Loads updates for this ClusterTemplate.
Loads a ClusterTemplate with the same uuid from the database and checks for updated attributes. Updates are applied from the loaded ClusterTemplate column by column, if there are any updates. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: ClusterTemplate(context) """ current = self.__class__.get_by_uuid(self._context, uuid=self.uuid) for field in self.fields: if self.obj_attr_is_set(field) and self[field] != current[field]: self[field] = current[field] magnum-6.1.0/magnum/objects/base.py0000666000175100017510000000572013244017334017236 0ustar zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Magnum common internal object model""" from oslo_versionedobjects import base as ovoo_base from oslo_versionedobjects import fields as ovoo_fields remotable_classmethod = ovoo_base.remotable_classmethod remotable = ovoo_base.remotable class MagnumObjectRegistry(ovoo_base.VersionedObjectRegistry): pass class MagnumObject(ovoo_base.VersionedObject): """Base class and object factory. This forms the base of all objects that can be remoted or instantiated via RPC. Simply defining a class that inherits from this base class will make it remotely instantiatable. 
Objects should implement the necessary "get" classmethod routines as well as "save" object methods as appropriate. """ OBJ_SERIAL_NAMESPACE = 'magnum_object' OBJ_PROJECT_NAMESPACE = 'magnum' def as_dict(self): return {k: getattr(self, k) for k in self.fields if self.obj_attr_is_set(k)} class MagnumObjectDictCompat(ovoo_base.VersionedObjectDictCompat): pass class MagnumPersistentObject(object): """Mixin class for Persistent objects. This adds the fields that we use in common for all persistent objects. """ fields = { 'created_at': ovoo_fields.DateTimeField(nullable=True), 'updated_at': ovoo_fields.DateTimeField(nullable=True), } class MagnumObjectIndirectionAPI(ovoo_base.VersionedObjectIndirectionAPI): def __init__(self): super(MagnumObjectIndirectionAPI, self).__init__() from magnum.conductor import api as conductor_api self._conductor = conductor_api.API() def object_action(self, context, objinst, objmethod, args, kwargs): return self._conductor.object_action(context, objinst, objmethod, args, kwargs) def object_class_action(self, context, objname, objmethod, objver, args, kwargs): return self._conductor.object_class_action(context, objname, objmethod, objver, args, kwargs) def object_backport(self, context, objinst, target_version): return self._conductor.object_backport(context, objinst, target_version) class MagnumObjectSerializer(ovoo_base.VersionedObjectSerializer): # Base class to use for object hydration OBJ_BASE_CLASS = MagnumObject magnum-6.1.0/magnum/objects/__init__.py0000666000175100017510000000254013244017334020060 0ustar zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.objects import certificate from magnum.objects import cluster from magnum.objects import cluster_template from magnum.objects import federation from magnum.objects import magnum_service from magnum.objects import quota from magnum.objects import stats from magnum.objects import x509keypair Cluster = cluster.Cluster ClusterTemplate = cluster_template.ClusterTemplate MagnumService = magnum_service.MagnumService Quota = quota.Quota X509KeyPair = x509keypair.X509KeyPair Certificate = certificate.Certificate Stats = stats.Stats Federation = federation.Federation __all__ = (Cluster, ClusterTemplate, MagnumService, X509KeyPair, Certificate, Stats, Quota, Federation ) magnum-6.1.0/magnum/objects/fields.py0000666000175100017510000001157313244017334017575 0ustar zuulzuul00000000000000# Copyright 2015 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_versionedobjects import fields class ClusterStatus(fields.Enum): CREATE_IN_PROGRESS = 'CREATE_IN_PROGRESS' CREATE_FAILED = 'CREATE_FAILED' CREATE_COMPLETE = 'CREATE_COMPLETE' UPDATE_IN_PROGRESS = 'UPDATE_IN_PROGRESS' UPDATE_FAILED = 'UPDATE_FAILED' UPDATE_COMPLETE = 'UPDATE_COMPLETE' DELETE_IN_PROGRESS = 'DELETE_IN_PROGRESS' DELETE_FAILED = 'DELETE_FAILED' DELETE_COMPLETE = 'DELETE_COMPLETE' RESUME_COMPLETE = 'RESUME_COMPLETE' RESUME_FAILED = 'RESUME_FAILED' RESTORE_COMPLETE = 'RESTORE_COMPLETE' ROLLBACK_IN_PROGRESS = 'ROLLBACK_IN_PROGRESS' ROLLBACK_FAILED = 'ROLLBACK_FAILED' ROLLBACK_COMPLETE = 'ROLLBACK_COMPLETE' SNAPSHOT_COMPLETE = 'SNAPSHOT_COMPLETE' CHECK_COMPLETE = 'CHECK_COMPLETE' ADOPT_COMPLETE = 'ADOPT_COMPLETE' ALL = (CREATE_IN_PROGRESS, CREATE_FAILED, CREATE_COMPLETE, UPDATE_IN_PROGRESS, UPDATE_FAILED, UPDATE_COMPLETE, DELETE_IN_PROGRESS, DELETE_FAILED, DELETE_COMPLETE, RESUME_COMPLETE, RESUME_FAILED, RESTORE_COMPLETE, ROLLBACK_IN_PROGRESS, ROLLBACK_FAILED, ROLLBACK_COMPLETE, SNAPSHOT_COMPLETE, CHECK_COMPLETE, ADOPT_COMPLETE) STATUS_FAILED = (CREATE_FAILED, UPDATE_FAILED, DELETE_FAILED, ROLLBACK_FAILED, RESUME_FAILED) def __init__(self): super(ClusterStatus, self).__init__(valid_values=ClusterStatus.ALL) class FederationStatus(fields.Enum): CREATE_IN_PROGRESS = 'CREATE_IN_PROGRESS' CREATE_FAILED = 'CREATE_FAILED' CREATE_COMPLETE = 'CREATE_COMPLETE' UPDATE_IN_PROGRESS = 'UPDATE_IN_PROGRESS' UPDATE_FAILED = 'UPDATE_FAILED' UPDATE_COMPLETE = 'UPDATE_COMPLETE' DELETE_IN_PROGRESS = 'DELETE_IN_PROGRESS' DELETE_FAILED = 'DELETE_FAILED' DELETE_COMPLETE = 'DELETE_COMPLETE' ALL = (CREATE_IN_PROGRESS, CREATE_FAILED, CREATE_COMPLETE, UPDATE_IN_PROGRESS, UPDATE_FAILED, UPDATE_COMPLETE, DELETE_IN_PROGRESS, DELETE_FAILED, DELETE_COMPLETE) STATUS_FAILED = (CREATE_FAILED, UPDATE_FAILED, DELETE_FAILED) def __init__(self): super(FederationStatus, self).__init__( valid_values=FederationStatus.ALL) class ContainerStatus(fields.Enum): ALL = ( ERROR, RUNNING, 
STOPPED, PAUSED, UNKNOWN, ) = ( 'Error', 'Running', 'Stopped', 'Paused', 'Unknown', ) def __init__(self): super(ContainerStatus, self).__init__( valid_values=ContainerStatus.ALL) class ClusterType(fields.Enum): ALL = ( KUBERNETES, SWARM, MESOS, DCOS, SWARM_MODE, ) = ( 'kubernetes', 'swarm', 'mesos', 'dcos', 'swarm-mode', ) def __init__(self): super(ClusterType, self).__init__(valid_values=ClusterType.ALL) class QuotaResourceName(fields.Enum): ALL = ( CLUSTER, ) = ( 'Cluster', ) def __init__(self): super(QuotaResourceName, self).__init__( valid_values=QuotaResourceName.ALL) class ServerType(fields.Enum): ALL = ( VM, BM, ) = ( 'vm', 'bm', ) def __init__(self): super(ServerType, self).__init__( valid_values=ServerType.ALL) class MagnumServiceState(fields.Enum): ALL = ( up, down ) = ( 'up', 'down', ) def __init__(self): super(MagnumServiceState, self).__init__( valid_values=MagnumServiceState.ALL) class MagnumServiceBinary(fields.Enum): ALL = ( magnum_conductor ) = ( 'magnum-conductor', ) def __init__(self): super(MagnumServiceBinary, self).__init__( valid_values=MagnumServiceBinary.ALL) class ListOfDictsField(fields.AutoTypedField): AUTO_TYPE = fields.List(fields.Dict(fields.FieldType())) class ClusterStatusField(fields.BaseEnumField): AUTO_TYPE = ClusterStatus() class MagnumServiceField(fields.BaseEnumField): AUTO_TYPE = MagnumServiceState() class MagnumServiceBinaryField(fields.BaseEnumField): AUTO_TYPE = MagnumServiceBinary() class ContainerStatusField(fields.BaseEnumField): AUTO_TYPE = ContainerStatus() class ClusterTypeField(fields.BaseEnumField): AUTO_TYPE = ClusterType() class ServerTypeField(fields.BaseEnumField): AUTO_TYPE = ServerType() class FederationStatusField(fields.BaseEnumField): AUTO_TYPE = FederationStatus() magnum-6.1.0/magnum/objects/magnum_service.py0000666000175100017510000001361213244017334021327 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with 
# the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_versionedobjects import fields

from magnum.db import api as dbapi
from magnum.objects import base


@base.MagnumObjectRegistry.register
class MagnumService(base.MagnumPersistentObject, base.MagnumObject):
    """Versioned object tracking liveness of a magnum service process."""

    # Version 1.0: Initial version
    VERSION = '1.0'

    dbapi = dbapi.get_instance()

    fields = {
        'id': fields.IntegerField(),
        'host': fields.StringField(nullable=True),
        'binary': fields.StringField(nullable=True),
        'disabled': fields.BooleanField(),
        'disabled_reason': fields.StringField(nullable=True),
        'last_seen_up': fields.DateTimeField(nullable=True),
        'forced_down': fields.BooleanField(),
        'report_count': fields.IntegerField(),
    }

    @staticmethod
    def _from_db_object(magnum_service, db_magnum_service):
        """Converts a database entity to a formal object."""
        for field in magnum_service.fields:
            setattr(magnum_service, field, db_magnum_service[field])

        magnum_service.obj_reset_changes()
        return magnum_service

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Converts a list of database entities to a list of formal objects."""
        return [MagnumService._from_db_object(cls(context), obj)
                for obj in db_objects]

    @base.remotable_classmethod
    def get_by_host_and_binary(cls, context, host, binary):
        """Find a magnum_service based on its hostname and binary.

        :param host: The host on which the binary is running.
        :param binary: The name of the binary.
        :param context: Security context.
        :returns: a :class:`MagnumService` object.
        """
        db_magnum_service = cls.dbapi.get_magnum_service_by_host_and_binary(
            host, binary)
        # Unlike most getters, this one returns None (no exception) when the
        # service record does not exist.
        if db_magnum_service is None:
            return None
        magnum_service = MagnumService._from_db_object(
            cls(context), db_magnum_service)
        return magnum_service

    @base.remotable_classmethod
    def list(cls, context, limit=None, marker=None, sort_key=None,
             sort_dir=None):
        """Return a list of MagnumService objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single
                      result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :returns: a list of :class:`MagnumService` object.
        """
        db_magnum_services = cls.dbapi.get_magnum_service_list(
            limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir)
        return MagnumService._from_db_object_list(db_magnum_services, cls,
                                                  context)

    @base.remotable
    def create(self, context=None):
        """Create a MagnumService record in the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: MagnumService(context)
        """
        # Persist only the fields that were actually modified on this object.
        values = self.obj_get_changes()
        db_magnum_service = self.dbapi.create_magnum_service(values)
        self._from_db_object(self, db_magnum_service)

    @base.remotable
    def destroy(self, context=None):
        """Delete the MagnumService from the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: MagnumService(context)
        """
        # Keyed by integer id (not uuid) — MagnumService has no uuid field.
        self.dbapi.destroy_magnum_service(self.id)
        self.obj_reset_changes()

    @base.remotable
    def save(self, context=None):
        """Save updates to this MagnumService.

        Updates will be made column by column based on the result
        of self.what_changed().

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: MagnumService(context)
        """
        updates = self.obj_get_changes()
        self.dbapi.update_magnum_service(self.id, updates)
        self.obj_reset_changes()

    @base.remotable
    def report_state_up(self, context=None):
        """Touching the magnum_service record to show aliveness.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: MagnumService(context)
        """
        # Bumping report_count marks the change, and save() persists it,
        # which is what keeps the service looking alive.
        self.report_count += 1
        self.save()
magnum-6.1.0/magnum/objects/cluster.py0000666000175100017510000002740513244017334020011 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import strutils
from oslo_utils import uuidutils
from oslo_versionedobjects import fields

from magnum.common import exception
from magnum.db import api as dbapi
from magnum.objects import base
from magnum.objects.cluster_template import ClusterTemplate
from magnum.objects import fields as m_fields


@base.MagnumObjectRegistry.register
class Cluster(base.MagnumPersistentObject, base.MagnumObject,
              base.MagnumObjectDictCompat):
    """Versioned object wrapping the cluster database record."""

    # Version 1.0: Initial version
    # Version 1.1: Added 'bay_create_timeout' field
    # Version 1.2: Add 'registry_trust_id' field
    # Version 1.3: Added 'baymodel' field
    # Version 1.4: Added more types of status to bay's status field
    # Version 1.5: Rename 'registry_trust_id' to 'trust_id'
    #              Add 'trustee_user_name', 'trustee_password',
    #              'trustee_user_id' field
    # Version 1.6: Add rollback support for Bay
    # Version 1.7: Added 'coe_version' and 'container_version' fields
    # Version 1.8: Rename 'baymodel' to 'cluster_template'
    # Version 1.9: Rename table name from 'bay' to 'cluster'
    #              Rename 'baymodel_id' to 'cluster_template_id'
    #              Rename 'bay_create_timeout' to 'create_timeout'
    # Version 1.10: Added 'keypair' field
    # Version 1.11: Added 'RESUME_FAILED' in status field
    # Version 1.12: Added 'get_stats' method
    # Version 1.13: Added get_count_all method
    # Version 1.14: Added 'docker_volume_size' field
    # Version 1.15: Added 'labels' field
    # Version 1.16: Added 'master_flavor_id' field
    # Version 1.17: Added 'flavor_id' field

    VERSION = '1.17'

    dbapi = dbapi.get_instance()

    fields = {
        'id': fields.IntegerField(),
        'uuid': fields.UUIDField(nullable=True),
        'name': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'cluster_template_id': fields.StringField(nullable=True),
        'keypair': fields.StringField(nullable=True),
        'docker_volume_size': fields.IntegerField(nullable=True),
        'labels': fields.DictOfStringsField(nullable=True),
        'master_flavor_id': fields.StringField(nullable=True),
        'flavor_id': fields.StringField(nullable=True),
        'stack_id': fields.StringField(nullable=True),
        'status': m_fields.ClusterStatusField(nullable=True),
        'status_reason': fields.StringField(nullable=True),
        'create_timeout': fields.IntegerField(nullable=True),
        'api_address': fields.StringField(nullable=True),
        'node_addresses': fields.ListOfStringsField(nullable=True),
        'node_count': fields.IntegerField(nullable=True),
        'master_count': fields.IntegerField(nullable=True),
        'discovery_url': fields.StringField(nullable=True),
        'master_addresses': fields.ListOfStringsField(nullable=True),
        'ca_cert_ref': fields.StringField(nullable=True),
        'magnum_cert_ref': fields.StringField(nullable=True),
        'cluster_template': fields.ObjectField('ClusterTemplate'),
        'trust_id': fields.StringField(nullable=True),
        'trustee_username': fields.StringField(nullable=True),
        'trustee_password': fields.StringField(nullable=True),
        'trustee_user_id': fields.StringField(nullable=True),
        'coe_version': fields.StringField(nullable=True),
        'container_version': fields.StringField(nullable=True)
    }

    @staticmethod
    def _from_db_object(cluster, db_cluster):
        """Converts a database entity to a formal object."""
        for field in cluster.fields:
            if field != 'cluster_template':
                cluster[field] = db_cluster[field]

        # Note(eliqiao): The following line needs to be placed outside the
        # loop because there is a dependency from cluster_template to
        # cluster_template_id. The cluster_template_id must be populated
        # first in the loop before it can be used to find the
        # cluster_template.
        cluster['cluster_template'] = ClusterTemplate.get_by_uuid(
            cluster._context, cluster.cluster_template_id)

        cluster.obj_reset_changes()
        return cluster

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Converts a list of database entities to a list of formal objects."""
        return [Cluster._from_db_object(cls(context), obj)
                for obj in db_objects]

    @base.remotable_classmethod
    def get(cls, context, cluster_id):
        """Find a cluster based on its id or uuid and return a Cluster object.

        :param cluster_id: the id *or* uuid of a cluster.
        :param context: Security context
        :returns: a :class:`Cluster` object.
        """
        # Dispatch on identifier shape: integer id vs. uuid string.
        if strutils.is_int_like(cluster_id):
            return cls.get_by_id(context, cluster_id)
        elif uuidutils.is_uuid_like(cluster_id):
            return cls.get_by_uuid(context, cluster_id)
        else:
            raise exception.InvalidIdentity(identity=cluster_id)

    @base.remotable_classmethod
    def get_by_id(cls, context, cluster_id):
        """Find a cluster based on its integer id and return a Cluster object.

        :param cluster_id: the id of a cluster.
        :param context: Security context
        :returns: a :class:`Cluster` object.
        """
        db_cluster = cls.dbapi.get_cluster_by_id(context, cluster_id)
        cluster = Cluster._from_db_object(cls(context), db_cluster)
        return cluster

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        """Find a cluster based on uuid and return a :class:`Cluster` object.

        :param uuid: the uuid of a cluster.
        :param context: Security context
        :returns: a :class:`Cluster` object.
        """
        db_cluster = cls.dbapi.get_cluster_by_uuid(context, uuid)
        cluster = Cluster._from_db_object(cls(context), db_cluster)
        return cluster

    @base.remotable_classmethod
    def get_count_all(cls, context, filters=None):
        """Get count of matching clusters.

        :param context: The security context
        :param filters: filter dict, can includes 'cluster_template_id',
                        'name', 'node_count', 'stack_id', 'api_address',
                        'node_addresses', 'project_id', 'user_id',
                        'status'(should be a status list), 'master_count'.
        :returns: Count of matching clusters.
        """
        return cls.dbapi.get_cluster_count_all(context, filters=filters)

    @base.remotable_classmethod
    def get_by_name(cls, context, name):
        """Find a cluster based on name and return a Cluster object.

        :param name: the logical name of a cluster.
        :param context: Security context
        :returns: a :class:`Cluster` object.
        """
        db_cluster = cls.dbapi.get_cluster_by_name(context, name)
        cluster = Cluster._from_db_object(cls(context), db_cluster)
        return cluster

    @base.remotable_classmethod
    def list(cls, context, limit=None, marker=None, sort_key=None,
             sort_dir=None, filters=None):
        """Return a list of Cluster objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single
                      result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :param filters: filter dict, can includes 'cluster_template_id',
                        'name', 'node_count', 'stack_id', 'api_address',
                        'node_addresses', 'project_id', 'user_id',
                        'status'(should be a status list), 'master_count'.
        :returns: a list of :class:`Cluster` object.

        """
        db_clusters = cls.dbapi.get_cluster_list(context, limit=limit,
                                                 marker=marker,
                                                 sort_key=sort_key,
                                                 sort_dir=sort_dir,
                                                 filters=filters)
        return Cluster._from_db_object_list(db_clusters, cls, context)

    @base.remotable_classmethod
    def get_stats(cls, context, project_id=None):
        """Return a list of Cluster objects.

        :param context: Security context.
        :param project_id: project id
        """
        return cls.dbapi.get_cluster_stats(project_id)

    @base.remotable
    def create(self, context=None):
        """Create a Cluster record in the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Cluster(context)
        """
        # Persist only the fields that were actually modified on this object.
        values = self.obj_get_changes()
        db_cluster = self.dbapi.create_cluster(values)
        self._from_db_object(self, db_cluster)

    @base.remotable
    def destroy(self, context=None):
        """Delete the Cluster from the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Cluster(context)
        """
        self.dbapi.destroy_cluster(self.uuid)
        self.obj_reset_changes()

    @base.remotable
    def save(self, context=None):
        """Save updates to this Cluster.

        Updates will be made column by column based on the result
        of self.what_changed().

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Cluster(context)
        """
        updates = self.obj_get_changes()
        self.dbapi.update_cluster(self.uuid, updates)
        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context=None):
        """Loads updates for this Cluster.

        Loads a Cluster with the same uuid from the database and
        checks for updated attributes. Updates are applied from
        the loaded Cluster column by column, if there are any updates.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Cluster(context)
        """
        current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
        # Copy only attributes that are both set locally and differ from the
        # freshly loaded copy.
        for field in self.fields:
            if self.obj_attr_is_set(field) and self[field] != current[field]:
                self[field] = current[field]
magnum-6.1.0/magnum/objects/quota.py0000666000175100017510000001253313244017334017455 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_versionedobjects import fields

from magnum.db import api as dbapi
from magnum.objects import base


@base.MagnumObjectRegistry.register
class Quota(base.MagnumPersistentObject, base.MagnumObject,
            base.MagnumObjectDictCompat):
    """Versioned object wrapping a per-project resource quota record."""

    # Version 1.0: Initial version
    VERSION = '1.0'

    dbapi = dbapi.get_instance()

    fields = {
        'id': fields.IntegerField(),
        'project_id': fields.StringField(nullable=False),
        'resource': fields.StringField(nullable=False),
        'hard_limit': fields.IntegerField(nullable=False),
    }

    @base.remotable_classmethod
    def get_quota_by_project_id_resource(cls, context, project_id, resource):
        """Find a quota based on its integer id and return a Quota object.

        :param project_id: the id of a project.
        :param resource: resource name.
        :param context: Security context
        :returns: a :class:`Quota` object.
        """
        db_quota = cls.dbapi.get_quota_by_project_id_resource(project_id,
                                                              resource)
        quota = Quota._from_db_object(cls(context), db_quota)
        return quota

    @staticmethod
    def _from_db_object(quota, db_quota):
        """Converts a database entity to a formal object."""
        for field in quota.fields:
            setattr(quota, field, db_quota[field])

        quota.obj_reset_changes()
        return quota

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Converts a list of database entities to a list of formal objects."""
        return [Quota._from_db_object(cls(context), obj)
                for obj in db_objects]

    @base.remotable_classmethod
    def get_by_id(cls, context, quota_id):
        """Find a quota based on its integer id and return a Quota object.

        :param quota_id: the id of a quota.
        :param context: Security context
        :returns: a :class:`Quota` object.
        """
        db_quota = cls.dbapi.get_quota_by_id(context, quota_id)
        quota = Quota._from_db_object(cls(context), db_quota)
        return quota

    @base.remotable_classmethod
    def list(cls, context, limit=None, marker=None, sort_key=None,
             sort_dir=None, filters=None):
        """Return a list of Quota objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single
                      result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :param filters: filter dict, can includes 'project_id',
                        'resource'.
        :returns: a list of :class:`Quota` object.

        """
        db_quotas = cls.dbapi.get_quota_list(context, limit=limit,
                                             marker=marker,
                                             sort_key=sort_key,
                                             sort_dir=sort_dir,
                                             filters=filters)
        return Quota._from_db_object_list(db_quotas, cls, context)

    @base.remotable_classmethod
    def quota_get_all_by_project_id(cls, context, project_id):
        """Find a quota based on project id.

        :param project_id: the project id.
        :param context: Security context
        :returns: a :class:`Quota` object.
        """
        quotas = cls.dbapi.get_quota_by_project_id(context, project_id)
        return Quota._from_db_object_list(quotas, cls, context)

    @base.remotable
    def create(self, context=None):
        """Save a quota based on project id.

        :param context: security context.
        :returns: a :class:`Quota` object.
        """
        # Persist only the fields that were actually modified on this object.
        values = self.obj_get_changes()
        db_quota = self.dbapi.create_quota(values)
        self._from_db_object(self, db_quota)

    @base.remotable
    def delete(self, context=None):
        """Delete the quota from the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Quota(context)
        """
        # Quota rows are keyed by (project_id, resource), not by uuid.
        self.dbapi.delete_quota(self.project_id, self.resource)
        self.obj_reset_changes()

    @base.remotable_classmethod
    def update_quota(cls, context, project_id, quota):
        """Save a quota based on project id.

        :param quota: quota.
        :returns: a :class:`Quota` object.
        """
        db_quota = cls.dbapi.update_quota(project_id, quota)
        return Quota._from_db_object(cls(context), db_quota)
magnum-6.1.0/magnum/common/0000775000175100017510000000000013244017675015613 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/common/profiler.py0000666000175100017510000000474213244017334020010 0ustar zuulzuul00000000000000# Copyright 2017 Fujitsu Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

###
# This code is taken from nova. Goal is minimal modification.
### from oslo_log import log as logging from oslo_utils import importutils import webob.dec from magnum.common import context import magnum.conf profiler = importutils.try_import("osprofiler.profiler") profiler_initializer = importutils.try_import("osprofiler.initializer") profiler_web = importutils.try_import("osprofiler.web") CONF = magnum.conf.CONF LOG = logging.getLogger(__name__) class WsgiMiddleware(object): def __init__(self, application, **kwargs): self.application = application @classmethod def factory(cls, global_conf, **local_conf): if profiler_web: return profiler_web.WsgiMiddleware.factory(global_conf, **local_conf) def filter_(app): return cls(app, **local_conf) return filter_ @webob.dec.wsgify def __call__(self, request): return request.get_response(self.application) def setup(binary, host): if hasattr(CONF, 'profiler') and CONF.profiler.enabled: profiler_initializer.init_from_conf( conf=CONF, context=context.get_admin_context().to_dict(), project="magnum", service=binary, host=host) LOG.info("OSprofiler is enabled.") def trace_cls(name, **kwargs): """Wrap the OSprofiler trace_cls. Wrap the OSprofiler trace_cls decorator so that it will not try to patch the class unless OSprofiler is present. :param name: The name of action. For example, wsgi, rpc, db, ... :param kwargs: Any other keyword args used by profiler.trace_cls """ def decorator(cls): if profiler and 'profiler' in CONF: trace_decorator = profiler.trace_cls(name, kwargs) return trace_decorator(cls) return cls return decorator magnum-6.1.0/magnum/common/urlfetch.py0000777000175100017510000000540513244017334020002 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utility for fetching a resource (e.g. a manifest) from a URL.""" from oslo_log import log as logging import requests from requests import exceptions from six.moves import urllib from magnum.common import exception import magnum.conf from magnum.i18n import _ CONF = magnum.conf.CONF LOG = logging.getLogger(__name__) class URLFetchError(exception.Invalid, IOError): pass def get(url, allowed_schemes=('http', 'https')): """Get the data at the specified URL. The URL must use the http: or https: schemes. The file: scheme is also supported if you override the allowed_schemes argument. Raise an IOError if getting the data fails. """ LOG.info('Fetching data from %s', url) components = urllib.parse.urlparse(url) if components.scheme not in allowed_schemes: raise URLFetchError(_('Invalid URL scheme %s') % components.scheme) if components.scheme == 'file': # nosec try: return urllib.request.urlopen(url).read() except urllib.error.URLError as uex: raise URLFetchError(_('Failed to retrieve manifest: %s') % uex) try: resp = requests.get(url, stream=True) resp.raise_for_status() # We cannot use resp.text here because it would download the # entire file, and a large enough file would bring down the # engine. The 'Content-Length' header could be faked, so it's # necessary to download the content in chunks to until # max_manifest_size is reached. The chunk_size we use needs # to balance CPU-intensive string concatenation with accuracy # (eg. it's possible to fetch 1000 bytes greater than # max_manifest_size with a chunk_size of 1000). 
reader = resp.iter_content(chunk_size=1000) result = "" for chunk in reader: result += chunk if len(result) > CONF.max_manifest_size: raise URLFetchError("Manifest exceeds maximum allowed" "size (%s bytes)" % CONF.max_manifest_size) return result except exceptions.RequestException as ex: raise URLFetchError(_('Failed to retrieve manifest: %s') % ex) magnum-6.1.0/magnum/common/cert_manager/0000775000175100017510000000000013244017675020242 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/common/cert_manager/cert_manager.py0000666000175100017510000000542713244017334023245 0ustar zuulzuul00000000000000# Copyright 2014, 2015 Rackspace US, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Certificate manager API """ import abc import six from magnum.common.x509 import operations @six.add_metaclass(abc.ABCMeta) class Cert(object): """Base class to represent all certificates.""" @abc.abstractmethod def get_certificate(self): """Returns the certificate.""" pass @abc.abstractmethod def get_intermediates(self): """Returns the intermediate certificates.""" pass @abc.abstractmethod def get_private_key(self): """Returns the private key for the certificate.""" pass def get_decrypted_private_key(self): """Returns the decrypted private key for the certificate.""" return operations.decrypt_key(self.get_private_key(), self.get_private_key_passphrase()) @abc.abstractmethod def get_private_key_passphrase(self): """Returns the passphrase for the private key.""" pass @six.add_metaclass(abc.ABCMeta) class CertManager(object): """Base Cert Manager Interface A Cert Manager is responsible for managing certificates for TLS. """ @abc.abstractmethod def store_cert(self, certificate, private_key, intermediates=None, private_key_passphrase=None, expiration=None, name='Magnum TLS Cert', **kwargs): """Stores (i.e., registers) a cert with the cert manager. This method stores the specified cert and returns its UUID that identifies it within the cert manager. If storage of the certificate data fails, a CertificateStorageException should be raised. """ pass @abc.abstractmethod def get_cert(self, cert_uuid, check_only=False, **kwargs): """Retrieves the specified cert. If check_only is True, don't perform any sort of registration. If the specified cert does not exist, a CertificateStorageException should be raised. """ pass @abc.abstractmethod def delete_cert(self, cert_uuid, **kwargs): """Deletes the specified cert. If the specified cert does not exist, a CertificateStorageException should be raised. 
""" pass magnum-6.1.0/magnum/common/cert_manager/local_cert_manager.py0000666000175100017510000001643513244017334024420 0ustar zuulzuul00000000000000# Copyright 2014, 2015 Rackspace US, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from os import path import uuid from oslo_log import log as logging from magnum.common.cert_manager import cert_manager from magnum.common import exception import magnum.conf from magnum.i18n import _ LOG = logging.getLogger(__name__) CONF = magnum.conf.CONF class Cert(cert_manager.Cert): """Representation of a Cert for local storage.""" def __init__(self, certificate, private_key, intermediates=None, private_key_passphrase=None): self.certificate = certificate self.intermediates = intermediates self.private_key = private_key self.private_key_passphrase = private_key_passphrase def get_certificate(self): return self.certificate def get_intermediates(self): return self.intermediates def get_private_key(self): return self.private_key def get_private_key_passphrase(self): return self.private_key_passphrase class CertManager(cert_manager.CertManager): """Cert Manager Interface that stores data locally. This Cert Manager should be used for testing purpose. """ @staticmethod def store_cert(certificate, private_key, intermediates=None, private_key_passphrase=None, **kwargs): """Stores (i.e., registers) a cert with the cert manager. This method stores the specified cert to the filesystem and returns a UUID that can be used to retrieve it. 
:param certificate: PEM encoded TLS certificate :param private_key: private key for the supplied certificate :param intermediates: ordered and concatenated intermediate certs :param private_key_passphrase: optional passphrase for the supplied key :returns: the UUID of the stored cert :raises CertificateStorageException: if certificate storage fails """ cert_ref = str(uuid.uuid4()) filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) LOG.warning( "Storing certificate data on the local filesystem. " "CertManager type 'local' should be used for testing purpose." ) try: filename_certificate = "{0}.crt".format(filename_base) with open(filename_certificate, 'w') as cert_file: cert_file.write(certificate) filename_private_key = "{0}.key".format(filename_base) with open(filename_private_key, 'w') as key_file: key_file.write(private_key) if intermediates: filename_intermediates = "{0}.int".format(filename_base) with open(filename_intermediates, 'w') as int_file: int_file.write(intermediates) if private_key_passphrase: filename_pkp = "{0}.pass".format(filename_base) with open(filename_pkp, 'w') as pass_file: pass_file.write(private_key_passphrase) except IOError as ioe: LOG.error("Failed to store certificate.") raise exception.CertificateStorageException(msg=str(ioe)) return cert_ref @staticmethod def get_cert(cert_ref, **kwargs): """Retrieves the specified cert. :param cert_ref: the UUID of the cert to retrieve :return: magnum.common.cert_manager.cert_manager.Cert representation of the certificate data :raises CertificateStorageException: if certificate retrieval fails """ LOG.warning( "Loading certificate {0} from the local filesystem. " "CertManager type 'local' should be used for testing purpose." 
.format(cert_ref)) filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) filename_certificate = "{0}.crt".format(filename_base) filename_private_key = "{0}.key".format(filename_base) filename_intermediates = "{0}.int".format(filename_base) filename_pkp = "{0}.pass".format(filename_base) cert_data = dict() try: with open(filename_certificate, 'r') as cert_file: cert_data['certificate'] = cert_file.read() except IOError: LOG.error( "Failed to read certificate for {0}." .format(cert_ref)) raise exception.CertificateStorageException( msg=_("Certificate could not be read.") ) try: with open(filename_private_key, 'r') as key_file: cert_data['private_key'] = key_file.read() except IOError: LOG.error( "Failed to read private key for {0}." .format(cert_ref)) raise exception.CertificateStorageException( msg=_("Private Key could not be read.") ) try: if path.isfile(filename_intermediates): with open(filename_intermediates, 'r') as int_file: cert_data['intermediates'] = int_file.read() except IOError as ioe: LOG.error("Failed to read certificate.") raise exception.CertificateStorageException(msg=str(ioe)) try: if path.isfile(filename_pkp): with open(filename_pkp, 'r') as pass_file: cert_data['private_key_passphrase'] = pass_file.read() except IOError as ioe: LOG.error("Failed to read certificate.") raise exception.CertificateStorageException(msg=str(ioe)) return Cert(**cert_data) @staticmethod def delete_cert(cert_ref, **kwargs): """Deletes the specified cert. :param cert_ref: the UUID of the cert to delete :raises CertificateStorageException: if certificate deletion fails """ LOG.warning( "Deleting certificate {0} from the local filesystem. " "CertManager type 'local' should be used for testing purpose." 
.format(cert_ref)) filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) filename_certificate = "{0}.crt".format(filename_base) filename_private_key = "{0}.key".format(filename_base) filename_intermediates = "{0}.int".format(filename_base) filename_pkp = "{0}.pass".format(filename_base) try: os.remove(filename_certificate) os.remove(filename_private_key) if path.isfile(filename_intermediates): os.remove(filename_intermediates) if path.isfile(filename_pkp): os.remove(filename_pkp) except IOError as ioe: LOG.error( "Failed to delete certificate {0}." .format(cert_ref)) raise exception.CertificateStorageException(msg=str(ioe)) magnum-6.1.0/magnum/common/cert_manager/x509keypair_cert_manager.py0000666000175100017510000000710313244017334025410 0ustar zuulzuul00000000000000# Copyright (c) 2016 Intel, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from magnum.common.cert_manager import cert_manager from magnum import objects class Cert(cert_manager.Cert): """Representation of a Cert for Magnum DB storage.""" def __init__(self, certificate, private_key, intermediates=None, private_key_passphrase=None): self.certificate = certificate self.intermediates = intermediates self.private_key = private_key self.private_key_passphrase = private_key_passphrase def get_certificate(self): return self.certificate def get_intermediates(self): return self.intermediates def get_private_key(self): return self.private_key def get_private_key_passphrase(self): return self.private_key_passphrase class CertManager(cert_manager.CertManager): """Cert Manager Interface that stores data locally in Magnum db. """ @staticmethod def store_cert(certificate, private_key, intermediates=None, private_key_passphrase=None, context=None, **kwargs): """Stores (i.e., registers) a cert with the cert manager. This method stores the specified cert to x509keypair model and returns a UUID that can be used to retrieve it. :param certificate: PEM encoded TLS certificate :param private_key: private key for the supplied certificate :param intermediates: ordered and concatenated intermediate certs :param private_key_passphrase: optional passphrase for the supplied key :returns: the UUID of the stored cert """ x509keypair = {'certificate': certificate, 'private_key': private_key, 'private_key_passphrase': private_key_passphrase, 'intermediates': intermediates, 'project_id': context.project_id, 'user_id': context.user_id} x509keypair_obj = objects.X509KeyPair(context, **x509keypair) x509keypair_obj.create() return x509keypair_obj.uuid @staticmethod def get_cert(cert_ref, context=None, **kwargs): """Retrieves the specified cert. 
:param cert_ref: the UUID of the cert to retrieve :return: magnum.common.cert_manager.cert_manager.Cert representation of the certificate data """ cert_data = dict() x509keypair_obj = objects.X509KeyPair.get_by_uuid(context, cert_ref) cert_data['certificate'] = x509keypair_obj.certificate cert_data['private_key'] = x509keypair_obj.private_key cert_data['private_key_passphrase'] = \ x509keypair_obj.private_key_passphrase cert_data['intermediates'] = x509keypair_obj.intermediates return Cert(**cert_data) @staticmethod def delete_cert(cert_ref, context=None, **kwargs): """Deletes the specified cert. :param cert_ref: the UUID of the cert to delete """ x509keypair_obj = objects.X509KeyPair.get_by_uuid(context, cert_ref) x509keypair_obj.destroy() magnum-6.1.0/magnum/common/cert_manager/__init__.py0000666000175100017510000000173013244017334022346 0ustar zuulzuul00000000000000# Copyright 2015 Rackspace US, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from stevedore import driver import magnum.conf CONF = magnum.conf.CONF _CERT_MANAGER_PLUGIN = None def get_backend(): global _CERT_MANAGER_PLUGIN if not _CERT_MANAGER_PLUGIN: _CERT_MANAGER_PLUGIN = driver.DriverManager( "magnum.cert_manager.backend", CONF.certificates.cert_manager_type).driver return _CERT_MANAGER_PLUGIN magnum-6.1.0/magnum/common/cert_manager/barbican_cert_manager.py0000666000175100017510000002100513244017334025054 0ustar zuulzuul00000000000000# Copyright 2014, 2015 Rackspace US, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from barbicanclient import exceptions as barbican_exc from barbicanclient.v1 import client as barbican_client from oslo_log import log as logging from oslo_utils import excutils from magnum.common.cert_manager import cert_manager from magnum.common import clients from magnum.common import context from magnum.common import exception as magnum_exc from magnum.i18n import _ LOG = logging.getLogger(__name__) class Cert(cert_manager.Cert): """Representation of a Cert based on the Barbican CertificateContainer.""" def __init__(self, cert_container): if not isinstance(cert_container, barbican_client.containers.CertificateContainer): raise TypeError(_( "Retrieved Barbican Container is not of the correct type " "(certificate).")) self._cert_container = cert_container # Container secrets are accessed upon query and can return as None, # don't return the payload if the secret is not available. 
def get_certificate(self): if self._cert_container.certificate: return self._cert_container.certificate.payload def get_intermediates(self): if self._cert_container.intermediates: return self._cert_container.intermediates.payload def get_private_key(self): if self._cert_container.private_key: return self._cert_container.private_key.payload def get_private_key_passphrase(self): if self._cert_container.private_key_passphrase: return self._cert_container.private_key_passphrase.payload _ADMIN_OSC = None def get_admin_clients(): global _ADMIN_OSC if not _ADMIN_OSC: _ADMIN_OSC = clients.OpenStackClients( context.RequestContext(is_admin=True)) return _ADMIN_OSC class CertManager(cert_manager.CertManager): """Certificate Manager that wraps the Barbican client API.""" @staticmethod def store_cert(certificate, private_key, intermediates=None, private_key_passphrase=None, expiration=None, name='Magnum TLS Cert', **kwargs): """Stores a certificate in the certificate manager. :param certificate: PEM encoded TLS certificate :param private_key: private key for the supplied certificate :param intermediates: ordered and concatenated intermediate certs :param private_key_passphrase: optional passphrase for the supplied key :param expiration: the expiration time of the cert in ISO 8601 format :param name: a friendly name for the cert :returns: the container_ref of the stored cert :raises Exception: if certificate storage fails """ connection = get_admin_clients().barbican() LOG.info("Storing certificate container '{0}' in Barbican." 
.format(name)) certificate_secret = None private_key_secret = None intermediates_secret = None pkp_secret = None try: certificate_secret = connection.secrets.create( payload=certificate, expiration=expiration, name="Certificate" ) private_key_secret = connection.secrets.create( payload=private_key, expiration=expiration, name="Private Key" ) certificate_container = connection.containers.create_certificate( name=name, certificate=certificate_secret, private_key=private_key_secret ) if intermediates: intermediates_secret = connection.secrets.create( payload=intermediates, expiration=expiration, name="Intermediates" ) certificate_container.intermediates = intermediates_secret if private_key_passphrase: pkp_secret = connection.secrets.create( payload=private_key_passphrase, expiration=expiration, name="Private Key Passphrase" ) certificate_container.private_key_passphrase = pkp_secret certificate_container.store() return certificate_container.container_ref # Barbican (because of Keystone-middleware) sometimes masks # exceptions strangely -- this will catch anything that it raises and # reraise the original exception, while also providing useful # feedback in the logs for debugging except magnum_exc.CertificateStorageException: for secret in [certificate_secret, private_key_secret, intermediates_secret, pkp_secret]: if secret and secret.secret_ref: old_ref = secret.secret_ref try: secret.delete() LOG.info("Deleted secret {0} ({1}) during rollback." .format(secret.name, old_ref)) except Exception: LOG.warning( "Failed to delete {0} ({1}) during rollback. " "This is probably not a problem." .format(secret.name, old_ref)) with excutils.save_and_reraise_exception(): LOG.exception("Error storing certificate data") @staticmethod def get_cert(cert_ref, service_name='Magnum', resource_ref=None, check_only=False, **kwargs): """Retrieves the specified cert and registers as a consumer. 
:param cert_ref: the UUID of the cert to retrieve :param service_name: Friendly name for the consuming service :param resource_ref: Full HATEOAS reference to the consuming resource :param check_only: Read Certificate data without registering :return: Magnum.certificates.common.Cert representation of the certificate data :raises Exception: if certificate retrieval fails """ connection = get_admin_clients().barbican() LOG.info( "Loading certificate container {0} from Barbican." .format(cert_ref)) try: if check_only: cert_container = connection.containers.get( container_ref=cert_ref ) else: cert_container = connection.containers.register_consumer( container_ref=cert_ref, name=service_name, url=resource_ref ) return Cert(cert_container) except barbican_exc.HTTPClientError: with excutils.save_and_reraise_exception(): LOG.exception("Error getting {0}".format(cert_ref)) @staticmethod def delete_cert(cert_ref, service_name='Magnum', resource_ref=None, **kwargs): """Deletes the specified cert. :param cert_ref: the UUID of the cert to delete :raises Exception: if certificate deletion fails """ connection = get_admin_clients().barbican() LOG.info( "Recursively deleting certificate container {0} from Barbican." .format(cert_ref)) try: certificate_container = connection.containers.get(cert_ref) certificate_container.certificate.delete() if certificate_container.intermediates: certificate_container.intermediates.delete() if certificate_container.private_key_passphrase: certificate_container.private_key_passphrase.delete() certificate_container.private_key.delete() certificate_container.delete() except barbican_exc.HTTPClientError: with excutils.save_and_reraise_exception(): LOG.exception( "Error recursively deleting certificate container {0}" .format(cert_ref)) magnum-6.1.0/magnum/common/docker_utils.py0000666000175100017510000000643613244017334020657 0ustar zuulzuul00000000000000# Copyright 2015 Rackspace All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import docker from docker.utils import utils from magnum.conductor.handlers.common import cert_manager from magnum.conductor import utils as conductor_utils import magnum.conf CONF = magnum.conf.CONF def parse_docker_image(image): image_parts = image.split(':', 1) image_repo = image_parts[0] image_tag = None if len(image_parts) > 1: image_tag = image_parts[1] return image_repo, image_tag def is_docker_library_version_atleast(version): if utils.compare_version(docker.version, version) <= 0: return True return False def is_docker_api_version_atleast(docker, version): if utils.compare_version(docker.version()['ApiVersion'], version) <= 0: return True return False @contextlib.contextmanager def docker_for_cluster(context, cluster): cluster_template = conductor_utils.retrieve_cluster_template( context, cluster) ca_cert, magnum_key, magnum_cert = None, None, None client_kwargs = dict() if not cluster_template.tls_disabled: (ca_cert, magnum_key, magnum_cert) = cert_manager.create_client_files(cluster, context) client_kwargs['ca_cert'] = ca_cert.name client_kwargs['client_key'] = magnum_key.name client_kwargs['client_cert'] = magnum_cert.name yield DockerHTTPClient( cluster.api_address, CONF.docker.docker_remote_api_version, CONF.docker.default_timeout, **client_kwargs ) if ca_cert: ca_cert.close() if magnum_key: magnum_key.close() if magnum_cert: magnum_cert.close() class DockerHTTPClient(docker.APIClient): def 
__init__(self, url='unix://var/run/docker.sock', ver=CONF.docker.docker_remote_api_version, timeout=CONF.docker.default_timeout, ca_cert=None, client_key=None, client_cert=None): if ca_cert and client_key and client_cert: ssl_config = docker.tls.TLSConfig( client_cert=(client_cert, client_key), verify=ca_cert, assert_hostname=False, ) else: ssl_config = False super(DockerHTTPClient, self).__init__( base_url=url, version=ver, timeout=timeout, tls=ssl_config ) def list_instances(self, inspect=False): res = [] for container in self.containers(all=True): info = self.inspect_container(container['Id']) if not info: continue if inspect: res.append(info) else: res.append(info['Config'].get('Hostname')) return res magnum-6.1.0/magnum/common/x509/0000775000175100017510000000000013244017675016320 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/common/x509/operations.py0000666000175100017510000002140613244017334021052 0ustar zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime
import uuid

import six

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography import x509
from oslo_log import log as logging

from magnum.common import exception
from magnum.common.x509 import validator
import magnum.conf

LOG = logging.getLogger(__name__)

CONF = magnum.conf.CONF


def generate_ca_certificate(subject_name, encryption_password=None):
    """Generate a self-signed CA certificate.

    :param subject_name: subject name of CA
    :param encryption_password: encryption passsword for private key
    :returns: generated private key and certificate pair
    """
    return _generate_self_signed_certificate(
        subject_name,
        _build_ca_extentions(),
        encryption_password=encryption_password
    )


def generate_client_certificate(issuer_name, subject_name,
                                organization_name, ca_key,
                                encryption_password=None,
                                ca_key_password=None):
    """Generate a client certificate signed by the given CA key.

    :param issuer_name: issuer name
    :param subject_name: subject name of client
    :param organization_name: Organization name of client
    :param ca_key: private key of CA
    :param encryption_password: encryption passsword for private key
    :param ca_key_password: private key password for given ca key
    :returns: generated private key and certificate pair
    """
    return _generate_certificate(issuer_name, subject_name,
                                 _build_client_extentions(),
                                 organization_name,
                                 ca_key=ca_key,
                                 encryption_password=encryption_password,
                                 ca_key_password=ca_key_password)


def _build_client_extentions():
    # Only Digital Signature and Key Encipherment are enabled for
    # client certs.
    usage = x509.KeyUsage(True, False, True, False, False,
                          False, False, False, False)
    usage_ext = x509.Extension(usage.oid, True, usage)

    eku = x509.ExtendedKeyUsage([x509.OID_CLIENT_AUTH])
    eku_ext = x509.Extension(eku.oid, False, eku)

    constraints = x509.BasicConstraints(ca=False, path_length=None)
    constraints_ext = x509.Extension(constraints.oid, True, constraints)

    return [usage_ext, eku_ext, constraints_ext]


def _build_ca_extentions():
    # Only Certificate Sign is enabled for the CA cert.
    usage = x509.KeyUsage(False, False, False, False, False,
                          True, False, False, False)
    usage_ext = x509.Extension(usage.oid, True, usage)

    constraints = x509.BasicConstraints(ca=True, path_length=0)
    constraints_ext = x509.Extension(constraints.oid, True, constraints)

    return [constraints_ext, usage_ext]


def _generate_self_signed_certificate(subject_name, extensions,
                                      encryption_password=None):
    # Self-signed: issuer and subject are the same name.
    return _generate_certificate(subject_name, subject_name, extensions,
                                 encryption_password=encryption_password)


def _generate_certificate(issuer_name, subject_name, extensions,
                          organization_name=None, ca_key=None,
                          encryption_password=None, ca_key_password=None):
    # Normalize names to text.
    if not isinstance(subject_name, six.text_type):
        subject_name = six.text_type(subject_name.decode('utf-8'))
    if organization_name and not isinstance(organization_name,
                                            six.text_type):
        organization_name = six.text_type(organization_name.decode('utf-8'))

    private_key = rsa.generate_private_key(
        public_exponent=65537,
        key_size=CONF.x509.rsa_key_size,
        backend=default_backend()
    )

    # The subject name doubles as the certificate common name.
    attrs = [x509.NameAttribute(x509.OID_COMMON_NAME, subject_name)]
    if organization_name:
        attrs.append(x509.NameAttribute(x509.OID_ORGANIZATION_NAME,
                                        organization_name))

    builder = x509.CertificateSigningRequestBuilder()
    builder = builder.subject_name(x509.Name(attrs))
    for ext in extensions:
        builder = builder.add_extension(ext.value, critical=ext.critical)

    # Without a CA key the certificate is self-signed: the freshly
    # generated key signs its own CSR.
    if not ca_key:
        ca_key = private_key
        ca_key_password = encryption_password

    request = builder.sign(private_key, hashes.SHA256(), default_backend())

    if encryption_password:
        algorithm = serialization.BestAvailableEncryption(
            encryption_password)
    else:
        algorithm = serialization.NoEncryption()

    pem_key = private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=algorithm
    )

    return {
        'private_key': pem_key,
        'certificate': sign(
            request, issuer_name, ca_key,
            ca_key_password=ca_key_password, skip_validation=True),
    }


def _load_pem_private_key(ca_key, ca_key_password=None):
    # Accept either an already-loaded RSAPrivateKey or PEM bytes/text.
    if not isinstance(ca_key, rsa.RSAPrivateKey):
        if isinstance(ca_key, six.text_type):
            ca_key = six.b(str(ca_key))
        if isinstance(ca_key_password, six.text_type):
            ca_key_password = six.b(str(ca_key_password))

        ca_key = serialization.load_pem_private_key(
            ca_key,
            password=ca_key_password,
            backend=default_backend()
        )
    return ca_key


def sign(csr, issuer_name, ca_key, ca_key_password=None,
         skip_validation=False):
    """Sign a given csr

    :param csr: certificate signing request object or pem encoded csr
    :param issuer_name: issuer name
    :param ca_key: private key of CA
    :param ca_key_password: private key password for given ca key
    :param skip_validation: skip csr validation if true
    :returns: generated certificate
    """
    ca_key = _load_pem_private_key(ca_key, ca_key_password)

    if not isinstance(issuer_name, six.text_type):
        issuer_name = six.text_type(issuer_name.decode('utf-8'))

    if isinstance(csr, six.text_type):
        csr = six.b(str(csr))
    if not isinstance(csr, x509.CertificateSigningRequest):
        try:
            csr = x509.load_pem_x509_csr(csr, backend=default_backend())
        except ValueError:
            LOG.exception("Received invalid csr {0}.".format(csr))
            raise exception.InvalidCsr(csr=csr)

    term_of_validity = CONF.x509.term_of_validity
    one_day = datetime.timedelta(1, 0, 0)
    expire_after = datetime.timedelta(term_of_validity, 0, 0)

    builder = x509.CertificateBuilder()
    builder = builder.subject_name(csr.subject)
    # The issuer name is set as the certificate common name.
    builder = builder.issuer_name(x509.Name([
        x509.NameAttribute(x509.OID_COMMON_NAME, issuer_name),
    ]))
    # Back-date not_valid_before by one day to tolerate clock skew.
    builder = builder.not_valid_before(datetime.datetime.today() - one_day)
    builder = builder.not_valid_after(datetime.datetime.today() +
                                      expire_after)
    builder = builder.serial_number(int(uuid.uuid4()))
    builder = builder.public_key(csr.public_key())

    if skip_validation:
        extensions = csr.extensions
    else:
        extensions = validator.filter_extensions(csr.extensions)

    for ext in extensions:
        builder = builder.add_extension(ext.value, critical=ext.critical)

    certificate = builder.sign(
        private_key=ca_key, algorithm=hashes.SHA256(),
        backend=default_backend()
    ).public_bytes(serialization.Encoding.PEM)

    return certificate


def decrypt_key(encrypted_key, password):
    """Return the given private key re-serialized without encryption."""
    private_key = _load_pem_private_key(encrypted_key, password)

    decrypted_pem = private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption()
    )
    return decrypted_pem
from cryptography import x509

from magnum.common import exception
from magnum.common.x509 import extensions
import magnum.conf

# Key usages that imply CA capability; stripped when allow_ca is False.
_CA_KEY_USAGES = [
    extensions.KeyUsages.KEY_CERT_SIGN.value[0],
    extensions.KeyUsages.CRL_SIGN.value[0]
]

CONF = magnum.conf.CONF


def filter_extensions(extensions):
    """Return extensions reduced to what configuration allows.

    Disallowed non-critical extensions are dropped, key usages are
    intersected with the allowed set, and (unless allow_ca is set)
    basic constraints are forced to non-CA.
    """
    allowed_key_usage = set(CONF.x509.allowed_key_usage)
    if not CONF.x509.allow_ca:
        allowed_key_usage = _remove_ca_key_usage(allowed_key_usage)

    filtered = []
    for ext in filter_allowed_extensions(extensions,
                                         CONF.x509.allowed_extensions):
        if ext.oid == x509.OID_KEY_USAGE:
            ext = _merge_key_usage(ext, allowed_key_usage)
        elif ext.oid == x509.OID_BASIC_CONSTRAINTS:
            if not CONF.x509.allow_ca:
                ext = _disallow_ca_in_basic_constraints(ext)
        filtered.append(ext)

    return filtered


def filter_allowed_extensions(extensions, allowed_extensions=None):
    """Ensure only accepted extensions are used."""
    # NOTE(review): x509.oid._OID_NAMES is a private cryptography API --
    # presumably stable in the pinned version; verify on upgrades.
    allowed = allowed_extensions or []

    for ext in extensions:
        ext_name = x509.oid._OID_NAMES.get(ext.oid, None)
        if ext_name in allowed:
            yield ext
        elif ext.critical:
            # A critical extension we cannot honor must be rejected.
            raise exception.CertificateValidationError(extension=ext)


def _merge_key_usage(key_usage, allowed_key_usage):
    critical = key_usage.critical
    requested = key_usage.value

    flags = []
    for usage in extensions.KeyUsages:
        name, attr = usage.value
        try:
            enabled = getattr(requested, attr)
        except ValueError:
            # ValueError is raised when encipher_only/decipher_only is
            # retrieved but key_agreement is False
            enabled = False
        if enabled and name not in allowed_key_usage:
            if critical:
                # Cannot silently drop a usage from a critical extension.
                raise exception.CertificateValidationError(
                    extension=key_usage)
            enabled = False
        flags.append(enabled)

    merged = x509.KeyUsage(*flags)
    return x509.Extension(merged.oid, critical, merged)


def _remove_ca_key_usage(allowed_key_usage):
    # Strip CA-only usages; discard() is a no-op when absent.
    for usage in _CA_KEY_USAGES:
        allowed_key_usage.discard(usage)
    return allowed_key_usage


def _disallow_ca_in_basic_constraints(basic_constraints):
    if basic_constraints.value.ca:
        if basic_constraints.critical:
            # A critical CA=true constraint cannot be downgraded.
            raise exception.CertificateValidationError(
                extension=basic_constraints)
        bc = x509.BasicConstraints(False, None)
        return x509.Extension(bc.oid, False, bc)
    return basic_constraints


# --- magnum/common/x509/extensions.py ---
# Copyright 2015 NEC Corporation.  Licensed under the Apache License,
# Version 2.0; see http://www.apache.org/licenses/LICENSE-2.0

import enum


class Extensions(enum.Enum):
    # __order__ keeps member iteration order stable on Python 2.
    __order__ = ('AUTHORITY_KEY_IDENTIFIER SUBJECT_KEY_IDENTIFIER '
                 'AUTHORITY_INFORMATION_ACCESS BASIC_CONSTRAINTS '
                 'CRL_DISTRIBUTION_POINTS CERTIFICATE_POLICIES '
                 'EXTENDED_KEY_USAGE OCSP_NO_CHECK INHIBIT_ANY_POLICY '
                 'KEY_USAGE NAME_CONSTRAINTS SUBJECT_ALTERNATIVE_NAME '
                 'ISSUER_ALTERNATIVE_NAME')

    AUTHORITY_KEY_IDENTIFIER = "authorityKeyIdentifier"
    SUBJECT_KEY_IDENTIFIER = "subjectKeyIdentifier"
    AUTHORITY_INFORMATION_ACCESS = "authorityInfoAccess"
    BASIC_CONSTRAINTS = "basicConstraints"
    CRL_DISTRIBUTION_POINTS = "cRLDistributionPoints"
    CERTIFICATE_POLICIES = "certificatePolicies"
    EXTENDED_KEY_USAGE = "extendedKeyUsage"
    OCSP_NO_CHECK = "OCSPNoCheck"
    INHIBIT_ANY_POLICY = "inhibitAnyPolicy"
    KEY_USAGE = "keyUsage"
    NAME_CONSTRAINTS = "nameConstraints"
    SUBJECT_ALTERNATIVE_NAME = "subjectAltName"
    ISSUER_ALTERNATIVE_NAME = "issuerAltName"


class KeyUsages(enum.Enum):
    # Each value pairs the human-readable usage name with the
    # corresponding cryptography KeyUsage attribute name.
    __order__ = ('DIGITAL_SIGNATURE CONTENT_COMMITMENT KEY_ENCIPHERMENT '
                 'DATA_ENCIPHERMENT KEY_AGREEMENT KEY_CERT_SIGN '
                 'CRL_SIGN ENCIPHER_ONLY DECIPHER_ONLY')

    DIGITAL_SIGNATURE = ("Digital Signature", "digital_signature")
    CONTENT_COMMITMENT = ("Non Repudiation", "content_commitment")
    KEY_ENCIPHERMENT = ("Key Encipherment", "key_encipherment")
    DATA_ENCIPHERMENT = ("Data Encipherment", "data_encipherment")
    KEY_AGREEMENT = ("Key Agreement", "key_agreement")
    KEY_CERT_SIGN = ("Certificate Sign", "key_cert_sign")
    CRL_SIGN = ("CRL Sign", "crl_sign")
    ENCIPHER_ONLY = ("Encipher Only", "encipher_only")
    DECIPHER_ONLY = ("Decipher Only", "decipher_only")


# --- magnum/common/service.py ---
# Copyright 2013 - Red Hat, Inc.  Licensed under the Apache License,
# Version 2.0; see http://www.apache.org/licenses/LICENSE-2.0

from oslo_log import log as logging

from magnum.common import config
import magnum.conf

CONF = magnum.conf.CONF


def prepare_service(argv=None):
    """Register options, parse CLI args and set up logging for magnum."""
    if argv is None:
        argv = []
    logging.register_options(CONF)
    config.parse_args(argv)
    config.set_config_defaults()
    logging.setup(CONF, 'magnum')
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Common RPC service and API tools for Magnum."""

import oslo_messaging as messaging
from oslo_messaging.rpc import dispatcher
from oslo_service import service
from oslo_utils import importutils

from magnum.common import profiler
from magnum.common import rpc
import magnum.conf
from magnum.objects import base as objects_base
from magnum.service import periodic
from magnum.servicegroup import magnum_service_periodic as servicegroup

osprofiler = importutils.try_import("osprofiler.profiler")

CONF = magnum.conf.CONF


def _init_serializer():
    """Build the serializer chain shared by RPC servers and clients."""
    base = rpc.RequestContextSerializer(
        objects_base.MagnumObjectSerializer())
    # NOTE(review): the extra RequestContextSerializer wrap in the
    # non-profiled branch looks redundant (it only delegates again);
    # kept as-is to preserve existing behaviour exactly.
    if osprofiler:
        return rpc.ProfilerRequestContextSerializer(base)
    return rpc.RequestContextSerializer(base)


class Service(service.Service):
    """An eventlet-executor RPC server wrapped as an oslo.service."""

    def __init__(self, topic, server, handlers, binary):
        super(Service, self).__init__()
        rpc_serializer = _init_serializer()
        rpc_transport = messaging.get_rpc_transport(CONF)
        # TODO(asalkeld) add support for version='x.y'
        policy = dispatcher.DefaultRPCAccessPolicy
        rpc_target = messaging.Target(topic=topic, server=server)
        self._server = messaging.get_rpc_server(rpc_transport, rpc_target,
                                                handlers,
                                                executor='eventlet',
                                                serializer=rpc_serializer,
                                                access_policy=policy)
        self.binary = binary
        profiler.setup(binary, CONF.host)

    def start(self):
        """Start serving RPC requests."""
        self._server.start()

    def create_periodic_tasks(self):
        """Register periodic and servicegroup tasks on the thread group."""
        if CONF.periodic_enable:
            periodic.setup(CONF, self.tg)
        servicegroup.setup(CONF, self.binary, self.tg)

    def stop(self):
        """Stop the RPC server and wait for in-flight requests."""
        if self._server:
            self._server.stop()
            self._server.wait()
        super(Service, self).stop()

    @classmethod
    def create(cls, topic, server, handlers, binary):
        """Factory matching the historical construction interface."""
        return cls(topic, server, handlers, binary)


class API(object):
    """Thin RPC client wrapper used by the API layer to reach services."""

    def __init__(self, transport=None, context=None, topic=None,
                 server=None, timeout=None):
        rpc_serializer = _init_serializer()
        if transport is None:
            exmods = rpc.get_allowed_exmods()
            transport = messaging.get_rpc_transport(
                CONF, allowed_remote_exmods=exmods)
        self._context = context
        if topic is None:
            topic = ''
        rpc_target = messaging.Target(topic=topic, server=server)
        self._client = messaging.RPCClient(transport, rpc_target,
                                           serializer=rpc_serializer,
                                           timeout=timeout)

    def _call(self, method, *args, **kwargs):
        # Synchronous invocation: blocks for the remote result.
        return self._client.call(self._context, method, *args, **kwargs)

    def _cast(self, method, *args, **kwargs):
        # Fire-and-forget invocation.
        self._client.cast(self._context, method, *args, **kwargs)

    def echo(self, message):
        """Asynchronously echo a message to the remote service."""
        self._cast('echo', message=message)
__all__ = [
    'init',
    'cleanup',
    'set_defaults',
    'add_extra_exmods',
    'clear_extra_exmods',
    'get_allowed_exmods',
    'RequestContextSerializer',
    'get_client',
    'get_server',
    'get_notifier',
]

import socket

import oslo_messaging as messaging
from oslo_messaging.rpc import dispatcher
from oslo_serialization import jsonutils
from oslo_utils import importutils

from magnum.common import context as magnum_context
from magnum.common import exception
import magnum.conf

profiler = importutils.try_import("osprofiler.profiler")

CONF = magnum.conf.CONF

# Module-level RPC state; populated by init() and torn down by cleanup().
TRANSPORT = None
NOTIFIER = None

ALLOWED_EXMODS = [
    exception.__name__,
]
EXTRA_EXMODS = []


def init(conf):
    """Initialize the module-level transport and notifier."""
    global TRANSPORT, NOTIFIER
    exmods = get_allowed_exmods()
    TRANSPORT = messaging.get_rpc_transport(conf,
                                            allowed_remote_exmods=exmods)
    serializer = RequestContextSerializer(JsonPayloadSerializer())
    NOTIFIER = messaging.Notifier(TRANSPORT, serializer=serializer)


def cleanup():
    """Tear down the transport and notifier created by init()."""
    global TRANSPORT, NOTIFIER
    assert TRANSPORT is not None
    assert NOTIFIER is not None
    TRANSPORT.cleanup()
    TRANSPORT = NOTIFIER = None


def set_defaults(control_exchange):
    """Set the default control exchange for oslo.messaging."""
    messaging.set_transport_defaults(control_exchange)


def add_extra_exmods(*args):
    """Allow additional exception modules to cross the RPC boundary."""
    EXTRA_EXMODS.extend(args)


def clear_extra_exmods():
    """Reset the extra allowed exception modules."""
    del EXTRA_EXMODS[:]


def get_allowed_exmods():
    """Return the full list of allowed remote exception modules."""
    return ALLOWED_EXMODS + EXTRA_EXMODS


class JsonPayloadSerializer(messaging.NoOpSerializer):
    """Serialize notification payloads to JSON-compatible primitives."""

    @staticmethod
    def serialize_entity(context, entity):
        return jsonutils.to_primitive(entity, convert_instances=True)


class RequestContextSerializer(messaging.Serializer):
    """Serialize entities via a base serializer and contexts via dicts."""

    def __init__(self, base):
        self._base = base

    def serialize_entity(self, context, entity):
        if not self._base:
            return entity
        return self._base.serialize_entity(context, entity)

    def deserialize_entity(self, context, entity):
        if not self._base:
            return entity
        return self._base.deserialize_entity(context, entity)

    def serialize_context(self, context):
        return context.to_dict()

    def deserialize_context(self, context):
        return magnum_context.RequestContext.from_dict(context)


class ProfilerRequestContextSerializer(RequestContextSerializer):
    """RequestContextSerializer that also carries osprofiler trace info."""

    def serialize_context(self, context):
        _context = super(ProfilerRequestContextSerializer,
                         self).serialize_context(context)

        prof = profiler.get()
        if prof:
            _context.update({"trace_info": {
                "hmac_key": prof.hmac_key,
                "base_id": prof.get_base_id(),
                "parent_id": prof.get_id()
            }})

        return _context

    def deserialize_context(self, context):
        trace_info = context.pop("trace_info", None)
        if trace_info:
            profiler.init(**trace_info)

        return super(ProfilerRequestContextSerializer,
                     self).deserialize_context(context)


def get_transport_url(url_str=None):
    """Parse a transport URL string using the global config."""
    return messaging.TransportURL.parse(CONF, url_str)


def get_client(target, version_cap=None, serializer=None, timeout=None):
    """Return an RPCClient for the target; requires init() first."""
    assert TRANSPORT is not None
    wrapper = (ProfilerRequestContextSerializer if profiler
               else RequestContextSerializer)
    return messaging.RPCClient(TRANSPORT,
                               target,
                               version_cap=version_cap,
                               serializer=wrapper(serializer),
                               timeout=timeout)


def get_server(target, endpoints, serializer=None):
    """Return an eventlet RPC server; requires init() first."""
    assert TRANSPORT is not None
    wrapper = (ProfilerRequestContextSerializer if profiler
               else RequestContextSerializer)
    access_policy = dispatcher.DefaultRPCAccessPolicy
    return messaging.get_rpc_server(TRANSPORT,
                                    target,
                                    endpoints,
                                    executor='eventlet',
                                    serializer=wrapper(serializer),
                                    access_policy=access_policy)


def get_notifier(service='container-infra', host=None, publisher_id=None):
    """Return a notifier prepared with a "service.host" publisher id."""
    assert NOTIFIER is not None
    myhost = CONF.host
    if myhost is None:
        myhost = socket.getfqdn()

    if not publisher_id:
        publisher_id = "%s.%s" % (service, host or myhost)
    return NOTIFIER.prepare(publisher_id=publisher_id)
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Magnum base exception handling.

Includes decorator for re-raising Magnum-type exceptions.
"""

import functools
import sys

from keystoneclient import exceptions as keystone_exceptions
from oslo_config import cfg
from oslo_log import log as logging
import six

import magnum.conf
from magnum.i18n import _

LOG = logging.getLogger(__name__)

CONF = magnum.conf.CONF

try:
    CONF.import_opt('fatal_exception_format_errors',
                    'oslo_versionedobjects.exception')
except cfg.NoSuchOptError as e:
    # Note:work around for magnum run against master branch
    # in devstack gate job, as magnum not branched yet
    # verisonobjects kilo/master different version can
    # cause issue here. As it changed import group. So
    # add here before branch to prevent gate failure.
    # Bug: #1447873
    CONF.import_opt('fatal_exception_format_errors',
                    'oslo_versionedobjects.exception',
                    group='oslo_versionedobjects')


def wrap_keystone_exception(func):
    """Wrap keystone exceptions and throw Magnum specific exceptions."""
    @functools.wraps(func)
    def wrapped(*args, **kw):
        try:
            return func(*args, **kw)
        except keystone_exceptions.AuthorizationFailure:
            raise AuthorizationFailure(
                client=func.__name__, message="reason: %s" % sys.exc_info()[1])
        except keystone_exceptions.ClientException:
            raise AuthorizationFailure(
                client=func.__name__,
                message="unexpected keystone client error occurred: %s"
                        % sys.exc_info()[1])
    return wrapped


class MagnumException(Exception):
    """Base Magnum Exception

    To correctly use this class, inherit from it and define
    a 'message' property. That message will get printf'd
    with the keyword arguments provided to the constructor.
    """
    message = _("An unknown exception occurred.")
    code = 500

    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs

        if 'code' not in self.kwargs and hasattr(self, 'code'):
            self.kwargs['code'] = self.code

        if message:
            self.message = message

        try:
            # Interpolate the message template with the provided kwargs.
            self.message = self.message % kwargs
        except Exception:
            # kwargs doesn't match a variable in the message
            # log the issue and the kwargs
            LOG.exception('Exception in string format operation, '
                          'kwargs: %s', kwargs)
            try:
                if CONF.fatal_exception_format_errors:
                    raise
            except cfg.NoSuchOptError:
                # Note: work around for Bug: #1447873
                if CONF.oslo_versionedobjects.fatal_exception_format_errors:
                    raise

        super(MagnumException, self).__init__(self.message)

    def __str__(self):
        if six.PY3:
            return self.message
        return self.message.encode('utf-8')

    def __unicode__(self):
        return self.message

    def format_message(self):
        # '_Remote' subclasses are rebuilt from serialized RPC faults;
        # their first arg already carries the formatted message.
        if self.__class__.__name__.endswith('_Remote'):
            return self.args[0]
        else:
            return six.text_type(self)


class ObjectNotFound(MagnumException):
    message = _("The %(name)s %(id)s could not be found.")
    code = 404


class ProjectNotFound(ObjectNotFound):
    message = _("The %(name)s %(id)s could not be found.")


class ResourceNotFound(ObjectNotFound):
    message = _("The %(name)s resource %(id)s could not be found.")


class AuthorizationFailure(MagnumException):
    message = _("%(client)s connection failed. %(message)s")
    code = 403


class Invalid(MagnumException):
    message = _("Unacceptable parameters.")
    code = 400


class InvalidUUID(Invalid):
    message = _("Expected a uuid but received %(uuid)s.")


class InvalidName(Invalid):
    message = _("Expected a name but received %(name)s.")


class InvalidDiscoveryURL(Invalid):
    message = _("Received invalid discovery URL '%(discovery_url)s' for "
                "discovery endpoint '%(discovery_endpoint)s'.")


class GetDiscoveryUrlFailed(MagnumException):
    message = _("Failed to get discovery url from '%(discovery_endpoint)s'.")


class InvalidClusterDiscoveryURL(Invalid):
    message = _("Invalid discovery URL '%(discovery_url)s'.")


class InvalidClusterSize(Invalid):
    message = _("Expected cluster size %(expect_size)d but get cluster "
                "size %(size)d from '%(discovery_url)s'.")


class GetClusterSizeFailed(MagnumException):
    message = _("Failed to get the size of cluster from '%(discovery_url)s'.")


class InvalidIdentity(Invalid):
    message = _("Expected an uuid or int but received %(identity)s.")


class InvalidCsr(Invalid):
    message = _("Received invalid csr %(csr)s.")


class InvalidSubnet(Invalid):
    message = _("Received invalid subnet %(subnet)s.")


class HTTPNotFound(ResourceNotFound):
    pass


class Conflict(MagnumException):
    message = _('Conflict.')
    code = 409


class ApiVersionsIntersect(Invalid):
    message = _("Version of %(name)s %(min_ver)s %(max_ver)s intersects "
                "with another versions.")


# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
    message = _("%(err)s")


class PatchError(Invalid):
    message = _("Couldn't apply patch '%(patch)s'. Reason: %(reason)s")


class NotAuthorized(MagnumException):
    message = _("Not authorized.")
    code = 403


class PolicyNotAuthorized(NotAuthorized):
    message = _("Policy doesn't allow %(action)s to be performed.")


class InvalidMAC(Invalid):
    message = _("Expected a MAC address but received %(mac)s.")


class ConfigInvalid(Invalid):
    message = _("Invalid configuration file. %(error_msg)s")


class ClusterTemplateNotFound(ResourceNotFound):
    message = _("ClusterTemplate %(clustertemplate)s could not be found.")


class ClusterTemplateAlreadyExists(Conflict):
    message = _("A ClusterTemplate with UUID %(uuid)s already exists.")


class ClusterTemplateReferenced(Invalid):
    message = _("ClusterTemplate %(clustertemplate)s is referenced by one or"
                " multiple clusters.")


class ClusterTemplatePublishDenied(NotAuthorized):
    message = _("Not authorized to set public flag for cluster template.")


class ClusterNotFound(ResourceNotFound):
    message = _("Cluster %(cluster)s could not be found.")


class ClusterAlreadyExists(Conflict):
    message = _("A cluster with UUID %(uuid)s already exists.")


class NotSupported(MagnumException):
    message = _("%(operation)s is not supported.")
    code = 400


class ClusterTypeNotSupported(NotSupported):
    message = _("Cluster type (%(server_type)s, %(os)s, %(coe)s)"
                " not supported.")


class RequiredParameterNotProvided(Invalid):
    message = _("Required parameter %(heat_param)s not provided.")


class OperationInProgress(Invalid):
    message = _("Cluster %(cluster_name)s already has an operation in "
                "progress.")


class ImageNotFound(ResourceNotFound):
    """The code here changed to 400 according to the latest document."""
    message = _("Image %(image_id)s could not be found.")
    code = 400


class ImageNotAuthorized(NotAuthorized):
    message = _("Not authorized for image %(image_id)s.")


class OSDistroFieldNotFound(ResourceNotFound):
    """The code here changed to 400 according to the latest document."""
    message = _("Image %(image_id)s doesn't contain os_distro field.")
    code = 400


class X509KeyPairNotFound(ResourceNotFound):
    message = _("A key pair %(x509keypair)s could not be found.")


class X509KeyPairAlreadyExists(Conflict):
    message = _("A key pair with UUID %(uuid)s already exists.")


class CertificateStorageException(MagnumException):
    message = _("Could not store certificate: %(msg)s")


class CertificateValidationError(Invalid):
    message = _("Extension '%(extension)s' not allowed")


class KeyPairNotFound(ResourceNotFound):
    message = _("Unable to find keypair %(keypair)s.")


class MagnumServiceNotFound(ResourceNotFound):
    message = _("A magnum service %(magnum_service_id)s could not be found.")


class MagnumServiceAlreadyExists(Conflict):
    message = _("A magnum service with ID %(id)s already exists.")


class UnsupportedK8sQuantityFormat(Invalid):
    message = _("Unsupported quantity format for k8s cluster.")


class UnsupportedDockerQuantityFormat(Invalid):
    message = _("Unsupported quantity format for Swarm cluster.")


class FlavorNotFound(ResourceNotFound):
    """The code here changed to 400 according to the latest document."""
    message = _("Unable to find flavor %(flavor)s.")
    code = 400


class ExternalNetworkNotFound(ResourceNotFound):
    """Ensure the network is not private.

    The code here changed to 400 according to the latest document.
    """
    # NOTE: the second string literal that used to follow the docstring
    # was a dead statement, not documentation; merged into one docstring.
    message = _("Unable to find external network %(network)s.")
    code = 400


class TrustCreateFailed(MagnumException):
    message = _("Failed to create trust for trustee %(trustee_user_id)s.")


class TrustDeleteFailed(MagnumException):
    message = _("Failed to delete trust %(trust_id)s.")


class TrusteeCreateFailed(MagnumException):
    message = _("Failed to create trustee %(username)s "
                "in domain %(domain_id)s")


class TrusteeDeleteFailed(MagnumException):
    message = _("Failed to delete trustee %(trustee_id)s")


class QuotaAlreadyExists(Conflict):
    message = _("Quota for project %(project_id)s already exists "
                "for resource %(resource)s.")


class QuotaNotFound(ResourceNotFound):
    message = _("Quota could not be found: %(msg)s")


class ResourceLimitExceeded(NotAuthorized):
    message = _('Resource limit exceeded: %(msg)s')


class RegionsListFailed(MagnumException):
    message = _("Failed to list regions.")


class ServicesListFailed(MagnumException):
    message = _("Failed to list services.")


class TrusteeOrTrustToClusterFailed(MagnumException):
    message = _("Failed to create trustee or trust for Cluster: "
                "%(cluster_uuid)s")


class CertificatesToClusterFailed(MagnumException):
    message = _("Failed to create certificates for Cluster: %(cluster_uuid)s")


class FederationNotFound(ResourceNotFound):
    message = _("Federation %(federation)s could not be found.")


class FederationAlreadyExists(Conflict):
    message = _("A federation with UUID %(uuid)s already exists.")


class MemberAlreadyExists(Conflict):
    # Fix: the adjacent literals previously joined without a space,
    # rendering "member of thefederation" in the user-facing message.
    message = _("A cluster with UUID %(uuid)s is already a member of the "
                "federation %(federation_name)s.")
from keystoneauth1.access import access as ka_access from keystoneauth1 import exceptions as ka_exception from keystoneauth1.identity import access as ka_access_plugin from keystoneauth1.identity import v3 as ka_v3 from keystoneauth1 import loading as ka_loading import keystoneclient.exceptions as kc_exception from keystoneclient.v3 import client as kc_v3 from oslo_log import log as logging from magnum.common import exception import magnum.conf from magnum.conf import keystone as ksconf from magnum.i18n import _ CONF = magnum.conf.CONF LOG = logging.getLogger(__name__) class KeystoneClientV3(object): """Keystone client wrapper so we can encapsulate logic in one place.""" def __init__(self, context): self.context = context self._client = None self._domain_admin_auth = None self._domain_admin_session = None self._domain_admin_client = None self._trustee_domain_id = None self._session = None @property def auth_url(self): # FIXME(pauloewerton): auth_url should be retrieved from keystone_auth # section by default return CONF[ksconf.CFG_LEGACY_GROUP].auth_uri.replace('v2.0', 'v3') @property def auth_token(self): return self.session.get_token() @property def session(self): if self._session: return self._session auth = self._get_auth() session = self._get_session(auth) self._session = session return session def _get_session(self, auth): session = ka_loading.load_session_from_conf_options( CONF, ksconf.CFG_GROUP, auth=auth) return session def _get_auth(self): if self.context.auth_token_info: access_info = ka_access.create(body=self.context.auth_token_info, auth_token=self.context.auth_token) auth = ka_access_plugin.AccessInfoPlugin(access_info) elif self.context.auth_token: auth = ka_v3.Token(auth_url=self.auth_url, token=self.context.auth_token) elif self.context.trust_id: auth_info = { 'auth_url': self.auth_url, 'username': self.context.user_name, 'password': self.context.password, 'user_domain_id': self.context.user_domain_id, 'user_domain_name': 
self.context.user_domain_name, 'trust_id': self.context.trust_id } auth = ka_v3.Password(**auth_info) elif self.context.is_admin: try: auth = ka_loading.load_auth_from_conf_options( CONF, ksconf.CFG_GROUP) except ka_exception.MissingRequiredOptions: auth = self._get_legacy_auth() else: msg = ('Keystone API connection failed: no password, ' 'trust_id or token found.') LOG.error(msg) raise exception.AuthorizationFailure(client='keystone', message='reason %s' % msg) return auth def _get_legacy_auth(self): LOG.warning('Auth plugin and its options for service user ' 'must be provided in [%(new)s] section. ' 'Using values from [%(old)s] section is ' 'deprecated.', {'new': ksconf.CFG_GROUP, 'old': ksconf.CFG_LEGACY_GROUP}) conf = getattr(CONF, ksconf.CFG_LEGACY_GROUP) # FIXME(htruta, pauloewerton): Conductor layer does not have # new v3 variables, such as project_name and project_domain_id. # The use of admin_* variables is related to Identity API v2.0, # which is now deprecated. We should also stop using hard-coded # domain info, as well as variables that refer to `tenant`, # as they are also v2 related. 
auth = ka_v3.Password(auth_url=self.auth_url, username=conf.admin_user, password=conf.admin_password, project_name=conf.admin_tenant_name, project_domain_id='default', user_domain_id='default') return auth @property def client(self): if self._client: return self._client client = kc_v3.Client(session=self.session, trust_id=self.context.trust_id) self._client = client return client @property def domain_admin_auth(self): user_domain_id = ( CONF.trust.trustee_domain_admin_domain_id or CONF.trust.trustee_domain_id ) user_domain_name = ( CONF.trust.trustee_domain_admin_domain_name or CONF.trust.trustee_domain_name ) if not self._domain_admin_auth: self._domain_admin_auth = ka_v3.Password( auth_url=self.auth_url, user_id=CONF.trust.trustee_domain_admin_id, username=CONF.trust.trustee_domain_admin_name, user_domain_id=user_domain_id, user_domain_name=user_domain_name, domain_id=CONF.trust.trustee_domain_id, domain_name=CONF.trust.trustee_domain_name, password=CONF.trust.trustee_domain_admin_password) return self._domain_admin_auth @property def domain_admin_session(self): if not self._domain_admin_session: session = ka_loading.session.Session().load_from_options( auth=self.domain_admin_auth, insecure=CONF[ksconf.CFG_LEGACY_GROUP].insecure, cacert=CONF[ksconf.CFG_LEGACY_GROUP].cafile, key=CONF[ksconf.CFG_LEGACY_GROUP].keyfile, cert=CONF[ksconf.CFG_LEGACY_GROUP].certfile) self._domain_admin_session = session return self._domain_admin_session @property def domain_admin_client(self): if not self._domain_admin_client: self._domain_admin_client = kc_v3.Client( session=self.domain_admin_session ) return self._domain_admin_client @property def trustee_domain_id(self): if not self._trustee_domain_id: try: access = self.domain_admin_auth.get_access( self.domain_admin_session ) except kc_exception.Unauthorized: msg = "Keystone client authentication failed" LOG.error(msg) raise exception.AuthorizationFailure(client='keystone', message='reason: %s' % msg) self._trustee_domain_id = 
access.domain_id return self._trustee_domain_id def create_trust(self, trustee_user): trustor_user_id = self.session.get_user_id() trustor_project_id = self.session.get_project_id() # inherit the role of the trustor, unless set CONF.trust.roles if CONF.trust.roles: roles = CONF.trust.roles else: roles = self.context.roles try: trust = self.client.trusts.create( trustor_user=trustor_user_id, project=trustor_project_id, trustee_user=trustee_user, impersonation=True, delegation_depth=0, role_names=roles) except Exception: LOG.exception('Failed to create trust') raise exception.TrustCreateFailed( trustee_user_id=trustee_user) return trust def delete_trust(self, context, cluster): if cluster.trust_id is None: return # Trust can only be deleted by the user who creates it. So when # other users in the same project want to delete the cluster, we need # use the trustee which can impersonate the trustor to delete the # trust. if context.user_id == cluster.user_id: client = self.client else: auth = ka_v3.Password(auth_url=self.auth_url, user_id=cluster.trustee_user_id, password=cluster.trustee_password, trust_id=cluster.trust_id) sess = ka_loading.session.Session().load_from_options( auth=auth, insecure=CONF[ksconf.CFG_LEGACY_GROUP].insecure, cacert=CONF[ksconf.CFG_LEGACY_GROUP].cafile, key=CONF[ksconf.CFG_LEGACY_GROUP].keyfile, cert=CONF[ksconf.CFG_LEGACY_GROUP].certfile) client = kc_v3.Client(session=sess) try: client.trusts.delete(cluster.trust_id) except kc_exception.NotFound: pass except Exception: LOG.exception('Failed to delete trust') raise exception.TrustDeleteFailed(trust_id=cluster.trust_id) def create_trustee(self, username, password): domain_id = self.trustee_domain_id try: user = self.domain_admin_client.users.create( name=username, password=password, domain=domain_id) except Exception: LOG.exception('Failed to create trustee') raise exception.TrusteeCreateFailed(username=username, domain_id=domain_id) return user def delete_trustee(self, trustee_id): try: 
self.domain_admin_client.users.delete(trustee_id) except kc_exception.NotFound: pass except Exception: LOG.exception('Failed to delete trustee') raise exception.TrusteeDeleteFailed(trustee_id=trustee_id) def get_validate_region_name(self, region_name): if region_name is None: message = _("region_name needs to be configured in magnum.conf") raise exception.InvalidParameterValue(message) """matches the region of a public endpoint for the Keystone service.""" try: regions = self.client.regions.list() except kc_exception.NotFound: pass except Exception: LOG.exception('Failed to list regions') raise exception.RegionsListFailed() region_list = [] for region in regions: region_list.append(region.id) if region_name not in region_list: raise exception.InvalidParameterValue(_( 'region_name %(region_name)s is invalid, ' 'expecting a region_name in %(region_name_list)s.') % { 'region_name': region_name, 'region_name_list': '/'.join( region_list + ['unspecified'])}) return region_name def is_octavia_enabled(): """Check if Octavia service is deployed in the cloud. Octavia is already an official LBaaS solution for Openstack (https://governance.openstack.org/tc/reference/projects/octavia.html) and will deprecate the neutron-lbaas extension starting from Queens release. We use Octavia instead of Neutron LBaaS API for load balancing functionality for k8s cluster if Octavia service is deployed and enabled in the cloud. """ # Put the import here to avoid circular importing. from magnum.common import context admin_context = context.make_admin_context() keystone = KeystoneClientV3(admin_context) try: octavia_svc = keystone.client.services.list(type='load-balancer') except Exception: LOG.exception('Failed to list services') raise exception.ServicesListFailed() # Always assume there is only one load balancing service configured. 
if octavia_svc and octavia_svc[0].enabled: return True return False magnum-6.1.0/magnum/common/policies/0000775000175100017510000000000013244017675017422 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/common/policies/bay.py0000666000175100017510000000475113244017334020550 0ustar zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from magnum.common.policies import base BAY = 'bay:%s' rules = [ policy.DocumentedRuleDefault( name=BAY % 'create', check_str=base.RULE_DENY_CLUSTER_USER, description='Create a new bay.', operations=[ { 'path': '/v1/bays', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=BAY % 'delete', check_str=base.RULE_DENY_CLUSTER_USER, description='Delete a bay.', operations=[ { 'path': '/v1/bays/{bay_ident}', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=BAY % 'detail', check_str=base.RULE_DENY_CLUSTER_USER, description='Retrieve a list of bays with detail.', operations=[ { 'path': '/v1/bays', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=BAY % 'get', check_str=base.RULE_DENY_CLUSTER_USER, description='Retrieve information about the given bay.', operations=[ { 'path': '/v1/bays/{bay_ident}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=BAY % 'get_all', check_str=base.RULE_DENY_CLUSTER_USER, description='Retrieve a list of bays.', operations=[ { 'path': '/v1/bays/', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=BAY % 'update', 
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from magnum.common.policies import base

CERTIFICATE = 'certificate:%s'

RULE_ADMIN_OR_USER_OR_CLUSTER_USER = base.RULE_ADMIN_OR_USER + " or " + \
    base.RULE_CLUSTER_USER

# (action, check_str, description, ((method, path), ...)) table driving the
# documented certificate policy rules; order matches the published policies.
_RULE_SPECS = (
    ('create', RULE_ADMIN_OR_USER_OR_CLUSTER_USER,
     'Sign a new certificate by the CA.',
     (('POST', '/v1/certificates'),)),
    ('get', RULE_ADMIN_OR_USER_OR_CLUSTER_USER,
     'Retrieve CA information about the given bay/cluster.',
     (('GET', '/v1/certificates/{bay_uuid/cluster_uuid}'),)),
    ('rotate_ca', base.RULE_ADMIN_OR_OWNER,
     'Rotate the CA certificate on the given bay/cluster.',
     (('PATCH', '/v1/certificates/{bay_uuid/cluster_uuid}'),)),
)

rules = [
    policy.DocumentedRuleDefault(
        name=CERTIFICATE % action,
        check_str=check_str,
        description=description,
        operations=[{'path': path, 'method': method}
                    for method, path in operations],
    )
    for action, check_str, description, operations in _RULE_SPECS
]


def list_rules():
    """Return the documented policy rules for certificates."""
    return rules
Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from magnum.common.policies import base FEDERATION = 'federation:%s' rules = [ policy.DocumentedRuleDefault( name=FEDERATION % 'create', check_str=base.RULE_DENY_CLUSTER_USER, description='Create a new federation.', operations=[ { 'path': '/v1/federations', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=FEDERATION % 'delete', check_str=base.RULE_DENY_CLUSTER_USER, description='Delete a federation.', operations=[ { 'path': '/v1/federations/{federation_ident}', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=FEDERATION % 'detail', check_str=base.RULE_DENY_CLUSTER_USER, description='Retrieve a list of federations with detail.', operations=[ { 'path': '/v1/federations', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=FEDERATION % 'get', check_str=base.RULE_DENY_CLUSTER_USER, description='Retrieve information about the given federation.', operations=[ { 'path': '/v1/federations/{federation_ident}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=FEDERATION % 'get_all', check_str=base.RULE_DENY_CLUSTER_USER, description='Retrieve a list of federations.', operations=[ { 'path': '/v1/federations/', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=FEDERATION % 'update', check_str=base.RULE_DENY_CLUSTER_USER, description='Update an existing federation.', operations=[ { 'path': '/v1/federations/{federation_ident}', 'method': 'PATCH' } ] ) ] def 
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from magnum.common.policies import base

STATS = 'stats:%s'

# Single documented rule: reading magnum stats is limited to admins or the
# owning project.
_GET_ALL_OPERATIONS = [{'path': '/v1/stats', 'method': 'GET'}]

rules = [
    policy.DocumentedRuleDefault(
        name=STATS % 'get_all',
        check_str=base.RULE_ADMIN_OR_OWNER,
        description='Retrieve magnum stats.',
        operations=_GET_ALL_OPERATIONS,
    ),
]


def list_rules():
    """Return the documented policy rules for stats."""
    return rules
from oslo_policy import policy

from magnum.common.policies import base

CLUSTER_TEMPLATE = 'clustertemplate:%s'

# (action, check_str, description, ((method, path), ...)) table driving the
# documented cluster-template policy rules; order matches the published
# policies.
_RULE_SPECS = (
    ('create', base.RULE_DENY_CLUSTER_USER,
     'Create a new cluster template.',
     (('POST', '/v1/clustertemplates'),)),
    ('delete', base.RULE_DENY_CLUSTER_USER,
     'Delete a cluster template.',
     (('DELETE', '/v1/clustertemplate/{clustertemplate_ident}'),)),
    ('delete_all_projects', base.RULE_ADMIN_API,
     'Delete a cluster template from any project.',
     (('DELETE', '/v1/clustertemplate/{clustertemplate_ident}'),)),
    ('detail_all_projects', base.RULE_ADMIN_API,
     'Retrieve a list of cluster templates with detail across projects.',
     (('GET', '/v1/clustertemplates'),)),
    ('detail', base.RULE_DENY_CLUSTER_USER,
     'Retrieve a list of cluster templates with detail.',
     (('GET', '/v1/clustertemplates'),)),
    ('get', base.RULE_DENY_CLUSTER_USER,
     'Retrieve information about the given cluster template.',
     (('GET', '/v1/clustertemplate/{clustertemplate_ident}'),)),
    ('get_one_all_projects', base.RULE_ADMIN_API,
     'Retrieve information about the given cluster template across project.',
     (('GET', '/v1/clustertemplate/{clustertemplate_ident}'),)),
    ('get_all', base.RULE_DENY_CLUSTER_USER,
     'Retrieve a list of cluster templates.',
     (('GET', '/v1/clustertemplates'),)),
    ('get_all_all_projects', base.RULE_ADMIN_API,
     'Retrieve a list of cluster templates across projects.',
     (('GET', '/v1/clustertemplates'),)),
    ('update', base.RULE_DENY_CLUSTER_USER,
     'Update an existing cluster template.',
     (('PATCH', '/v1/clustertemplate/{clustertemplate_ident}'),)),
    ('publish', base.RULE_ADMIN_API,
     'Publish an existing cluster template.',
     (('POST', '/v1/clustertemplates'),
      ('PATCH', '/v1/clustertemplates'))),
)

rules = [
    policy.DocumentedRuleDefault(
        name=CLUSTER_TEMPLATE % action,
        check_str=check_str,
        description=description,
        operations=[{'path': path, 'method': method}
                    for method, path in operations],
    )
    for action, check_str, description, operations in _RULE_SPECS
]


def list_rules():
    """Return the documented policy rules for cluster templates."""
    return rules
from oslo_policy import policy

# Public check-string aliases referenced by the per-resource policy modules.
ROLE_ADMIN = 'rule:context_is_admin'
RULE_ADMIN_OR_OWNER = 'rule:admin_or_owner'
RULE_ADMIN_API = 'rule:admin_api'
RULE_ADMIN_OR_USER = 'rule:admin_or_user'
RULE_CLUSTER_USER = 'rule:cluster_user'
RULE_DENY_CLUSTER_USER = 'rule:deny_cluster_user'

# (name, check_str) pairs for the shared base rules, in registration order.
_BASE_RULES = (
    ('context_is_admin', 'role:admin'),
    ('admin_or_owner', 'is_admin:True or project_id:%(project_id)s'),
    ('admin_api', 'rule:context_is_admin'),
    ('admin_or_user', 'is_admin:True or user_id:%(user_id)s'),
    ('cluster_user', 'user_id:%(trustee_user_id)s'),
    ('deny_cluster_user', 'not domain_id:%(trustee_domain_id)s'),
)

rules = [
    policy.RuleDefault(name=rule_name, check_str=check_str)
    for rule_name, check_str in _BASE_RULES
]


def list_rules():
    """Return the shared base policy rules."""
    return rules
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from magnum.common.policies import base

SERVICE = 'magnum-service:%s'

# Single documented rule: listing magnum services is an admin-only API.
_GET_ALL_OPERATIONS = [{'path': '/v1/mservices', 'method': 'GET'}]

rules = [
    policy.DocumentedRuleDefault(
        name=SERVICE % 'get_all',
        check_str=base.RULE_ADMIN_API,
        description='Retrieve a list of magnum-services.',
        operations=_GET_ALL_OPERATIONS,
    ),
]


def list_rules():
    """Return the documented policy rules for magnum services."""
    return rules
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from magnum.common.policies import base

BAYMODEL = 'baymodel:%s'

# (action, check_str, description, ((method, path), ...)) table driving the
# documented baymodel policy rules; order matches the published policies.
_RULE_SPECS = (
    ('create', base.RULE_DENY_CLUSTER_USER,
     'Create a new baymodel.',
     (('POST', '/v1/baymodels'),)),
    ('delete', base.RULE_DENY_CLUSTER_USER,
     'Delete a baymodel.',
     (('DELETE', '/v1/baymodels/{baymodel_ident}'),)),
    ('detail', base.RULE_DENY_CLUSTER_USER,
     'Retrieve a list of baymodel with detail.',
     (('GET', '/v1/baymodels'),)),
    ('get', base.RULE_DENY_CLUSTER_USER,
     'Retrieve information about the given baymodel.',
     (('GET', '/v1/baymodels/{baymodel_ident}'),)),
    ('get_all', base.RULE_DENY_CLUSTER_USER,
     'Retrieve a list of baymodel.',
     (('GET', '/v1/baymodels'),)),
    ('update', base.RULE_DENY_CLUSTER_USER,
     'Update an existing baymodel.',
     (('PATCH', '/v1/baymodels/{baymodel_ident}'),)),
    ('publish', base.RULE_ADMIN_API,
     'Publish an existing baymodel.',
     (('POST', '/v1/baymodels'),
      ('PATCH', '/v1/baymodels'))),
)

rules = [
    policy.DocumentedRuleDefault(
        name=BAYMODEL % action,
        check_str=check_str,
        description=description,
        operations=[{'path': path, 'method': method}
                    for method, path in operations],
    )
    for action, check_str, description, operations in _RULE_SPECS
]


def list_rules():
    """Return the documented policy rules for baymodels."""
    return rules
with detail across projects.', operations=[ { 'path': '/v1/clusters', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=CLUSTER % 'get', check_str=base.RULE_DENY_CLUSTER_USER, description='Retrieve information about the given cluster.', operations=[ { 'path': '/v1/clusters/{cluster_ident}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=CLUSTER % 'get_one_all_projects', check_str=base.RULE_ADMIN_API, description=('Retrieve information about the given cluster across ' 'projects.'), operations=[ { 'path': '/v1/clusters/{cluster_ident}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=CLUSTER % 'get_all', check_str=base.RULE_DENY_CLUSTER_USER, description='Retrieve a list of clusters.', operations=[ { 'path': '/v1/clusters/', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=CLUSTER % 'get_all_all_projects', check_str=base.RULE_ADMIN_API, description='Retrieve a list of all clusters across projects.', operations=[ { 'path': '/v1/clusters/', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=CLUSTER % 'update', check_str=base.RULE_DENY_CLUSTER_USER, description='Update an existing cluster.', operations=[ { 'path': '/v1/clusters/{cluster_ident}', 'method': 'PATCH' } ] ) ] def list_rules(): return rules magnum-6.1.0/magnum/common/policies/quota.py0000666000175100017510000000441313244017334021121 0ustar zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy

from magnum.common.policies import base

QUOTA = 'quota:%s'

# (action, check_str, description, ((method, path), ...)) table driving the
# documented quota policy rules; order matches the published policies.
_RULE_SPECS = (
    ('create', base.RULE_ADMIN_API,
     'Create quota.',
     (('POST', '/v1/quotas'),)),
    ('delete', base.RULE_ADMIN_API,
     'Delete quota for a given project_id and resource.',
     (('DELETE', '/v1/quotas/{project_id}/{resource}'),)),
    ('get', base.RULE_ADMIN_OR_OWNER,
     'Retrieve Quota information for the given project_id.',
     (('GET', '/v1/quotas/{project_id}/{resource}'),)),
    ('get_all', base.RULE_ADMIN_API,
     'Retrieve a list of quotas.',
     (('GET', '/v1/quotas'),)),
    ('update', base.RULE_ADMIN_API,
     'Update quota for a given project_id.',
     (('PATCH', '/v1/quotas/{project_id}/{resource}'),)),
)

rules = [
    policy.DocumentedRuleDefault(
        name=QUOTA % action,
        check_str=check_str,
        description=description,
        operations=[{'path': path, 'method': method}
                    for method, path in operations],
    )
    for action, check_str, description, operations in _RULE_SPECS
]


def list_rules():
    """Return the documented policy rules for quotas."""
    return rules
import random


class NameGenerator(object):
    """Generate random human-friendly names such as ``beta-2``."""

    # The 24 letters of the Greek alphabet, used as the word part of
    # generated names.
    letters = ['alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta',
               'eta', 'theta', 'iota', 'kappa', 'lambda', 'mu', 'nu',
               'xi', 'omicron', 'pi', 'rho', 'sigma', 'tau', 'upsilon',
               'phi', 'chi', 'psi', 'omega']

    def __init__(self, seed=None):
        """Create a name generator.

        :param seed: optional seed forwarded to ``random.Random`` so
            callers can obtain reproducible sequences. Defaults to None
            (entropy-based seeding), which preserves the original
            behavior for existing callers.
        """
        self.random = random.Random(seed)

    def generate(self):
        '''Generate a random name composed of a Greek letter and a
        number between 1 and 24, like: beta-2.

        :returns: a string of the form ``<letter>-<number>``.
        '''
        # Fixed original docstring: names use '-', not '_' ("beta_2").
        letter = self.random.choice(self.letters)
        number = self.random.randint(1, 24)
        return letter + '-' + str(number)
from oslo_middleware import cors

from magnum.common import rpc
import magnum.conf
from magnum import version

CONF = magnum.conf.CONF


def parse_args(argv, default_config_files=None):
    """Parse CLI arguments and initialize configuration and RPC.

    :param argv: full argument vector; argv[0] (the program name) is
        skipped before handing the rest to oslo.config.
    :param default_config_files: optional list of configuration files to
        load when none are supplied on the command line.
    """
    rpc.set_defaults(control_exchange='magnum')
    CONF(argv[1:], project='magnum',
         version=version.version_info.release_string(),
         default_config_files=default_config_files)
    rpc.init(CONF)


def set_config_defaults():
    """This method updates all configuration default values."""
    set_cors_middleware_defaults()


def set_cors_middleware_defaults():
    """Update default configuration options for oslo.middleware."""
    # Headers and methods that cross-origin API clients (e.g. Horizon)
    # must be allowed to send and read back.
    cors.set_defaults(
        allow_headers=['X-Auth-Token',
                       'X-Identity-Status',
                       'X-Roles',
                       'X-Service-Catalog',
                       'X-User-Id',
                       'X-Tenant-Id',
                       'X-OpenStack-Request-ID',
                       'X-Server-Management-Url'],
        expose_headers=['X-Auth-Token',
                        'X-Subject-Token',
                        'X-Service-Token',
                        'X-OpenStack-Request-ID',
                        'X-Server-Management-Url'],
        allow_methods=['GET',
                       'PUT',
                       'POST',
                       'DELETE',
                       'PATCH']
    )
magnum-6.1.0/magnum/common/__init__.py0000666000175100017510000000000013244017334017704 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/common/context.py0000666000175100017510000001401013244017334017647 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet.green import threading from oslo_context import context from magnum.common import policy import magnum.conf CONF = magnum.conf.CONF class RequestContext(context.RequestContext): """Extends security contexts from the OpenStack common library.""" def __init__(self, auth_token=None, auth_url=None, domain_id=None, domain_name=None, user_name=None, user_id=None, user_domain_name=None, user_domain_id=None, project_name=None, project_id=None, roles=None, is_admin=None, read_only=False, show_deleted=False, request_id=None, trust_id=None, auth_token_info=None, all_tenants=False, password=None, **kwargs): """Stores several additional request parameters: :param domain_id: The ID of the domain. :param domain_name: The name of the domain. :param user_domain_id: The ID of the domain to authenticate a user against. :param user_domain_name: The name of the domain to authenticate a user against. """ super(RequestContext, self).__init__(auth_token=auth_token, user_id=user_name, tenant=project_name, is_admin=is_admin, read_only=read_only, show_deleted=show_deleted, request_id=request_id, roles=roles) self.user_name = user_name self.user_id = user_id self.project_name = project_name self.project_id = project_id self.domain_id = domain_id self.domain_name = domain_name self.user_domain_id = user_domain_id self.user_domain_name = user_domain_name self.auth_url = auth_url self.auth_token_info = auth_token_info self.trust_id = trust_id self.all_tenants = all_tenants self.password = password if is_admin is None: self.is_admin = policy.check_is_admin(self) else: self.is_admin = is_admin def to_dict(self): value = super(RequestContext, self).to_dict() value.update({'auth_token': self.auth_token, 'auth_url': self.auth_url, 'domain_id': self.domain_id, 'domain_name': self.domain_name, 'user_domain_id': self.user_domain_id, 'user_domain_name': self.user_domain_name, 'user_name': self.user_name, 'user_id': self.user_id, 'project_name': self.project_name, 'project_id': 
self.project_id, 'is_admin': self.is_admin, 'read_only': self.read_only, 'roles': self.roles, 'show_deleted': self.show_deleted, 'request_id': self.request_id, 'trust_id': self.trust_id, 'auth_token_info': self.auth_token_info, 'password': self.password, 'all_tenants': self.all_tenants}) return value @classmethod def from_dict(cls, values): return cls(**values) def make_context(*args, **kwargs): return RequestContext(*args, **kwargs) def make_admin_context(show_deleted=False, all_tenants=False): """Create an administrator context. :param show_deleted: if True, will show deleted items when query db """ context = RequestContext(user_id=None, project=None, is_admin=True, show_deleted=show_deleted, all_tenants=all_tenants) return context def make_cluster_context(cluster, show_deleted=False): """Create a user context based on a cluster's stored Keystone trust. :param cluster: the cluster supplying the Keystone trust to use :param show_deleted: if True, will show deleted items when query db """ context = RequestContext(user_name=cluster.trustee_username, password=cluster.trustee_password, trust_id=cluster.trust_id, show_deleted=show_deleted, user_domain_id=CONF.trust.trustee_domain_id, user_domain_name=CONF.trust.trustee_domain_name) return context _CTX_STORE = threading.local() _CTX_KEY = 'current_ctx' def has_ctx(): return hasattr(_CTX_STORE, _CTX_KEY) def ctx(): return getattr(_CTX_STORE, _CTX_KEY) def set_ctx(new_ctx): if not new_ctx and has_ctx(): delattr(_CTX_STORE, _CTX_KEY) if hasattr(context._request_store, 'context'): delattr(context._request_store, 'context') if new_ctx: setattr(_CTX_STORE, _CTX_KEY, new_ctx) setattr(context._request_store, 'context', new_ctx) def get_admin_context(read_deleted="no"): # NOTE(tovin07): This method should only be used when an admin context is # necessary for the entirety of the context lifetime. 
return RequestContext(user_id=None, project_id=None, is_admin=True, read_deleted=read_deleted, overwrite=False) magnum-6.1.0/magnum/common/utils.py0000777000175100017510000002123413244017334017324 0ustar zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # Copyright (c) 2012 NTT DOCOMO, INC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utilities and helper functions.""" import contextlib import os import random import re import shutil import tempfile from oslo_concurrency import processutils from oslo_log import log as logging from oslo_utils import netutils import six from magnum.common import exception import magnum.conf CONF = magnum.conf.CONF LOG = logging.getLogger(__name__) MEMORY_UNITS = { 'Ki': 2 ** 10, 'Mi': 2 ** 20, 'Gi': 2 ** 30, 'Ti': 2 ** 40, 'Pi': 2 ** 50, 'Ei': 2 ** 60, 'm': 10 ** -3, 'k': 10 ** 3, 'M': 10 ** 6, 'G': 10 ** 9, 'T': 10 ** 12, 'p': 10 ** 15, 'E': 10 ** 18, '': 1 } DOCKER_MEMORY_UNITS = { 'b': 1, 'k': 2 ** 10, 'm': 2 ** 20, 'g': 2 ** 30, } def _get_root_helper(): return 'sudo magnum-rootwrap %s' % CONF.rootwrap_config def execute(*cmd, **kwargs): """Convenience wrapper around oslo's execute() method. :param cmd: Passed to processutils.execute. :param use_standard_locale: True | False. Defaults to False. 
If set to True, execute command with standard locale added to environment variables. :returns: (stdout, stderr) from process execution :raises: UnknownArgumentError :raises: ProcessExecutionError """ use_standard_locale = kwargs.pop('use_standard_locale', False) if use_standard_locale: env = kwargs.pop('env_variables', os.environ.copy()) env['LC_ALL'] = 'C' kwargs['env_variables'] = env if kwargs.get('run_as_root') and 'root_helper' not in kwargs: kwargs['root_helper'] = _get_root_helper() result = processutils.execute(*cmd, **kwargs) LOG.debug('Execution completed, command line is "%s"', ' '.join(map(str, cmd))) LOG.debug('Command stdout is: "%s"', result[0]) LOG.debug('Command stderr is: "%s"', result[1]) return result def trycmd(*args, **kwargs): """Convenience wrapper around oslo's trycmd() method.""" if kwargs.get('run_as_root') and 'root_helper' not in kwargs: kwargs['root_helper'] = _get_root_helper() return processutils.trycmd(*args, **kwargs) def validate_and_normalize_mac(address): """Validate a MAC address and return normalized form. Checks whether the supplied MAC address is formally correct and normalize it to all lower case. :param address: MAC address to be validated and normalized. :returns: Normalized and validated MAC address. :raises: InvalidMAC If the MAC address is not valid. 
""" if not netutils.is_valid_mac(address): raise exception.InvalidMAC(mac=address) return address.lower() @contextlib.contextmanager def tempdir(**kwargs): tempfile.tempdir = CONF.tempdir tmpdir = tempfile.mkdtemp(**kwargs) try: yield tmpdir finally: try: shutil.rmtree(tmpdir) except OSError as e: LOG.error('Could not remove tmpdir: %s', e) def rmtree_without_raise(path): try: if os.path.isdir(path): shutil.rmtree(path) except OSError as e: LOG.warning("Failed to remove dir %(path)s, error: %(e)s", {'path': path, 'e': e}) def safe_rstrip(value, chars=None): """Removes trailing characters from a string if that does not make it empty :param value: A string value that will be stripped. :param chars: Characters to remove. :return: Stripped value. """ if not isinstance(value, six.string_types): LOG.warning("Failed to remove trailing character. " "Returning original object. " "Supplied object is not a string: %s,", value) return value return value.rstrip(chars) or value def is_name_safe(name): """Checks whether the name is valid or not. :param name: name of the resource. :returns: True, when name is valid False, otherwise. """ # TODO(madhuri): There should be some validation of name. # Leaving it now as there is no validation # while resource creation. # https://bugs.launchpad.net/magnum/+bug/1430617 if not name: return False return True def get_k8s_quantity(quantity): """This function is used to get k8s quantity. 
It supports to get CPU and Memory quantity: Kubernetes cpu format must be in the format of: 'm' for example: 500m = 0.5 core of cpu Kubernetes memory format must be in the format of: signedNumber = digits|digits.digits|digits.|.digits suffix = Ki|Mi|Gi|Ti|Pi|Ei|m|k|M|G|T|P|E|'' or suffix = E|e digits = digit | digit digit = 0|1|2|3|4|5|6|7|8|9 :param name: String value of a quantity such as '500m', '1G' :returns: Quantity number :raises: exception.UnsupportedK8sQuantityFormat if the quantity string is a unsupported value """ signed_num_regex = r"(^\d+\.\d+)|(^\d+\.)|(\.\d+)|(^\d+)" matched_signed_number = re.search(signed_num_regex, quantity) if matched_signed_number is None: raise exception.UnsupportedK8sQuantityFormat() else: signed_number = matched_signed_number.group(0) suffix = quantity.replace(signed_number, '', 1) if suffix == '': return float(quantity) if re.search(r"^(Ki|Mi|Gi|Ti|Pi|Ei|m|k|M|G|T|P|E|'')$", suffix): return float(signed_number) * MEMORY_UNITS[suffix] elif re.search(r"^[E|e][+|-]?(\d+\.\d+$)|(\d+\.$)|(\.\d+$)|(\d+$)", suffix): return float(signed_number) * (10 ** float(suffix[1:])) else: raise exception.UnsupportedK8sQuantityFormat() def get_docker_quantity(quantity): """This function is used to get swarm Memory quantity. Memory format must be in the format of: suffix = b | k | m | g eg: 100m = 104857600 :raises: exception.UnsupportedDockerQuantityFormat if the quantity string is a unsupported value """ matched_unsigned_number = re.search(r"(^\d+)", quantity) if matched_unsigned_number is None: raise exception.UnsupportedDockerQuantityFormat() else: unsigned_number = matched_unsigned_number.group(0) suffix = quantity.replace(unsigned_number, '', 1) if suffix == '': return int(quantity) if re.search(r"^(b|k|m|g)$", suffix): return int(unsigned_number) * DOCKER_MEMORY_UNITS[suffix] raise exception.UnsupportedDockerQuantityFormat() def generate_password(length, symbolgroups=None): """Generate a random password from the supplied symbol groups. 
At least one symbol from each group will be included. Unpredictable results if length is less than the number of symbol groups. Believed to be reasonably secure (with a reasonable password length!) """ if symbolgroups is None: symbolgroups = CONF.password_symbols r = random.SystemRandom() # NOTE(jerdfelt): Some password policies require at least one character # from each group of symbols, so start off with one random character # from each symbol group password = [r.choice(s) for s in symbolgroups] # If length < len(symbolgroups), the leading characters will only # be from the first length groups. Try our best to not be predictable # by shuffling and then truncating. r.shuffle(password) password = password[:length] length -= len(password) # then fill with random characters from all symbol groups symbols = ''.join(symbolgroups) password.extend([r.choice(symbols) for _i in range(length)]) # finally shuffle to ensure first x characters aren't from a # predictable group r.shuffle(password) return ''.join(password) def get_openstack_ca(): openstack_ca_file = CONF.drivers.openstack_ca_file if openstack_ca_file: with open(openstack_ca_file) as fd: return fd.read() else: return '' magnum-6.1.0/magnum/common/clients.py0000666000175100017510000001650413244017334017626 0ustar zuulzuul00000000000000# Copyright 2014 - Rackspace Hosting. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from barbicanclient.v1 import client as barbicanclient from glanceclient import client as glanceclient from heatclient import client as heatclient from keystoneauth1.exceptions import catalog from neutronclient.v2_0 import client as neutronclient from novaclient import client as novaclient from oslo_log import log as logging from magnum.common import exception from magnum.common import keystone import magnum.conf CONF = magnum.conf.CONF LOG = logging.getLogger(__name__) class OpenStackClients(object): """Convenience class to create and cache client instances.""" def __init__(self, context): self.context = context self._keystone = None self._heat = None self._glance = None self._barbican = None self._nova = None self._neutron = None def url_for(self, **kwargs): return self.keystone().session.get_endpoint(**kwargs) def magnum_url(self): endpoint_type = self._get_client_option('magnum', 'endpoint_type') region_name = self._get_client_option('magnum', 'region_name') try: return self.url_for(service_type='container-infra', interface=endpoint_type, region_name=region_name) except catalog.EndpointNotFound: url = self.url_for(service_type='container', interface=endpoint_type, region_name=region_name) LOG.warning('Service type "container" is deprecated and will ' 'be removed in a subsequent release') return url def cinder_region_name(self): cinder_region_name = self._get_client_option('cinder', 'region_name') return self.keystone().get_validate_region_name(cinder_region_name) @property def auth_url(self): return self.keystone().auth_url @property def auth_token(self): return self.context.auth_token or self.keystone().auth_token def keystone(self): if self._keystone: return self._keystone self._keystone = keystone.KeystoneClientV3(self.context) return self._keystone def _get_client_option(self, client, option): return getattr(getattr(CONF, '%s_client' % client), option) @exception.wrap_keystone_exception def heat(self): if self._heat: return self._heat endpoint_type = 
self._get_client_option('heat', 'endpoint_type') region_name = self._get_client_option('heat', 'region_name') heatclient_version = self._get_client_option('heat', 'api_version') endpoint = self.url_for(service_type='orchestration', interface=endpoint_type, region_name=region_name) args = { 'endpoint': endpoint, 'auth_url': self.auth_url, 'token': self.auth_token, 'username': None, 'password': None, 'ca_file': self._get_client_option('heat', 'ca_file'), 'cert_file': self._get_client_option('heat', 'cert_file'), 'key_file': self._get_client_option('heat', 'key_file'), 'insecure': self._get_client_option('heat', 'insecure') } self._heat = heatclient.Client(heatclient_version, **args) return self._heat @exception.wrap_keystone_exception def glance(self): if self._glance: return self._glance endpoint_type = self._get_client_option('glance', 'endpoint_type') region_name = self._get_client_option('glance', 'region_name') glanceclient_version = self._get_client_option('glance', 'api_version') endpoint = self.url_for(service_type='image', interface=endpoint_type, region_name=region_name) args = { 'endpoint': endpoint, 'auth_url': self.auth_url, 'token': self.auth_token, 'username': None, 'password': None, 'cacert': self._get_client_option('glance', 'ca_file'), 'cert': self._get_client_option('glance', 'cert_file'), 'key': self._get_client_option('glance', 'key_file'), 'insecure': self._get_client_option('glance', 'insecure') } self._glance = glanceclient.Client(glanceclient_version, **args) return self._glance @exception.wrap_keystone_exception def barbican(self): if self._barbican: return self._barbican endpoint_type = self._get_client_option('barbican', 'endpoint_type') region_name = self._get_client_option('barbican', 'region_name') endpoint = self.url_for(service_type='key-manager', interface=endpoint_type, region_name=region_name) session = self.keystone().session self._barbican = barbicanclient.Client(session=session, endpoint=endpoint) return self._barbican 
@exception.wrap_keystone_exception def nova(self): if self._nova: return self._nova endpoint_type = self._get_client_option('nova', 'endpoint_type') region_name = self._get_client_option('nova', 'region_name') novaclient_version = self._get_client_option('nova', 'api_version') endpoint = self.url_for(service_type='compute', interface=endpoint_type, region_name=region_name) args = { 'cacert': self._get_client_option('nova', 'ca_file'), 'insecure': self._get_client_option('nova', 'insecure') } session = self.keystone().session self._nova = novaclient.Client(novaclient_version, session=session, endpoint_override=endpoint, **args) return self._nova @exception.wrap_keystone_exception def neutron(self): if self._neutron: return self._neutron endpoint_type = self._get_client_option('neutron', 'endpoint_type') region_name = self._get_client_option('neutron', 'region_name') endpoint = self.url_for(service_type='network', interface=endpoint_type, region_name=region_name) args = { 'auth_url': self.auth_url, 'token': self.auth_token, 'endpoint_url': endpoint, 'endpoint_type': endpoint_type, 'ca_cert': self._get_client_option('neutron', 'ca_file'), 'insecure': self._get_client_option('neutron', 'insecure') } self._neutron = neutronclient.Client(**args) return self._neutron magnum-6.1.0/magnum/common/policy.py0000666000175100017510000001331613244017334017462 0ustar zuulzuul00000000000000# Copyright (c) 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Policy Engine For magnum.""" import decorator from oslo_config import cfg from oslo_policy import policy from oslo_utils import importutils import pecan from magnum.common import clients from magnum.common import exception from magnum.common import policies _ENFORCER = None CONF = cfg.CONF # we can get a policy enforcer by this init. # oslo policy support change policy rule dynamically. # at present, policy.enforce will reload the policy rules when it checks # the policy files have been touched. def init(policy_file=None, rules=None, default_rule=None, use_conf=True, overwrite=True): """Init an Enforcer class. :param policy_file: Custom policy file to use, if none is specified, ``conf.policy_file`` will be used. :param rules: Default dictionary / Rules to use. It will be considered just in the first instantiation. If :meth:`load_rules` with ``force_reload=True``, :meth:`clear` or :meth:`set_rules` with ``overwrite=True`` is called this will be overwritten. :param default_rule: Default rule to use, conf.default_rule will be used if none is specified. :param use_conf: Whether to load rules from cache or config file. :param overwrite: Whether to overwrite existing rules when reload rules from config file. """ global _ENFORCER if not _ENFORCER: # http://docs.openstack.org/developer/oslo.policy/usage.html _ENFORCER = policy.Enforcer(CONF, policy_file=policy_file, rules=rules, default_rule=default_rule, use_conf=use_conf, overwrite=overwrite) _ENFORCER.register_defaults(policies.list_rules()) return _ENFORCER def enforce(context, rule=None, target=None, do_raise=True, exc=None, *args, **kwargs): """Checks authorization of a rule against the target and credentials. :param dict context: As much information about the user performing the action as possible. :param rule: The rule to evaluate. 
:param dict target: As much information about the object being operated on as possible. :param do_raise: Whether to raise an exception or not if check fails. :param exc: Class of the exception to raise if the check fails. Any remaining arguments passed to :meth:`enforce` (both positional and keyword arguments) will be passed to the exception class. If not specified, :class:`PolicyNotAuthorized` will be used. :return: ``False`` if the policy does not allow the action and `exc` is not provided; otherwise, returns a value that evaluates to ``True``. Note: for rules using the "case" expression, this ``True`` value will be the specified string from the expression. """ enforcer = init() credentials = context.to_dict() if not exc: exc = exception.PolicyNotAuthorized if target is None: target = {'project_id': context.project_id, 'user_id': context.user_id} add_policy_attributes(target) return enforcer.enforce(rule, target, credentials, do_raise=do_raise, exc=exc, *args, **kwargs) def add_policy_attributes(target): """Adds extra information for policy enforcement to raw target object""" context = importutils.import_module('magnum.common.context') admin_context = context.make_admin_context() admin_osc = clients.OpenStackClients(admin_context) trustee_domain_id = admin_osc.keystone().trustee_domain_id target['trustee_domain_id'] = trustee_domain_id return target def check_is_admin(context): """Whether or not user is admin according to policy setting. """ init() target = {} credentials = context.to_dict() return _ENFORCER.enforce('context_is_admin', target, credentials) def enforce_wsgi(api_name, act=None): """This is a decorator to simplify wsgi action policy rule check. :param api_name: The collection name to be evaluate. :param act: The function name of wsgi action. example: from magnum.common import policy class ClustersController(rest.RestController): .... 
@policy.enforce_wsgi("cluster", "delete") @wsme_pecan.wsexpose(None, types.uuid_or_name, status_code=204) def delete(self, cluster_ident): ... """ @decorator.decorator def wrapper(fn, *args, **kwargs): action = "%s:%s" % (api_name, (act or fn.__name__)) enforce(pecan.request.context, action, exc=exception.PolicyNotAuthorized, action=action) return fn(*args, **kwargs) return wrapper magnum-6.1.0/magnum/common/short_id.py0000666000175100017510000000375613244017334020005 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utilities for creating short ID strings based on a random UUID. The IDs each comprise 12 (lower-case) alphanumeric characters. """ import base64 import uuid import six from magnum.i18n import _ def _to_byte_string(value, num_bits): """Convert an integer to a big-endian string of bytes with padding. Padding is added at the end (i.e. after the least-significant bit) if required. """ shifts = six.moves.xrange(num_bits - 8, -8, -8) byte_at = lambda off: (value >> off if off >= 0 else value << -off) & 0xff return ''.join(chr(byte_at(offset)) for offset in shifts) def get_id(source_uuid): """Derive a short (12 character) id from a random UUID. The supplied UUID must be a version 4 UUID object. 
""" if isinstance(source_uuid, six.string_types): source_uuid = uuid.UUID(source_uuid) if source_uuid.version != 4: raise ValueError(_('Invalid UUID version (%d)') % source_uuid.version) # The "time" field of a v4 UUID contains 60 random bits # (see RFC4122, Section 4.4) random_bytes = _to_byte_string(source_uuid.time, 60) # The first 12 bytes (= 60 bits) of base32-encoded output is our data encoded = base64.b32encode(six.b(random_bytes))[:12] if six.PY3: return encoded.lower().decode('utf-8') else: return encoded.lower() def generate_id(): """Generate a short (12 character), random id.""" return get_id(uuid.uuid4()) magnum-6.1.0/magnum/cmd/0000775000175100017510000000000013244017675015066 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/cmd/driver_manage.py0000666000175100017510000000645713244017334020251 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Starter script for magnum-driver-manage.""" import sys from cliff import app from cliff import commandmanager from cliff import lister import magnum.conf from magnum.drivers.common import driver from magnum import version CONF = magnum.conf.CONF class DriverList(lister.Lister): """List templates""" def _print_rows(self, parsed_args, rows): fields = ['name'] field_labels = ['Name'] if parsed_args.details: fields.extend(['server_type', 'os', 'coe']) field_labels.extend(['Server_Type', 'OS', 'COE']) if parsed_args.paths: fields.append('path') field_labels.append('Template Path') return field_labels, [tuple([row[field] for field in fields]) for row in rows] def get_parser(self, prog_name): parser = super(DriverList, self).get_parser(prog_name) parser.add_argument('-d', '--details', action='store_true', dest='details', help=('display the cluster types provided by ' 'each template')) parser.add_argument('-p', '--paths', action='store_true', dest='paths', help='display the path to each template file') return parser def take_action(self, parsed_args): rows = [] for entry_point, cls in driver.Driver.load_entry_points(): name = entry_point.name definition = cls().get_template_definition() template = dict(name=name, path=definition.template_path) if parsed_args.details: for cluster_type in cls().provides: row = dict() row.update(template) row.update(cluster_type) rows.append(row) else: rows.append(template) return self._print_rows(parsed_args, rows) class DriverCommandManager(commandmanager.CommandManager): COMMANDS = { "list-drivers": DriverList, } def load_commands(self, namespace): for name, command_class in self.COMMANDS.items(): self.add_command(name, command_class) class DriverManager(app.App): def __init__(self): super(DriverManager, self).__init__( description='Magnum Driver Manager', version=version.version_info, command_manager=DriverCommandManager('magnum'), deferred_help=True) def main(args=None): if args is None: args = sys.argv[1:] CONF([], project='magnum', 
version=version.version_info.release_string()) return DriverManager().run(args) magnum-6.1.0/magnum/cmd/conductor.py0000777000175100017510000000474313244017334017445 0ustar zuulzuul00000000000000# Copyright 2014 - Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Starter script for the Magnum conductor service.""" import os import sys from oslo_concurrency import processutils from oslo_log import log as logging from oslo_reports import guru_meditation_report as gmr from oslo_service import service from magnum.common import rpc_service from magnum.common import service as magnum_service from magnum.common import short_id from magnum.conductor.handlers import ca_conductor from magnum.conductor.handlers import cluster_conductor from magnum.conductor.handlers import conductor_listener from magnum.conductor.handlers import federation_conductor from magnum.conductor.handlers import indirection_api import magnum.conf from magnum import version CONF = magnum.conf.CONF LOG = logging.getLogger(__name__) def main(): magnum_service.prepare_service(sys.argv) gmr.TextGuruMeditation.setup_autorun(version) LOG.info('Starting server in PID %s', os.getpid()) LOG.debug("Configuration:") CONF.log_opt_values(LOG, logging.DEBUG) conductor_id = short_id.generate_id() endpoints = [ indirection_api.Handler(), cluster_conductor.Handler(), conductor_listener.Handler(), ca_conductor.Handler(), federation_conductor.Handler(), ] server = rpc_service.Service.create(CONF.conductor.topic, 
conductor_id, endpoints, binary='magnum-conductor') workers = CONF.conductor.workers if not workers: workers = processutils.get_worker_count() launcher = service.launch(CONF, server, workers=workers) # NOTE(mnaser): We create the periodic tasks here so that they # can be attached to the main process and not # duplicated in all the children if multiple # workers are being used. server.create_periodic_tasks() server.start() launcher.wait() magnum-6.1.0/magnum/cmd/api.py0000777000175100017510000000523613244017334016214 0ustar zuulzuul00000000000000# Copyright 2013 - Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Starter script for the Magnum API service.""" import os import sys from oslo_concurrency import processutils from oslo_log import log as logging from oslo_reports import guru_meditation_report as gmr from werkzeug import serving from magnum.api import app as api_app from magnum.common import profiler from magnum.common import service import magnum.conf from magnum.i18n import _ from magnum.objects import base from magnum import version CONF = magnum.conf.CONF LOG = logging.getLogger(__name__) def _get_ssl_configs(use_ssl): if use_ssl: cert_file = CONF.api.ssl_cert_file key_file = CONF.api.ssl_key_file if cert_file and not os.path.exists(cert_file): raise RuntimeError( _("Unable to find cert_file : %s") % cert_file) if key_file and not os.path.exists(key_file): raise RuntimeError( _("Unable to find key_file : %s") % key_file) return cert_file, key_file else: return None def main(): service.prepare_service(sys.argv) gmr.TextGuruMeditation.setup_autorun(version) # Enable object backporting via the conductor base.MagnumObject.indirection_api = base.MagnumObjectIndirectionAPI() app = api_app.load_app() # Setup OSprofiler for WSGI service profiler.setup('magnum-api', CONF.host) # SSL configuration use_ssl = CONF.api.enabled_ssl # Create the WSGI server and start it host, port = CONF.api.host, CONF.api.port LOG.info('Starting server in PID %s', os.getpid()) LOG.debug("Configuration:") CONF.log_opt_values(LOG, logging.DEBUG) LOG.info('Serving on %(proto)s://%(host)s:%(port)s', dict(proto="https" if use_ssl else "http", host=host, port=port)) workers = CONF.api.workers if not workers: workers = processutils.get_worker_count() LOG.info('Server will handle each request in a new process up to' ' %s concurrent processes', workers) serving.run_simple(host, port, app, processes=workers, ssl_context=_get_ssl_configs(use_ssl)) magnum-6.1.0/magnum/cmd/db_manage.py0000666000175100017510000000362213244017334017332 0ustar zuulzuul00000000000000# # Licensed under the Apache License, 
Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Starter script for magnum-db-manage.""" from oslo_config import cfg from magnum.db import migration CONF = cfg.CONF def do_version(): print('Current DB revision is %s' % migration.version()) def do_upgrade(): migration.upgrade(CONF.command.revision) def do_stamp(): migration.stamp(CONF.command.revision) def do_revision(): migration.revision(message=CONF.command.message, autogenerate=CONF.command.autogenerate) def add_command_parsers(subparsers): parser = subparsers.add_parser('version') parser.set_defaults(func=do_version) parser = subparsers.add_parser('upgrade') parser.add_argument('revision', nargs='?') parser.set_defaults(func=do_upgrade) parser = subparsers.add_parser('stamp') parser.add_argument('revision') parser.set_defaults(func=do_stamp) parser = subparsers.add_parser('revision') parser.add_argument('-m', '--message') parser.add_argument('--autogenerate', action='store_true') parser.set_defaults(func=do_revision) command_opt = cfg.SubCommandOpt('command', title='Command', help='Available commands', handler=add_command_parsers) def main(): CONF.register_cli_opt(command_opt) CONF(project='magnum') CONF.command.func() magnum-6.1.0/magnum/cmd/__init__.py0000666000175100017510000000132313244017334017170 0ustar zuulzuul00000000000000# Copyright 2017 Fujitsu Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(hieulq): we monkey patch all eventlet services for easier tracking/debug import eventlet eventlet.monkey_patch() magnum-6.1.0/magnum/hacking/0000775000175100017510000000000013244017675015727 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/hacking/checks.py0000777000175100017510000001366013244017334017544 0ustar zuulzuul00000000000000# Copyright (c) 2015 Intel, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re import pep8 # noqa """ Guidelines for writing new hacking checks - Use only for Magnum specific tests. OpenStack general tests should be submitted to the common 'hacking' module. - Pick numbers in the range M3xx. Find the current test with the highest allocated number and then pick the next value. If nova has an N3xx code for that test, use the same number. - Keep the test method code in the source file ordered based on the M3xx value. 
- List the new rule in the top level HACKING.rst file - Add test cases for each new rule to magnum/tests/unit/test_hacking.py """ UNDERSCORE_IMPORT_FILES = [] mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])") assert_equal_in_end_with_true_or_false_re = re.compile( r"assertEqual\((\w|[][.'\"])+ in (\w|[][.'\", ])+, (True|False)\)") assert_equal_in_start_with_true_or_false_re = re.compile( r"assertEqual\((True|False), (\w|[][.'\"])+ in (\w|[][.'\", ])+\)") assert_equal_with_is_not_none_re = re.compile( r"assertEqual\(.*?\s+is+\s+not+\s+None\)$") assert_true_isinstance_re = re.compile( r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, " "(\w|\.|\'|\"|\[|\])+\)\)") dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)") assert_xrange_re = re.compile( r"\s*xrange\s*\(") log_translation = re.compile( r"(.)*LOG\.(audit|error|critical)\(\s*('|\")") log_translation_info = re.compile( r"(.)*LOG\.(info)\(\s*(_\(|'|\")") log_translation_exception = re.compile( r"(.)*LOG\.(exception)\(\s*(_\(|'|\")") log_translation_LW = re.compile( r"(.)*LOG\.(warning|warn)\(\s*(_\(|'|\")") custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*") underscore_import_check = re.compile(r"(.)*import _(.)*") translated_log = re.compile( r"(.)*LOG\.(audit|error|info|critical|exception)" "\(\s*_\(\s*('|\")") string_translation = re.compile(r"[^_]*_\(\s*('|\")") def no_mutable_default_args(logical_line): msg = "M322: Method's default argument shouldn't be mutable!" if mutable_default_args.match(logical_line): yield (0, msg) def assert_equal_not_none(logical_line): """Check for assertEqual(A is not None) sentences M302""" msg = "M302: assertEqual(A is not None) sentences not allowed." 
res = assert_equal_with_is_not_none_re.search(logical_line) if res: yield (0, msg) def assert_true_isinstance(logical_line): """Check for assertTrue(isinstance(a, b)) sentences M316 """ if assert_true_isinstance_re.match(logical_line): yield (0, "M316: assertTrue(isinstance(a, b)) sentences not allowed") def assert_equal_in(logical_line): """Check for assertEqual(True|False, A in B), assertEqual(A in B, True|False) M338 """ res = (assert_equal_in_start_with_true_or_false_re.search(logical_line) or assert_equal_in_end_with_true_or_false_re.search(logical_line)) if res: yield (0, "M338: Use assertIn/NotIn(A, B) rather than " "assertEqual(A in B, True/False) when checking collection " "contents.") def no_xrange(logical_line): """Disallow 'xrange()' M339 """ if assert_xrange_re.match(logical_line): yield(0, "M339: Do not use xrange().") def use_timeutils_utcnow(logical_line, filename): # tools are OK to use the standard datetime module if "/tools/" in filename: return msg = "M310: timeutils.utcnow() must be used instead of datetime.%s()" datetime_funcs = ['now', 'utcnow'] for f in datetime_funcs: pos = logical_line.find('datetime.%s' % f) if pos != -1: yield (pos, msg % f) def dict_constructor_with_list_copy(logical_line): msg = ("M336: Must use a dict comprehension instead of a dict constructor" " with a sequence of key-value pairs." ) if dict_constructor_with_list_copy_re.match(logical_line): yield (0, msg) def no_log_warn(logical_line): """Disallow 'LOG.warn(' Deprecated LOG.warn(), instead use LOG.warning https://bugs.launchpad.net/magnum/+bug/1508442 M352 """ msg = ("M352: LOG.warn is deprecated, please use LOG.warning!") if "LOG.warn(" in logical_line: yield (0, msg) def check_explicit_underscore_import(logical_line, filename): """Check for explicit import of the _ function We need to ensure that any files that are using the _() function to translate logs are explicitly importing the _ function. 
We can't trust unit test to catch whether the import has been added so we need to check for it here. """ # Build a list of the files that have _ imported. No further # checking needed once it is found. if filename in UNDERSCORE_IMPORT_FILES: pass elif (underscore_import_check.match(logical_line) or custom_underscore_check.match(logical_line)): UNDERSCORE_IMPORT_FILES.append(filename) elif (translated_log.match(logical_line) or string_translation.match(logical_line)): yield(0, "M340: Found use of _() without explicit import of _ !") def factory(register): register(no_mutable_default_args) register(assert_equal_not_none) register(assert_true_isinstance) register(assert_equal_in) register(use_timeutils_utcnow) register(dict_constructor_with_list_copy) register(no_xrange) register(no_log_warn) register(check_explicit_underscore_import) magnum-6.1.0/magnum/hacking/__init__.py0000666000175100017510000000000013244017334020020 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/service/0000775000175100017510000000000013244017675015763 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/service/periodic.py0000777000175100017510000001655113244017334020140 0ustar zuulzuul00000000000000# Copyright (c) 2015 Intel Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import functools from oslo_log import log from oslo_service import loopingcall from oslo_service import periodic_task from pycadf import cadftaxonomy as taxonomy from magnum.common import context from magnum.common import profiler from magnum.common import rpc from magnum.conductor import monitors from magnum.conductor import utils as conductor_utils import magnum.conf from magnum.drivers.common import driver from magnum import objects CONF = magnum.conf.CONF LOG = log.getLogger(__name__) def set_context(func): @functools.wraps(func) def handler(self, ctx): ctx = context.make_admin_context(all_tenants=True) context.set_ctx(ctx) func(self, ctx) context.set_ctx(None) return handler class ClusterUpdateJob(object): status_to_event = { objects.fields.ClusterStatus.DELETE_COMPLETE: taxonomy.ACTION_DELETE, objects.fields.ClusterStatus.CREATE_COMPLETE: taxonomy.ACTION_CREATE, objects.fields.ClusterStatus.UPDATE_COMPLETE: taxonomy.ACTION_UPDATE, objects.fields.ClusterStatus.ROLLBACK_COMPLETE: taxonomy.ACTION_UPDATE, objects.fields.ClusterStatus.CREATE_FAILED: taxonomy.ACTION_CREATE, objects.fields.ClusterStatus.DELETE_FAILED: taxonomy.ACTION_DELETE, objects.fields.ClusterStatus.UPDATE_FAILED: taxonomy.ACTION_UPDATE, objects.fields.ClusterStatus.ROLLBACK_FAILED: taxonomy.ACTION_UPDATE } def __init__(self, ctx, cluster): self.ctx = ctx self.cluster = cluster def update_status(self): LOG.debug("Updating status for cluster %s", self.cluster.id) # get the driver for the cluster cdriver = driver.Driver.get_driver_for_cluster(self.ctx, self.cluster) # ask the driver to sync status cdriver.update_cluster_status(self.ctx, self.cluster) LOG.debug("Status for cluster %s updated to %s (%s)", self.cluster.id, self.cluster.status, self.cluster.status_reason) # status update notifications if self.cluster.status.endswith("_COMPLETE"): conductor_utils.notify_about_cluster_operation( self.ctx, self.status_to_event[self.cluster.status], taxonomy.OUTCOME_SUCCESS) if 
self.cluster.status.endswith("_FAILED"): conductor_utils.notify_about_cluster_operation( self.ctx, self.status_to_event[self.cluster.status], taxonomy.OUTCOME_FAILURE) # if we're done with it, delete it if self.cluster.status == objects.fields.ClusterStatus.DELETE_COMPLETE: self.cluster.destroy() # end the "loop" raise loopingcall.LoopingCallDone() @profiler.trace_cls("rpc") class MagnumPeriodicTasks(periodic_task.PeriodicTasks): '''Magnum periodic Task class Any periodic task job need to be added into this class NOTE(suro-patz): - oslo_service.periodic_task runs tasks protected within try/catch block, with default raise_on_error as 'False', in run_periodic_tasks(), which ensures the process does not die, even if a task encounters an Exception. - The periodic tasks here does not necessarily need another try/catch block. The present try/catch block here helps putting magnum-periodic-task-specific log/error message. ''' def __init__(self, conf): super(MagnumPeriodicTasks, self).__init__(conf) self.notifier = rpc.get_notifier() @periodic_task.periodic_task(spacing=10, run_immediately=True) @set_context def sync_cluster_status(self, ctx): try: LOG.debug('Starting to sync up cluster status') # get all the clusters that are IN_PROGRESS status = [objects.fields.ClusterStatus.CREATE_IN_PROGRESS, objects.fields.ClusterStatus.UPDATE_IN_PROGRESS, objects.fields.ClusterStatus.DELETE_IN_PROGRESS, objects.fields.ClusterStatus.ROLLBACK_IN_PROGRESS] filters = {'status': status} clusters = objects.Cluster.list(ctx, filters=filters) if not clusters: return # synchronize with underlying orchestration for cluster in clusters: job = ClusterUpdateJob(ctx, cluster) # though this call isn't really looping, we use this # abstraction anyway to avoid dealing directly with eventlet # hooey lc = loopingcall.FixedIntervalLoopingCall(f=job.update_status) lc.start(1, stop_on_exception=True) except Exception as e: LOG.warning( "Ignore error [%s] when syncing up cluster status.", e, exc_info=True) 
@periodic_task.periodic_task(run_immediately=True) @set_context def _send_cluster_metrics(self, ctx): if not CONF.drivers.send_cluster_metrics: LOG.debug('Skip sending cluster metrics') return LOG.debug('Starting to send cluster metrics') for cluster in objects.Cluster.list(ctx): if cluster.status not in ( objects.fields.ClusterStatus.CREATE_COMPLETE, objects.fields.ClusterStatus.UPDATE_COMPLETE): continue monitor = monitors.create_monitor(ctx, cluster) if monitor is None: continue try: monitor.pull_data() except Exception as e: LOG.warning( "Skip pulling data from cluster %(cluster)s due to " "error: %(e)s", {'e': e, 'cluster': cluster.uuid}, exc_info=True) continue metrics = list() for name in monitor.get_metric_names(): try: metric = { 'name': name, 'value': monitor.compute_metric_value(name), 'unit': monitor.get_metric_unit(name), } metrics.append(metric) except Exception as e: LOG.warning("Skip adding metric %(name)s due to " "error: %(e)s", {'e': e, 'name': name}, exc_info=True) message = dict(metrics=metrics, user_id=cluster.user_id, project_id=cluster.project_id, resource_id=cluster.uuid) LOG.debug("About to send notification: '%s'", message) self.notifier.info(ctx, "magnum.cluster.metrics.update", message) def setup(conf, tg): pt = MagnumPeriodicTasks(conf) tg.add_dynamic_timer( pt.run_periodic_tasks, periodic_interval_max=conf.periodic_interval_max, context=None) magnum-6.1.0/magnum/service/__init__.py0000666000175100017510000000000013244017334020054 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/servicegroup/0000775000175100017510000000000013244017675017040 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/servicegroup/__init__.py0000666000175100017510000000000013244017334021131 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/servicegroup/magnum_service_periodic.py0000666000175100017510000000413313244017334024267 0ustar zuulzuul00000000000000# Copyright 2015 - Yahoo! Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Magnum Service Layer""" from oslo_log import log from oslo_service import periodic_task from magnum import objects from magnum.service import periodic LOG = log.getLogger(__name__) class MagnumServicePeriodicTasks(periodic_task.PeriodicTasks): '''Magnum periodic Task class Any periodic task job need to be added into this class ''' def __init__(self, conf, binary): self.magnum_service_ref = None self.host = conf.host self.binary = binary super(MagnumServicePeriodicTasks, self).__init__(conf) @periodic_task.periodic_task(run_immediately=True) @periodic.set_context def update_magnum_service(self, ctx): LOG.debug('Update magnum_service') if self.magnum_service_ref is None: self.magnum_service_ref = \ objects.MagnumService.get_by_host_and_binary( ctx, self.host, self.binary) if self.magnum_service_ref is None: magnum_service_dict = { 'host': self.host, 'binary': self.binary } self.magnum_service_ref = objects.MagnumService( ctx, **magnum_service_dict) self.magnum_service_ref.create() self.magnum_service_ref.report_state_up() def setup(conf, binary, tg): pt = MagnumServicePeriodicTasks(conf, binary) tg.add_dynamic_timer( pt.run_periodic_tasks, periodic_interval_max=conf.periodic_interval_max, context=None) magnum-6.1.0/magnum/api/0000775000175100017510000000000013244017675015074 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/api/servicegroup.py0000666000175100017510000000234713244017334020163 0ustar zuulzuul00000000000000# Licensed 
under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_utils import timeutils import magnum.conf from magnum.objects import magnum_service CONF = magnum.conf.CONF class ServiceGroup(object): def __init__(self): self.service_down_time = CONF.service_down_time def service_is_up(self, member): if not isinstance(member, magnum_service.MagnumService): raise TypeError if member.forced_down: return False last_heartbeat = (member.last_seen_up or member.updated_at or member.created_at) now = timeutils.utcnow(True) elapsed = timeutils.delta_seconds(last_heartbeat, now) is_up = abs(elapsed) <= self.service_down_time return is_up magnum-6.1.0/magnum/api/http_error.py0000666000175100017510000000474713244017334017644 0ustar zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json import six from webob import exc class HTTPNotAcceptableAPIVersion(exc.HTTPNotAcceptable): # subclass of :class:`~HTTPNotAcceptable` # # This indicates the resource identified by the request is only # capable of generating response entities which have content # characteristics not acceptable according to the accept headers # sent in the request. # # code: 406, title: Not Acceptable # # differences from webob.exc.HTTPNotAcceptable: # # - additional max and min version parameters # - additional error info for code, title, and links code = 406 title = 'Not Acceptable' max_version = '' min_version = '' def __init__(self, detail=None, headers=None, comment=None, body_template=None, max_version='', min_version='', **kw): super(HTTPNotAcceptableAPIVersion, self).__init__( detail=detail, headers=headers, comment=comment, body_template=body_template, **kw) self.max_version = max_version self.min_version = min_version def __call__(self, environ, start_response): for err_str in self.app_iter: err = {} try: err = json.loads(err_str.decode('utf-8')) except ValueError: pass links = {'rel': 'help', 'href': 'http://developer.openstack.org' '/api-guide/compute/microversions.html'} err['max_version'] = self.max_version err['min_version'] = self.min_version err['code'] = "magnum.microversion-unsupported" err['links'] = [links] err['title'] = "Requested microversion is unsupported" self.app_iter = [six.b(json.dumps(err))] self.headers['Content-Length'] = str(len(self.app_iter[0])) return super(HTTPNotAcceptableAPIVersion, self).__call__( environ, start_response) magnum-6.1.0/magnum/api/attr_validator.py0000666000175100017510000002337613244017334020472 0ustar zuulzuul00000000000000# Copyright 2015 EasyStack, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glanceclient import exc as glance_exception from novaclient import exceptions as nova_exception from oslo_serialization import jsonutils as json from magnum.api import utils as api_utils from magnum.common import clients from magnum.common import exception from magnum.i18n import _ SUPPORTED_ISOLATION = ['filesystem/posix', 'filesystem/linux', 'filesystem/shared', 'posix/cpu', 'posix/mem', 'posix/disk', 'cgroups/cpu', 'cgroups/mem', 'docker/runtime', 'namespaces/pid'] SUPPORTED_IMAGE_PROVIDERS = ['docker', 'appc'] SUPPORTED_SWARM_STRATEGY = ['spread', 'binpack', 'random'] def validate_image(cli, image): """Validate image""" try: image_found = api_utils.get_openstack_resource(cli.glance().images, image, 'images') except (glance_exception.NotFound, exception.ResourceNotFound): raise exception.ImageNotFound(image_id=image) except glance_exception.HTTPForbidden: raise exception.ImageNotAuthorized(image_id=image) if not image_found.get('os_distro'): raise exception.OSDistroFieldNotFound(image_id=image) return image_found def validate_flavor(cli, flavor): """Validate flavor. If flavor is None, skip the validation and use the default value from the heat template. 
""" if flavor is None: return flavor_list = cli.nova().flavors.list() for f in flavor_list: if f.name == flavor or f.id == flavor: return raise exception.FlavorNotFound(flavor=flavor) def validate_keypair(cli, keypair): """Validate keypair""" try: cli.nova().keypairs.get(keypair) except nova_exception.NotFound: raise exception.KeyPairNotFound(keypair=keypair) def validate_external_network(cli, external_network): """Validate external network""" count = 0 ext_filter = {'router:external': True} networks = cli.neutron().list_networks(**ext_filter) for net in networks.get('networks'): if (net.get('name') == external_network or net.get('id') == external_network): count = count + 1 if count == 0: # Unable to find the external network. # Or the network is private. raise exception.ExternalNetworkNotFound(network=external_network) if count > 1: msg = _("Multiple external networks exist with same name '%s'. " "Please use the external network ID instead.") raise exception.Conflict(msg % external_network) def validate_fixed_network(cli, fixed_network): """Validate fixed network""" # TODO(houming):this method implement will be added after this # first pathch for Cluster's OpenStack resources validation is merged. 
pass def validate_labels(labels): """"Validate labels""" for attr, validate_method in labels_validators.items(): if labels.get(attr) is not None: validate_method(labels) def validate_labels_isolation(labels): """Validate mesos_slave_isolation""" mesos_slave_isolation = labels.get('mesos_slave_isolation') mesos_slave_isolation_list = mesos_slave_isolation.split(',') unsupported_isolations = set(mesos_slave_isolation_list) - set( SUPPORTED_ISOLATION) if (len(unsupported_isolations) > 0): raise exception.InvalidParameterValue(_( 'property "labels/mesos_slave_isolation" with value ' '"%(isolation_val)s" is not supported, supported values are: ' '%(supported_isolation)s') % { 'isolation_val': ', '.join(list(unsupported_isolations)), 'supported_isolation': ', '.join( SUPPORTED_ISOLATION + ['unspecified'])}) def validate_labels_image_providers(labels): """Validate mesos_slave_image_providers""" mesos_slave_image_providers = labels.get('mesos_slave_image_providers') mesos_slave_image_providers_list = mesos_slave_image_providers.split(',') isolation_with_valid_data = False for image_providers_val in mesos_slave_image_providers_list: image_providers_val = image_providers_val.lower() if image_providers_val not in SUPPORTED_IMAGE_PROVIDERS: raise exception.InvalidParameterValue(_( 'property "labels/mesos_slave_image_providers" with value ' '"%(image_providers)s" is not supported, supported values ' 'are: %(supported_image_providers)s') % { 'image_providers': image_providers_val, 'supported_image_providers': ', '.join( SUPPORTED_IMAGE_PROVIDERS + ['unspecified'])}) if image_providers_val == 'docker': mesos_slave_isolation = labels.get('mesos_slave_isolation') if mesos_slave_isolation is not None: mesos_slave_isolation_list = mesos_slave_isolation.split(',') for isolations_val in mesos_slave_isolation_list: if isolations_val == 'docker/runtime': isolation_with_valid_data = True if mesos_slave_isolation is None or not isolation_with_valid_data: raise 
exception.RequiredParameterNotProvided(_( "Docker runtime isolator has to be specified if 'docker' " "is included in 'mesos_slave_image_providers' Please add " "'docker/runtime' to 'mesos_slave_isolation' labels " "flags")) def validate_labels_executor_env_variables(labels): """Validate executor_environment_variables""" mesos_slave_executor_env_val = labels.get( 'mesos_slave_executor_env_variables') try: json.loads(mesos_slave_executor_env_val) except ValueError: err = (_("Json format error")) raise exception.InvalidParameterValue(err) def validate_labels_strategy(labels): """Validate swarm_strategy""" swarm_strategy = list(labels.get('swarm_strategy', "").split()) unsupported_strategy = set(swarm_strategy) - set( SUPPORTED_SWARM_STRATEGY) if (len(unsupported_strategy) > 0): raise exception.InvalidParameterValue(_( 'property "labels/swarm_strategy" with value ' '"%(strategy)s" is not supported, supported values are: ' '%(supported_strategies)s') % { 'strategy': ' '.join(list(unsupported_strategy)), 'supported_strategies': ', '.join( SUPPORTED_SWARM_STRATEGY + ['unspecified'])}) def validate_os_resources(context, cluster_template, cluster=None): """Validate ClusterTemplate's OpenStack Resources""" cli = clients.OpenStackClients(context) for attr, validate_method in validators.items(): if cluster and attr in cluster and cluster[attr]: if attr != 'labels': validate_method(cli, cluster[attr]) else: validate_method(cluster[attr]) elif attr in cluster_template and cluster_template[attr] is not None: if attr != 'labels': validate_method(cli, cluster_template[attr]) else: validate_method(cluster_template[attr]) if cluster: validate_keypair(cli, cluster['keypair']) def validate_master_count(cluster, cluster_template): if cluster['master_count'] > 1 and \ not cluster_template['master_lb_enabled']: raise exception.InvalidParameterValue(_( "master_count must be 1 when master_lb_enabled is False")) def validate_federation_hostcluster(cluster_uuid): """Validate Federation 
`hostcluster_id` parameter. If the parameter was not specified raise an `exceptions.InvalidParameterValue`. If the specified identifier does not identify any Cluster, raise `exception.ClusterNotFound` """ if cluster_uuid is not None: api_utils.get_resource('Cluster', cluster_uuid) else: raise exception.InvalidParameterValue( "No hostcluster specified. " "Please specify a hostcluster_id.") def validate_federation_properties(properties): """Validate Federation `properties` parameter.""" if properties is None: raise exception.InvalidParameterValue( "Please specify a `properties` " "dict for the federation.") # Currently, we only support the property `dns-zone`. if properties.get('dns-zone') is None: raise exception.InvalidParameterValue("No DNS zone specified. " "Please specify a `dns-zone`.") # Dictionary that maintains a list of validation functions validators = {'image_id': validate_image, 'flavor_id': validate_flavor, 'master_flavor_id': validate_flavor, 'external_network_id': validate_external_network, 'fixed_network': validate_fixed_network, 'labels': validate_labels} labels_validators = {'mesos_slave_isolation': validate_labels_isolation, 'mesos_slave_image_providers': validate_labels_image_providers, 'mesos_slave_executor_env_variables': validate_labels_executor_env_variables, 'swarm_strategy': validate_labels_strategy} magnum-6.1.0/magnum/api/middleware/0000775000175100017510000000000013244017675017211 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/api/middleware/__init__.py0000666000175100017510000000147513244017334021323 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
class AuthTokenMiddleware(auth_token.AuthProtocol):
    """A wrapper on Keystone auth_token middleware.

    Does not perform verification of authentication tokens
    for public routes in the API.
    """
    def __init__(self, app, conf, public_api_routes=None):
        """Compile the public-route patterns and set up keystone auth.

        :param app: the WSGI application being wrapped.
        :param conf: keystonemiddleware configuration dict.
        :param public_api_routes: list of path templates (e.g. '/', '/v1')
            that may be accessed without a token.
        :raises exception.ConfigInvalid: if a route cannot be compiled
            into a regular expression.
        """
        if public_api_routes is None:
            public_api_routes = []
        # Raw string: `\.` is an invalid escape sequence in a plain string
        # literal and triggers a warning on modern Python.
        route_pattern_tpl = r'%s(\.json)?$'

        try:
            self.public_api_routes = [re.compile(route_pattern_tpl % route_tpl)
                                      for route_tpl in public_api_routes]
        except re.error as e:
            msg = _('Cannot compile public API routes: %s') % e
            LOG.error(msg)
            raise exception.ConfigInvalid(error_msg=msg)

        super(AuthTokenMiddleware, self).__init__(app, conf)

    def __call__(self, env, start_response):
        """Dispatch the request, bypassing auth for public routes.

        Saves the public/non-public decision in ``env['is_public_api']``
        because other components rely on it later in the pipeline.
        """
        path = utils.safe_rstrip(env.get('PATH_INFO'), '/')

        env['is_public_api'] = any(re.match(pattern, path)
                                   for pattern in self.public_api_routes)

        if env['is_public_api']:
            # Skip token validation entirely for public endpoints.
            return self._app(env, start_response)

        return super(AuthTokenMiddleware, self).__call__(env, start_response)

    @classmethod
    def factory(cls, global_config, **local_conf):
        """Paste-deploy factory; reads 'acl_public_routes' from local conf.

        The option is a comma-separated list of public path templates.
        """
        public_routes = local_conf.get('acl_public_routes', '')
        public_api_routes = [path.strip() for path in public_routes.split(',')]

        def _factory(app):
            return cls(app, global_config, public_api_routes=public_api_routes)
        return _factory
""" Middleware to replace the plain text message body of an error response with one formatted so the client can parse it. Based on pecan.middleware.errordocument """ import json import six from magnum.i18n import _ class ParsableErrorMiddleware(object): """Replace error body with something the client can parse.""" def __init__(self, app): self.app = app def _update_errors(self, app_iter, status_code): errs = [] for err_str in app_iter: err = {} try: err = json.loads(err_str.decode('utf-8')) except ValueError: pass if 'title' in err and 'description' in err: title = err['title'] desc = err['description'] elif 'faultstring' in err: title = err['faultstring'].split('.', 1)[0] desc = err['faultstring'] else: title = '' desc = '' code = err['faultcode'].lower() if 'faultcode' in err else '' # if already formatted by custom exception, don't update if 'min_version' in err: errs.append(err) else: errs.append({ 'request_id': '', 'code': code, 'status': status_code, 'title': title, 'detail': desc, 'links': []}) return errs def __call__(self, environ, start_response): # Request for this state, modified by replace_start_response() # and used when an error is being reported. state = {} def replacement_start_response(status, headers, exc_info=None): """Overrides the default response to make errors parsable.""" try: status_code = int(status.split(' ')[0]) state['status_code'] = status_code except (ValueError, TypeError): # pragma: nocover raise Exception(_( 'ErrorDocumentMiddleware received an invalid ' 'status %s') % status) else: if (state['status_code'] // 100) not in (2, 3): # Remove some headers so we can replace them later # when we have the full error message and can # compute the length. headers = [(h, v) for (h, v) in headers if h not in ('Content-Length', 'Content-Type') ] # Save the headers in case we need to modify them. 
state['headers'] = headers return start_response(status, headers, exc_info) app_iter = self.app(environ, replacement_start_response) if (state['status_code'] // 100) not in (2, 3): errs = self._update_errors(app_iter, state['status_code']) body = [six.b(json.dumps({'errors': errs}))] state['headers'].append(('Content-Type', 'application/json')) state['headers'].append(('Content-Length', str(len(body[0])))) else: body = app_iter return body magnum-6.1.0/magnum/api/expose.py0000666000175100017510000000150613244017334016745 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import wsmeext.pecan as wsme_pecan def expose(*args, **kwargs): """Ensure that only JSON, and not XML, is supported.""" if 'rest_content_types' not in kwargs: kwargs['rest_content_types'] = ('json',) return wsme_pecan.wsexpose(*args, **kwargs) magnum-6.1.0/magnum/api/app.wsgi0000666000175100017510000000136713244017334016550 0ustar zuulzuul00000000000000# -*- mode: python -*- # # Copyright 2017 SUSE Linux GmbH # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
class ContextHook(hooks.PecanHook):
    """Configures a request context and attaches it to the request.

    The following HTTP request headers are used:

    X-User-Name: Used for context.user_name.
    X-User-Id: Used for context.user_id.
    X-Project-Name: Used for context.project.
    X-Project-Id: Used for context.project_id.
    X-Auth-Token: Used for context.auth_token.
    X-Roles: Used for context.roles.
    """

    def before(self, state):
        # Build the request context straight from the identity headers
        # injected by keystonemiddleware and attach it to the request.
        headers = state.request.headers
        state.request.context = context.make_context(
            auth_token=headers.get('X-Auth-Token'),
            auth_url=CONF.keystone_authtoken.auth_uri,
            auth_token_info=state.request.environ.get('keystone.token_info'),
            user_name=headers.get('X-User-Name'),
            user_id=headers.get('X-User-Id'),
            project_name=headers.get('X-Project-Name'),
            project_id=headers.get('X-Project-Id'),
            domain_id=headers.get('X-User-Domain-Id'),
            domain_name=headers.get('X-User-Domain-Name'),
            roles=headers.get('X-Roles', '').split(','))
if CONF.debug and json_body.get('faultcode') != 'Server': return faultsting = json_body.get('faultstring') traceback_marker = 'Traceback (most recent call last):' if faultsting and (traceback_marker in faultsting): # Cut-off traceback. faultsting = faultsting.split(traceback_marker, 1)[0] # Remove trailing newlines and spaces if any. json_body['faultstring'] = faultsting.rstrip() # Replace the whole json. Cannot change original one because it's # generated on the fly. state.response.json = json_body magnum-6.1.0/magnum/api/config.py0000666000175100017510000000210313244017334016701 0ustar zuulzuul00000000000000# Copyright 2013 - Noorul Islam K M # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from magnum.api import hooks # Pecan Application Configurations app = { 'root': 'magnum.api.controllers.root.RootController', 'modules': ['magnum.api'], 'debug': False, 'hooks': [ hooks.ContextHook(), hooks.RPCHook(), hooks.NoExceptionTracebackHook(), ], 'acl_public_routes': [ '/', '/v1', ], } # Custom Configurations must be in Python dictionary format:: # # foo = {'bar':'baz'} # # All configurations are accessible at:: # pecan.conf magnum-6.1.0/magnum/api/controllers/0000775000175100017510000000000013244017675017442 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/api/controllers/v1/0000775000175100017510000000000013244017675017770 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/api/controllers/v1/types.py0000666000175100017510000001421713244017334021505 0ustar zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
class BooleanType(wtypes.UserType):
    """A simple boolean type."""
    basetype = wtypes.text
    name = 'boolean'

    @staticmethod
    def validate(value):
        """Coerce `value` to a bool; reject anything unrecognised.

        :raises exception.Invalid: so the API returns 400 (BadRequest).
        """
        try:
            return strutils.bool_from_string(value, strict=True)
        except ValueError as err:
            # raise Invalid to return 400 (BadRequest) in the API
            raise exception.Invalid(err)

    @staticmethod
    def frombasetype(value):
        # None passes through untouched; everything else is validated.
        return None if value is None else BooleanType.validate(value)
""" basetype = wtypes.text def __init__(self, *types): self.types = types def __str__(self): return ' | '.join(map(str, self.types)) def validate(self, value): for t in self.types: try: return wtypes.validate_value(t, value) except (exception.InvalidUUID, ValueError): pass else: raise ValueError( _("Wrong type. Expected '%(type)s', got '%(value)s'") % {'type': self.types, 'value': type(value)}) macaddress = MacAddressType() uuid = UuidType() name = NameType() uuid_or_name = MultiType(UuidType, NameType) boolean = BooleanType() class JsonPatchType(wtypes.Base): """A complex type that represents a single json-patch operation.""" path = wtypes.wsattr(wtypes.StringType(pattern='^(/[\w-]+)+$'), mandatory=True) op = wtypes.wsattr(wtypes.Enum(str, 'add', 'replace', 'remove'), mandatory=True) value = MultiType(wtypes.text, int) # The class of the objects being patched. Override this in subclasses. # Should probably be a subclass of magnum.api.controllers.base.APIBase. _api_base = None # Attributes that are not required for construction, but which may not be # removed if set. Override in subclasses if needed. _extra_non_removable_attrs = set() # Set of non-removable attributes, calculated lazily. _non_removable_attrs = None @staticmethod def internal_attrs(): """Returns a list of internal attributes. Internal attributes can't be added, replaced or removed. This method may be overwritten by derived class. """ return ['/created_at', '/id', '/links', '/updated_at', '/uuid', '/project_id', '/user_id'] @classmethod def non_removable_attrs(cls): """Returns a set of names of attributes that may not be removed. Attributes whose 'mandatory' property is True are automatically added to this set. To add additional attributes to the set, override the field _extra_non_removable_attrs in subclasses, with a set of the form {'/foo', '/bar'}. 
""" if cls._non_removable_attrs is None: cls._non_removable_attrs = cls._extra_non_removable_attrs.copy() if cls._api_base: fields = inspect.getmembers(cls._api_base, lambda a: not inspect.isroutine(a)) for name, field in fields: if getattr(field, 'mandatory', False): cls._non_removable_attrs.add('/%s' % name) return cls._non_removable_attrs @staticmethod def validate(patch): if patch.path in patch.internal_attrs(): msg = _("'%s' is an internal attribute and can not be updated") raise wsme.exc.ClientSideError(msg % patch.path) if patch.path in patch.non_removable_attrs() and patch.op == 'remove': msg = _("'%s' is a mandatory attribute and can not be removed") raise wsme.exc.ClientSideError(msg % patch.path) if patch.op != 'remove': if not patch.value: msg = _("'add' and 'replace' operations needs value") raise wsme.exc.ClientSideError(msg) ret = {'path': patch.path, 'op': patch.op} if patch.value: ret['value'] = patch.value return ret magnum-6.1.0/magnum/api/controllers/v1/bay.py0000777000175100017510000005213713244017334021122 0ustar zuulzuul00000000000000# Copyright 2013 UnitedStack Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
class BayID(wtypes.Base):
    """Minimal response body carrying only a bay's UUID.

    Returned by the asynchronous (API microversion >= 1.2) bay
    create/update/delete endpoints instead of a full Bay document.
    """
    uuid = types.uuid

    def __init__(self, uuid):
        self.uuid = uuid
""" _baymodel_id = None def _get_baymodel_id(self): return self._baymodel_id def _set_baymodel_id(self, value): if value and self._baymodel_id != value: try: baymodel = api_utils.get_resource('ClusterTemplate', value) self._baymodel_id = baymodel.uuid except exception.ClusterTemplateNotFound as e: # Change error code because 404 (NotFound) is inappropriate # response for a POST request to create a Cluster e.code = 400 # BadRequest raise elif value == wtypes.Unset: self._baymodel_id = wtypes.Unset uuid = types.uuid """Unique UUID for this bay""" name = wtypes.StringType(min_length=1, max_length=242, pattern='^[a-zA-Z][a-zA-Z0-9_.-]*$') """Name of this bay, max length is limited to 242 because of heat stack requires max length limit to 255, and Magnum amend a uuid length""" baymodel_id = wsme.wsproperty(wtypes.text, _get_baymodel_id, _set_baymodel_id, mandatory=True) """The baymodel UUID""" node_count = wsme.wsattr(wtypes.IntegerType(minimum=1), default=1) """The node count for this bay. Default to 1 if not set""" master_count = wsme.wsattr(wtypes.IntegerType(minimum=1), default=1) """The number of master nodes for this bay. Default to 1 if not set""" docker_volume_size = wtypes.IntegerType(minimum=1) """The size in GB of the docker volume""" labels = wtypes.DictType(str, str) """One or more key/value pairs""" master_flavor_id = wtypes.StringType(min_length=1, max_length=255) """The master flavor of this Bay""" flavor_id = wtypes.StringType(min_length=1, max_length=255) """The flavor of this Bay""" bay_create_timeout = wsme.wsattr(wtypes.IntegerType(minimum=0), default=60) """Timeout for creating the bay in minutes. 
Default to 60 if not set""" links = wsme.wsattr([link.Link], readonly=True) """A list containing a self link and associated bay links""" stack_id = wsme.wsattr(wtypes.text, readonly=True) """Stack id of the heat stack""" status = wtypes.Enum(str, *fields.ClusterStatus.ALL) """Status of the bay from the heat stack""" status_reason = wtypes.text """Status reason of the bay from the heat stack""" discovery_url = wtypes.text """Url used for bay node discovery""" api_address = wsme.wsattr(wtypes.text, readonly=True) """Api address of cluster master node""" coe_version = wsme.wsattr(wtypes.text, readonly=True) """Version of the COE software currently running in this cluster. Example: swarm version or kubernetes version.""" container_version = wsme.wsattr(wtypes.text, readonly=True) """Version of the container software. Example: docker version.""" node_addresses = wsme.wsattr([wtypes.text], readonly=True) """IP addresses of cluster slave nodes""" master_addresses = wsme.wsattr([wtypes.text], readonly=True) """IP addresses of cluster master nodes""" bay_faults = wsme.wsattr(wtypes.DictType(str, wtypes.text)) """Fault info collected from the heat resources of this bay""" def __init__(self, **kwargs): super(Bay, self).__init__() self.fields = [] for field in objects.Cluster.fields: # Skip fields we do not expose. 
if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) # Set the renamed attributes for bay backwards compatibility self.fields.append('baymodel_id') if 'baymodel_id' in kwargs.keys(): setattr(self, 'cluster_template_id', kwargs.get('baymodel_id', None)) setattr(self, 'baymodel_id', kwargs.get('baymodel_id', None)) else: setattr(self, 'baymodel_id', kwargs.get('cluster_template_id', None)) self.fields.append('bay_create_timeout') if 'bay_create_timeout' in kwargs.keys(): setattr(self, 'create_timeout', kwargs.get('bay_create_timeout', wtypes.Unset)) setattr(self, 'bay_create_timeout', kwargs.get('bay_create_timeout', wtypes.Unset)) else: setattr(self, 'bay_create_timeout', kwargs.get('create_timeout', wtypes.Unset)) self.fields.append('bay_faults') if 'bay_faults' in kwargs.keys(): setattr(self, 'faults', kwargs.get('bay_faults', wtypes.Unset)) setattr(self, 'bay_faults', kwargs.get('bay_faults', wtypes.Unset)) else: setattr(self, 'bay_faults', kwargs.get('faults', wtypes.Unset)) @staticmethod def _convert_with_links(bay, url, expand=True): if not expand: bay.unset_fields_except(['uuid', 'name', 'baymodel_id', 'docker_volume_size', 'labels', 'master_flavor_id', 'flavor_id', 'node_count', 'status', 'bay_create_timeout', 'master_count', 'stack_id']) bay.links = [link.Link.make_link('self', url, 'bays', bay.uuid), link.Link.make_link('bookmark', url, 'bays', bay.uuid, bookmark=True)] return bay @classmethod def convert_with_links(cls, rpc_bay, expand=True): bay = Bay(**rpc_bay.as_dict()) return cls._convert_with_links(bay, pecan.request.host_url, expand) @classmethod def sample(cls, expand=True): sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', name='example', baymodel_id='4a96ac4b-2447-43f1-8ca6-9fd6f36d146d', node_count=2, master_count=1, docker_volume_size=1, labels={}, master_flavor_id=None, flavor_id=None, bay_create_timeout=15, stack_id='49dc23f5-ffc9-40c3-9d34-7be7f9e34d63', 
class BayPatchType(types.JsonPatchType):
    """JSON-patch type for Bay objects with bay-specific internal attrs."""

    _api_base = Bay

    @staticmethod
    def internal_attrs():
        """Return base internal attrs plus bay-only protected paths."""
        bay_only = ['/api_address', '/node_addresses',
                    '/master_addresses', '/stack_id',
                    '/ca_cert_ref', '/magnum_cert_ref',
                    '/trust_id', '/trustee_user_name',
                    '/trustee_password', '/trustee_user_id']
        return types.JsonPatchType.internal_attrs() + bay_only
_get_bays_collection(self, marker, limit, sort_key, sort_dir, expand=False, resource_url=None): limit = api_utils.validate_limit(limit) sort_dir = api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.Cluster.get_by_uuid(pecan.request.context, marker) bays = objects.Cluster.list(pecan.request.context, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir) return BayCollection.convert_with_links(bays, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) @expose.expose(BayCollection, types.uuid, int, wtypes.text, wtypes.text) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of bays. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'bay:get_all', action='bay:get_all') return self._get_bays_collection(marker, limit, sort_key, sort_dir) @expose.expose(BayCollection, types.uuid, int, wtypes.text, wtypes.text) def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of bays with detail. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. 
""" context = pecan.request.context policy.enforce(context, 'bay:detail', action='bay:detail') # NOTE(lucasagomes): /detail should only work against collections parent = pecan.request.path.split('/')[:-1][-1] if parent != "bays": raise exception.HTTPNotFound expand = True resource_url = '/'.join(['bays', 'detail']) return self._get_bays_collection(marker, limit, sort_key, sort_dir, expand, resource_url) def _collect_fault_info(self, context, bay): """Collect fault info from heat resources of given bay and store them into bay.bay_faults. """ osc = clients.OpenStackClients(context) filters = {'status': 'FAILED'} try: failed_resources = osc.heat().resources.list( bay.stack_id, nested_depth=2, filters=filters) except Exception as e: failed_resources = [] LOG.warning("Failed to retrieve failed resources for " "bay %(bay)s from Heat stack %(stack)s " "due to error: %(e)s", {'bay': bay.uuid, 'stack': bay.stack_id, 'e': e}, exc_info=True) return {res.resource_name: res.resource_status_reason for res in failed_resources} @expose.expose(Bay, types.uuid_or_name) def get_one(self, bay_ident): """Retrieve information about the given bay. :param bay_ident: UUID of a bay or logical name of the bay. """ context = pecan.request.context bay = api_utils.get_resource('Cluster', bay_ident) policy.enforce(context, 'bay:get', bay.as_dict(), action='bay:get') bay = Bay.convert_with_links(bay) if bay.status in fields.ClusterStatus.STATUS_FAILED: bay.bay_faults = self._collect_fault_info(context, bay) return bay @base.Controller.api_version("1.1", "1.1") @expose.expose(Bay, body=Bay, status_code=201) def post(self, bay): """Create a new bay. :param bay: a bay within the request body. 
""" new_bay = self._post(bay) res_bay = pecan.request.rpcapi.cluster_create(new_bay, bay.bay_create_timeout) # Set the HTTP Location Header pecan.response.location = link.build_url('bays', res_bay.uuid) return Bay.convert_with_links(res_bay) @base.Controller.api_version("1.2") # noqa @expose.expose(BayID, body=Bay, status_code=202) def post(self, bay): """Create a new bay. :param bay: a bay within the request body. """ new_bay = self._post(bay) pecan.request.rpcapi.cluster_create_async(new_bay, bay.bay_create_timeout) return BayID(new_bay.uuid) def _post(self, bay): context = pecan.request.context policy.enforce(context, 'bay:create', action='bay:create') baymodel = objects.ClusterTemplate.get_by_uuid(context, bay.baymodel_id) # If docker_volume_size is not present, use baymodel value if bay.docker_volume_size == wtypes.Unset: bay.docker_volume_size = baymodel.docker_volume_size # If labels is not present, use baymodel value if bay.labels is None: bay.labels = baymodel.labels # If master_flavor_id is not present, use baymodel value if bay.master_flavor_id == wtypes.Unset or not bay.master_flavor_id: bay.master_flavor_id = baymodel.master_flavor_id # If flavor_id is not present, use baymodel value if bay.flavor_id == wtypes.Unset or not bay.flavor_id: bay.flavor_id = baymodel.flavor_id bay_dict = bay.as_dict() bay_dict['keypair'] = baymodel.keypair_id attr_validator.validate_os_resources(context, baymodel.as_dict(), bay_dict) attr_validator.validate_master_count(bay.as_dict(), baymodel.as_dict()) bay_dict['project_id'] = context.project_id bay_dict['user_id'] = context.user_id # NOTE(yuywz): We will generate a random human-readable name for # bay if the name is not specified by user. 
name = bay_dict.get('name') or self._generate_name_for_bay(context) bay_dict['name'] = name bay_dict['coe_version'] = None bay_dict['container_version'] = None new_bay = objects.Cluster(context, **bay_dict) new_bay.uuid = uuid.uuid4() return new_bay @base.Controller.api_version("1.1", "1.1") @wsme.validate(types.uuid, [BayPatchType]) @expose.expose(Bay, types.uuid_or_name, body=[BayPatchType]) def patch(self, bay_ident, patch): """Update an existing bay. :param bay_ident: UUID or logical name of a bay. :param patch: a json PATCH document to apply to this bay. """ bay = self._patch(bay_ident, patch) res_bay = pecan.request.rpcapi.cluster_update(bay) return Bay.convert_with_links(res_bay) @base.Controller.api_version("1.2", "1.2") # noqa @wsme.validate(types.uuid, [BayPatchType]) @expose.expose(BayID, types.uuid_or_name, body=[BayPatchType], status_code=202) def patch(self, bay_ident, patch): """Update an existing bay. :param bay_ident: UUID or logical name of a bay. :param patch: a json PATCH document to apply to this bay. """ bay = self._patch(bay_ident, patch) pecan.request.rpcapi.cluster_update_async(bay) return BayID(bay.uuid) @base.Controller.api_version("1.3") # noqa @wsme.validate(types.uuid, bool, [BayPatchType]) @expose.expose(BayID, types.uuid_or_name, types.boolean, body=[BayPatchType], status_code=202) def patch(self, bay_ident, rollback=False, patch=None): """Update an existing bay. :param bay_ident: UUID or logical name of a bay. :param rollback: whether to rollback bay on update failure. :param patch: a json PATCH document to apply to this bay. 
""" bay = self._patch(bay_ident, patch) pecan.request.rpcapi.cluster_update_async(bay, rollback=rollback) return BayID(bay.uuid) def _patch(self, bay_ident, patch): context = pecan.request.context bay = api_utils.get_resource('Cluster', bay_ident) policy.enforce(context, 'bay:update', bay.as_dict(), action='bay:update') try: bay_dict = bay.as_dict() new_bay = Bay(**api_utils.apply_jsonpatch(bay_dict, patch)) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) # Update only the fields that have changed for field in objects.Cluster.fields: try: patch_val = getattr(new_bay, field) except AttributeError: # Ignore fields that aren't exposed in the API continue if patch_val == wtypes.Unset: patch_val = None if bay[field] != patch_val: bay[field] = patch_val delta = bay.obj_what_changed() validate_cluster_properties(delta) return bay @base.Controller.api_version("1.1", "1.1") @expose.expose(None, types.uuid_or_name, status_code=204) def delete(self, bay_ident): """Delete a bay. :param bay_ident: UUID of a bay or logical name of the bay. """ bay = self._delete(bay_ident) pecan.request.rpcapi.cluster_delete(bay.uuid) @base.Controller.api_version("1.2") # noqa @expose.expose(None, types.uuid_or_name, status_code=204) def delete(self, bay_ident): """Delete a bay. :param bay_ident: UUID of a bay or logical name of the bay. """ bay = self._delete(bay_ident) pecan.request.rpcapi.cluster_delete_async(bay.uuid) def _delete(self, bay_ident): context = pecan.request.context bay = api_utils.get_resource('Cluster', bay_ident) policy.enforce(context, 'bay:delete', bay.as_dict(), action='bay:delete') return bay magnum-6.1.0/magnum/api/controllers/v1/certificate.py0000666000175100017510000001551013244017334022620 0ustar zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import timeutils import pecan import wsme from wsme import types as wtypes from magnum.api.controllers import base from magnum.api.controllers import link from magnum.api.controllers.v1 import types from magnum.api import expose from magnum.api import utils as api_utils from magnum.common import exception from magnum.common import policy from magnum import objects class Certificate(base.APIBase): """API representation of a certificate. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a certificate. 
""" _cluster_uuid = None """uuid or logical name of cluster""" _cluster = None def _get_cluster_uuid(self): return self._cluster_uuid def _set_cluster_uuid(self, value): if value and self._cluster_uuid != value: try: self._cluster = api_utils.get_resource('Cluster', value) self._cluster_uuid = self._cluster.uuid except exception.ClusterNotFound as e: # Change error code because 404 (NotFound) is inappropriate # response for a POST request to create a Cluster e.code = 400 # BadRequest raise elif value == wtypes.Unset: self._cluster_uuid = wtypes.Unset bay_uuid = wsme.wsproperty(wtypes.text, _get_cluster_uuid, _set_cluster_uuid) """The bay UUID or id""" cluster_uuid = wsme.wsproperty(wtypes.text, _get_cluster_uuid, _set_cluster_uuid) """The cluster UUID or id""" links = wsme.wsattr([link.Link], readonly=True) """A list containing a self link and associated certificate links""" csr = wtypes.StringType(min_length=1) """"The Certificate Signing Request""" pem = wtypes.StringType() """"The Signed Certificate""" def __init__(self, **kwargs): super(Certificate, self).__init__() self.fields = [] for field in objects.Certificate.fields: # Skip fields we do not expose. 
            if not hasattr(self, field):
                continue
            self.fields.append(field)
            setattr(self, field, kwargs.get(field, wtypes.Unset))

        # set the attribute for bay_uuid for backwards compatibility
        self.fields.append('bay_uuid')
        setattr(self, 'bay_uuid', kwargs.get('bay_uuid', self._cluster_uuid))

    def get_cluster(self):
        # Lazily resolve and cache the Cluster object referenced by
        # cluster_uuid; subsequent calls reuse the cached instance.
        if not self._cluster:
            self._cluster = api_utils.get_resource('Cluster',
                                                   self.cluster_uuid)
        return self._cluster

    @staticmethod
    def _convert_with_links(certificate, url, expand=True):
        """Attach self/bookmark links; trim fields when not expanded."""
        if not expand:
            certificate.unset_fields_except(['bay_uuid', 'cluster_uuid',
                                             'csr', 'pem'])

        certificate.links = [link.Link.make_link('self', url, 'certificates',
                                                 certificate.cluster_uuid),
                             link.Link.make_link('bookmark', url,
                                                 'certificates',
                                                 certificate.cluster_uuid,
                                                 bookmark=True)]
        return certificate

    @classmethod
    def convert_with_links(cls, rpc_cert, expand=True):
        """Build an API Certificate from an RPC certificate object."""
        cert = Certificate(**rpc_cert.as_dict())
        return cls._convert_with_links(cert, pecan.request.host_url, expand)

    @classmethod
    def sample(cls, expand=True):
        """Return a sample Certificate used for API documentation."""
        sample = cls(bay_uuid='7ae81bb3-dec3-4289-8d6c-da80bd8001ae',
                     cluster_uuid='7ae81bb3-dec3-4289-8d6c-da80bd8001ae',
                     created_at=timeutils.utcnow(),
                     csr='AAA....AAA')
        return cls._convert_with_links(sample,
                                       'http://localhost:9511', expand)


class CertificateController(base.Controller):
    """REST controller for Certificate."""

    def __init__(self):
        super(CertificateController, self).__init__()

    _custom_actions = {
        'detail': ['GET'],
    }

    @expose.expose(Certificate, types.uuid_or_name)
    def get_one(self, cluster_ident):
        """Retrieve CA information about the given cluster.

        :param cluster_ident: UUID of a cluster or logical name of the
                              cluster.
""" context = pecan.request.context cluster = api_utils.get_resource('Cluster', cluster_ident) policy.enforce(context, 'certificate:get', cluster.as_dict(), action='certificate:get') certificate = pecan.request.rpcapi.get_ca_certificate(cluster) return Certificate.convert_with_links(certificate) @expose.expose(Certificate, body=Certificate, status_code=201) def post(self, certificate): """Sign a new certificate by the CA. :param certificate: a certificate within the request body. """ context = pecan.request.context cluster = certificate.get_cluster() policy.enforce(context, 'certificate:create', cluster.as_dict(), action='certificate:create') certificate_dict = certificate.as_dict() certificate_dict['project_id'] = context.project_id certificate_dict['user_id'] = context.user_id cert_obj = objects.Certificate(context, **certificate_dict) new_cert = pecan.request.rpcapi.sign_certificate(cluster, cert_obj) return Certificate.convert_with_links(new_cert) @expose.expose(None, types.uuid_or_name, status_code=202) def patch(self, cluster_ident): context = pecan.request.context cluster = api_utils.get_resource('Cluster', cluster_ident) policy.enforce(context, 'certificate:rotate_ca', cluster.as_dict(), action='certificate:rotate_ca') if cluster.cluster_template.tls_disabled: raise exception.NotSupported("Rotating the CA certificate on a " "non-TLS cluster is not supported") pecan.request.rpcapi.rotate_ca_certificate(cluster) magnum-6.1.0/magnum/api/controllers/v1/federation.py0000666000175100017510000004306013244017334022457 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from oslo_log import log as logging import pecan import wsme from wsme import types as wtypes from magnum.api import attr_validator from magnum.api.controllers import base from magnum.api.controllers import link from magnum.api.controllers.v1 import collection from magnum.api.controllers.v1 import types from magnum.api import expose from magnum.api import utils as api_utils from magnum.api import validation from magnum.common import exception from magnum.common import name_generator from magnum.common import policy import magnum.conf from magnum import objects from magnum.objects import fields LOG = logging.getLogger(__name__) CONF = magnum.conf.CONF class FederationID(wtypes.Base): """API representation of a federation ID This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a federation ID. """ uuid = types.uuid def __init__(self, uuid): self.uuid = uuid class Federation(base.APIBase): """API representation of a federation. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a Federation. """ # Unique UUID for this federation. uuid = types.uuid # Name of this federation, max length is limited to 242 because heat stack # requires max length limit to 255, and Magnum amend a uuid length. name = wtypes.StringType(min_length=1, max_length=242, pattern='^[a-zA-Z][a-zA-Z0-9_.-]*$') # UUID of the hostcluster of the federation, i.e. the cluster that # hosts the COE Federated API. 
hostcluster_id = wsme.wsattr(wtypes.text) # List of UUIDs of all the member clusters of the federation. member_ids = wsme.wsattr([wtypes.text]) # Status of the federation. status = wtypes.Enum(str, *fields.FederationStatus.ALL) # Status reason of the federation. status_reason = wtypes.text # Set of federation metadata (COE-specific in some cases). properties = wtypes.DictType(str, str) # A list containing a self link and associated federations links links = wsme.wsattr([link.Link], readonly=True) def __init__(self, **kwargs): super(Federation, self).__init__() self.fields = [] for field in objects.Federation.fields: # Skip fields we do not expose. if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) @staticmethod def _convert_with_links(federation, url, expand=True): if not expand: federation.unset_fields_except(['uuid', 'name', 'hostcluster_id', 'member_ids', 'status', 'properties']) federation.links = [link.Link.make_link('self', url, 'federations', federation.uuid), link.Link.make_link('bookmark', url, 'federations', federation.uuid, bookmark=True)] return federation @classmethod def convert_with_links(cls, rpc_federation, expand=True): federation = Federation(**rpc_federation.as_dict()) return cls._convert_with_links(federation, pecan.request.host_url, expand) @classmethod def sample(cls, expand=True): sample = cls(uuid='4221a353-8368-475f-b7de-3429d3f724b3', name='example', hostcluster_id='49dc23f5-ffc9-40c3-9d34-7be7f9e34d63', member_ids=['49dc23f5-ffc9-40c3-9d34-7be7f9e34d63', 'f2439bcf-02a2-4278-9d8a-f07a2042230a', 'e549e0a5-3d3c-406f-bd7c-0e0182fb211c'], properties={'dns-zone': 'example.com.'}, status=fields.FederationStatus.CREATE_COMPLETE, status_reason="CREATE completed successfully") return cls._convert_with_links(sample, 'http://localhost:9511', expand) class FederationPatchType(types.JsonPatchType): _api_base = Federation @staticmethod def internal_attrs(): """"Returns a list of 
internal attributes.

        Internal attributes can't be added, replaced or removed.
        """
        internal_attrs = []
        return types.JsonPatchType.internal_attrs() + internal_attrs


class FederationCollection(collection.Collection):
    """API representation of a collection of federations."""

    # A list containing federation objects.
    federations = [Federation]

    def __init__(self, **kwargs):
        self._type = 'federations'

    @staticmethod
    def convert_with_links(rpc_federation, limit, url=None, expand=False,
                           **kwargs):
        """Build a paginated collection from RPC federation objects."""
        collection = FederationCollection()
        collection.federations = [Federation.convert_with_links(p, expand)
                                  for p in rpc_federation]
        # get_next() computes the pagination 'next' link from limit/url.
        collection.next = collection.get_next(limit, url=url, **kwargs)
        return collection

    @classmethod
    def sample(cls):
        """Return a sample collection used for API documentation."""
        sample = cls()
        sample.federations = [Federation.sample(expand=False)]
        return sample


class FederationsController(base.Controller):
    """REST controller for federations."""

    def __init__(self):
        super(FederationsController, self).__init__()

    _custom_actions = {
        'detail': ['GET'],
    }

    def _generate_name_for_federation(self, context):
        """Generate a random name like: phi-17-federation."""
        name_gen = name_generator.NameGenerator()
        name = name_gen.generate()
        return name + '-federation'

    def _get_federation_collection(self, marker, limit, sort_key,
                                   sort_dir, expand=False, resource_url=None):
        # Shared listing helper for get_all() and detail(): validates the
        # pagination/sort arguments, resolves the optional marker object and
        # returns a FederationCollection with pagination links.
        limit = api_utils.validate_limit(limit)
        sort_dir = api_utils.validate_sort_dir(sort_dir)

        marker_obj = None
        if marker:
            marker_obj = objects.Federation.get_by_uuid(pecan.request.context,
                                                        marker)

        federations = objects.Federation.list(pecan.request.context, limit,
                                              marker_obj, sort_key=sort_key,
                                              sort_dir=sort_dir)

        return FederationCollection.convert_with_links(federations, limit,
                                                       url=resource_url,
                                                       expand=expand,
                                                       sort_key=sort_key,
                                                       sort_dir=sort_dir)

    @expose.expose(FederationCollection, types.uuid, int, wtypes.text,
                   wtypes.text)
    def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'):
        """Retrieve a list of federations.
        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single
                      result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        """
        context = pecan.request.context
        policy.enforce(context, 'federation:get_all',
                       action='federation:get_all')
        return self._get_federation_collection(marker, limit, sort_key,
                                               sort_dir)

    @expose.expose(FederationCollection, types.uuid, int, wtypes.text,
                   wtypes.text)
    def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'):
        """Retrieve a list of federation with detail.

        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single
                      result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        """
        context = pecan.request.context
        policy.enforce(context, 'federation:detail',
                       action='federation:detail')

        # NOTE(lucasagomes): /detail should only work against collections
        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "federations":
            raise exception.HTTPNotFound

        expand = True
        resource_url = '/'.join(['federations', 'detail'])
        return self._get_federation_collection(marker, limit, sort_key,
                                               sort_dir, expand, resource_url)

    @expose.expose(Federation, types.uuid_or_name)
    def get_one(self, federation_ident):
        """Retrieve information about a given Federation.

        :param federation_ident: UUID or logical name of the Federation.
        """
        context = pecan.request.context
        federation = api_utils.get_resource('Federation', federation_ident)
        policy.enforce(context, 'federation:get', federation.as_dict(),
                       action='federation:get')

        federation = Federation.convert_with_links(federation)
        return federation

    @expose.expose(FederationID, body=Federation, status_code=202)
    def post(self, federation):
        """Create a new federation.

        :param federation: a federation within the request body.
""" context = pecan.request.context policy.enforce(context, 'federation:create', action='federation:create') federation_dict = federation.as_dict() # Validate `hostcluster_id` hostcluster_id = federation_dict.get('hostcluster_id') attr_validator.validate_federation_hostcluster(hostcluster_id) # Validate `properties` dict. properties_dict = federation_dict.get('properties') attr_validator.validate_federation_properties(properties_dict) federation_dict['project_id'] = context.project_id # If no name is specified, generate a random human-readable name name = (federation_dict.get('name') or self._generate_name_for_federation(context)) federation_dict['name'] = name new_federation = objects.Federation(context, **federation_dict) new_federation.uuid = uuid.uuid4() # TODO(clenimar): remove hard-coded `create_timeout`. pecan.request.rpcapi.federation_create_async(new_federation, create_timeout=15) return FederationID(new_federation.uuid) @expose.expose(FederationID, types.uuid_or_name, types.boolean, body=[FederationPatchType], status_code=202) def patch(self, federation_ident, rollback=False, patch=None): """Update an existing Federation. Please note that the join/unjoin operation is performed by patching `member_ids`. :param federation_ident: UUID or logical name of a federation. :param rollback: whether to rollback federation on update failure. :param patch: a json PATCH document to apply to this federation. 
""" federation = self._patch(federation_ident, patch) pecan.request.rpcapi.federation_update_async(federation, rollback) return FederationID(federation.uuid) def _patch(self, federation_ident, patch): context = pecan.request.context federation = api_utils.get_resource('Federation', federation_ident) policy.enforce(context, 'federation:update', federation.as_dict(), action='federation:update') # NOTE(clenimar): Magnum does not allow one to append items to existing # fields through an `add` operation using HTTP PATCH (please check # `magnum.api.utils.apply_jsonpatch`). In order to perform the join # and unjoin operations, intercept the original JSON PATCH document # and change the operation from either `add` or `remove` to `replace`. patch_path = patch[0].get('path') patch_value = patch[0].get('value') patch_op = patch[0].get('op') if patch_path == '/member_ids': if patch_op == 'add' and patch_value is not None: patch = self._join_wrapper(federation_ident, patch) elif patch_op == 'remove' and patch_value is not None: patch = self._unjoin_wrapper(federation_ident, patch) try: federation_dict = federation.as_dict() new_federation = Federation( **api_utils.apply_jsonpatch(federation_dict, patch)) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) # Retrieve only what changed after the patch. 
        delta = self._update_changed_fields(federation, new_federation)
        validation.validate_federation_properties(delta)

        return federation

    def _update_changed_fields(self, federation, new_federation):
        """Update only the patches that were modified and return the diff."""
        for field in objects.Federation.fields:
            try:
                patch_val = getattr(new_federation, field)
            except AttributeError:
                # Ignore fields that aren't exposed in the API
                continue
            if patch_val == wtypes.Unset:
                # Normalize Unset to None so the comparison against stored
                # values below behaves consistently.
                patch_val = None
            if federation[field] != patch_val:
                federation[field] = patch_val

        return federation.obj_what_changed()

    def _join_wrapper(self, federation_ident, patch):
        """Intercept PATCH JSON documents for join operations.

        Take a PATCH JSON document with `add` operation::

            {
                'op': 'add',
                'value': 'new_member_id',
                'path': '/member_ids'
            }

        and transform it into a document with `replace` operation::

            {
                'op': 'replace',
                'value': ['current_member_id1', ..., 'new_member_id'],
                'path': '/member_ids'
            }
        """
        federation = api_utils.get_resource('Federation', federation_ident)
        new_member_uuid = patch[0]['value']

        # Check if the cluster exists
        c = objects.Cluster.get_by_uuid(pecan.request.context,
                                        new_member_uuid)

        # Check if the cluster is already a member of the federation
        if new_member_uuid not in federation.member_ids and c is not None:
            # Retrieve all current members
            members = federation.member_ids
            # Add the new member
            members.append(c.uuid)
        else:
            # NOTE(review): this branch also fires when the lookup above
            # returned None (cluster missing), in which case
            # MemberAlreadyExists is a misleading error — confirm whether
            # get_by_uuid always raises ClusterNotFound instead of
            # returning None before relying on that message.
            kw = {'uuid': new_member_uuid, 'federation_name': federation.name}
            raise exception.MemberAlreadyExists(**kw)

        # Set `value` to the updated member list. Change `op` to `replace`
        patch[0]['value'] = members
        patch[0]['op'] = 'replace'

        return patch

    def _unjoin_wrapper(self, federation_ident, patch):
        """Intercept PATCH JSON documents for unjoin operations.
Take a PATCH JSON document with `remove` operation:: { 'op': 'remove', 'value': 'former_member_id', 'path': '/member_ids' } and transform it into a document with `replace` operation:: { 'op': 'replace', 'value': ['current_member_id1', ..., 'current_member_idn'], 'path': '/member_ids' } """ federation = api_utils.get_resource('Federation', federation_ident) cluster_uuid = patch[0]['value'] # Check if the cluster exists c = objects.Cluster.get_by_uuid(pecan.request.context, cluster_uuid) # Check if the cluster is a member cluster and if it exists if cluster_uuid in federation.member_ids and c is not None: # Retrieve all current members members = federation.member_ids # Unjoin the member members.remove(cluster_uuid) else: raise exception.HTTPNotFound("Cluster %s is not a member of the " "federation %s." % (cluster_uuid, federation.name)) # Set `value` to the updated member list. Change `op` to `replace` patch[0]['value'] = members patch[0]['op'] = 'replace' return patch @expose.expose(None, types.uuid_or_name, status_code=204) def delete(self, federation_ident): """Delete a federation. :param federation_ident: UUID of federation or logical name of the federation. """ context = pecan.request.context federation = api_utils.get_resource('Federation', federation_ident) policy.enforce(context, 'federation:delete', federation.as_dict(), action='federation:delete') pecan.request.rpcapi.federation_delete_async(federation.uuid) magnum-6.1.0/magnum/api/controllers/v1/stats.py0000666000175100017510000000504313244017334021474 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pecan
from wsme import types as wtypes

from magnum.api.controllers import base
from magnum.api import expose
from magnum.common import exception
from magnum.common import policy
from magnum.i18n import _
from magnum import objects


class Stats(base.APIBase):
    """API representation of magnum usage statistics.

    Exposes the cluster and node counts carried by an objects.Stats
    RPC object.
    """

    # Total number of clusters (non-negative).
    clusters = wtypes.IntegerType(minimum=0)

    # Total number of nodes (non-negative).
    nodes = wtypes.IntegerType(minimum=0)

    def __init__(self, **kwargs):
        self.fields = []
        for field in objects.Stats.fields:
            # Skip fields we do not expose.
            if not hasattr(self, field):
                continue
            self.fields.append(field)
            setattr(self, field, kwargs.get(field, wtypes.Unset))

    @classmethod
    def convert(cls, rpc_stats):
        """Build an API Stats object from an RPC stats object."""
        return Stats(**rpc_stats.as_dict())


class StatsController(base.Controller):
    """REST controller for Stats."""

    def __init__(self, **kwargs):
        super(StatsController, self).__init__()

    @expose.expose(Stats, wtypes.text, wtypes.text)
    def get_all(self, project_id=None, type="cluster"):
        """Retrieve magnum stats.

        :param project_id: project to scope the stats to; non-admin callers
                           may only query their own project.
        :param type: stats type; only "cluster" is accepted.
        """
        context = pecan.request.context
        policy.enforce(context, 'stats:get_all', action='stats:get_all')
        allowed_stats = ["cluster"]

        if type.lower() not in allowed_stats:
            msg = _("Invalid stats type.
Allowed values are '%s'") allowed_str = ','.join(allowed_stats) raise exception.InvalidParameterValue(err=msg % allowed_str) # 1.If the requester is not an admin and trying to request stats for # different tenant, then reject the request # 2.If the requester is not an admin and project_id was not provided, # then return self stats if not context.is_admin: project_id = project_id if project_id else context.project_id if project_id != context.project_id: raise exception.NotAuthorized() stats = objects.Stats.get_cluster_stats(context, project_id) return Stats.convert(stats) magnum-6.1.0/magnum/api/controllers/v1/cluster_template.py0000666000175100017510000005040513244017334023714 0ustar zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import timeutils import pecan import wsme from wsme import types as wtypes from magnum.api import attr_validator from magnum.api.controllers import base from magnum.api.controllers import link from magnum.api.controllers.v1 import collection from magnum.api.controllers.v1 import types from magnum.api import expose from magnum.api import utils as api_utils from magnum.api import validation from magnum.common import clients from magnum.common import exception from magnum.common import name_generator from magnum.common import policy from magnum import objects from magnum.objects import fields class ClusterTemplate(base.APIBase): """API representation of a ClusterTemplate. 
This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a ClusterTemplate. """ uuid = types.uuid """Unique UUID for this ClusterTemplate""" name = wtypes.StringType(min_length=1, max_length=255) """The name of the ClusterTemplate""" coe = wtypes.Enum(str, *fields.ClusterType.ALL, mandatory=True) """The Container Orchestration Engine for this clustertemplate""" image_id = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255), mandatory=True) """The image name or UUID to use as an image for this ClusterTemplate""" flavor_id = wtypes.StringType(min_length=1, max_length=255) """The flavor of this ClusterTemplate""" master_flavor_id = wtypes.StringType(min_length=1, max_length=255) """The flavor of the master node for this ClusterTemplate""" dns_nameserver = wtypes.IPv4AddressType() """The DNS nameserver address""" keypair_id = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255), default=None) """The name of the nova ssh keypair""" external_network_id = wtypes.StringType(min_length=1, max_length=255) """The external network to attach to the Cluster""" fixed_network = wtypes.StringType(min_length=1, max_length=255) """The fixed network name to attach to the Cluster""" fixed_subnet = wtypes.StringType(min_length=1, max_length=255) """The fixed subnet name to attach to the Cluster""" network_driver = wtypes.StringType(min_length=1, max_length=255) """The name of the driver used for instantiating container networks""" apiserver_port = wtypes.IntegerType(minimum=1024, maximum=65535) """The API server port for k8s""" docker_volume_size = wtypes.IntegerType(minimum=1) """The size in GB of the docker volume""" cluster_distro = wtypes.StringType(min_length=1, max_length=255) """The Cluster distro for the Cluster, e.g. 
coreos, fedora-atomic, etc.""" links = wsme.wsattr([link.Link], readonly=True) """A list containing a self link and associated ClusterTemplate links""" http_proxy = wtypes.StringType(min_length=1, max_length=255) """Address of a proxy that will receive all HTTP requests and relay them. The format is a URL including a port number. """ https_proxy = wtypes.StringType(min_length=1, max_length=255) """Address of a proxy that will receive all HTTPS requests and relay them. The format is a URL including a port number. """ no_proxy = wtypes.StringType(min_length=1, max_length=255) """A comma separated list of IPs for which proxies should not be used in the cluster """ volume_driver = wtypes.StringType(min_length=1, max_length=255) """The name of the driver used for instantiating container volumes""" registry_enabled = wsme.wsattr(types.boolean, default=False) """Indicates whether the docker registry is enabled""" labels = wtypes.DictType(str, str) """One or more key/value pairs""" tls_disabled = wsme.wsattr(types.boolean, default=False) """Indicates whether the TLS should be disabled""" public = wsme.wsattr(types.boolean, default=False) """Indicates whether the ClusterTemplate is public or not.""" server_type = wsme.wsattr(wtypes.Enum(str, *fields.ServerType.ALL), default='vm') """Server type for this ClusterTemplate """ insecure_registry = wtypes.StringType(min_length=1, max_length=255) """Insecure registry URL when creating a ClusterTemplate """ docker_storage_driver = wtypes.StringType(min_length=1, max_length=255) """Docker storage driver""" master_lb_enabled = wsme.wsattr(types.boolean, default=False) """Indicates whether created clusters should have a load balancer for master nodes or not. 
""" floating_ip_enabled = wsme.wsattr(types.boolean, default=True) """Indicates whether created clusters should have a floating ip or not.""" project_id = wsme.wsattr(wtypes.text, readonly=True) """Project id of the cluster belongs to""" user_id = wsme.wsattr(wtypes.text, readonly=True) """User id of the cluster belongs to""" def __init__(self, **kwargs): self.fields = [] for field in objects.ClusterTemplate.fields: # Skip fields we do not expose. if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) @staticmethod def _convert_with_links(cluster_template, url): cluster_template.links = [link.Link.make_link('self', url, 'clustertemplates', cluster_template.uuid), link.Link.make_link('bookmark', url, 'clustertemplates', cluster_template.uuid, bookmark=True)] return cluster_template @classmethod def convert_with_links(cls, rpc_cluster_template): cluster_template = ClusterTemplate(**rpc_cluster_template.as_dict()) return cls._convert_with_links(cluster_template, pecan.request.host_url) @classmethod def sample(cls): sample = cls( uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', name='example', image_id='Fedora-k8s', flavor_id='m1.small', master_flavor_id='m1.small', dns_nameserver='8.8.1.1', keypair_id='keypair1', external_network_id='ffc44e4a-2319-4062-bce0-9ae1c38b05ba', fixed_network='private', fixed_subnet='private-subnet', network_driver='libnetwork', volume_driver='cinder', apiserver_port=8080, docker_volume_size=25, docker_storage_driver='devicemapper', cluster_distro='fedora-atomic', coe=fields.ClusterType.KUBERNETES, http_proxy='http://proxy.com:123', https_proxy='https://proxy.com:123', no_proxy='192.168.0.1,192.168.0.2,192.168.0.3', labels={'key1': 'val1', 'key2': 'val2'}, server_type='vm', insecure_registry='10.238.100.100:5000', created_at=timeutils.utcnow(), updated_at=timeutils.utcnow(), public=False, master_lb_enabled=False, floating_ip_enabled=True) return cls._convert_with_links(sample, 
'http://localhost:9511') class ClusterTemplatePatchType(types.JsonPatchType): _api_base = ClusterTemplate _extra_non_removable_attrs = {'/network_driver', '/external_network_id', '/tls_disabled', '/public', '/server_type', '/coe', '/registry_enabled', '/cluster_distro'} class ClusterTemplateCollection(collection.Collection): """API representation of a collection of ClusterTemplates.""" clustertemplates = [ClusterTemplate] """A list containing ClusterTemplates objects""" def __init__(self, **kwargs): self._type = 'clustertemplates' @staticmethod def convert_with_links(rpc_cluster_templates, limit, url=None, **kwargs): collection = ClusterTemplateCollection() collection.clustertemplates = [ClusterTemplate.convert_with_links(p) for p in rpc_cluster_templates] collection.next = collection.get_next(limit, url=url, **kwargs) return collection @classmethod def sample(cls): sample = cls() sample.clustertemplates = [ClusterTemplate.sample()] return sample class ClusterTemplatesController(base.Controller): """REST controller for ClusterTemplates.""" _custom_actions = { 'detail': ['GET'], } def _generate_name_for_cluster_template(self, context): """Generate a random name like: zeta-22-model.""" name_gen = name_generator.NameGenerator() name = name_gen.generate() return name + '-template' def _get_cluster_templates_collection(self, marker, limit, sort_key, sort_dir, resource_url=None): context = pecan.request.context if context.is_admin: if resource_url == '/'.join(['clustertemplates', 'detail']): policy.enforce(context, "clustertemplate:detail_all_projects", action="clustertemplate:detail_all_projects") else: policy.enforce(context, "clustertemplate:get_all_all_projects", action="clustertemplate:get_all_all_projects") # TODO(flwang): Instead of asking an extra 'all_project's # parameter, currently the design is allowing admin user to list # all clusters from all projects. But the all_tenants is one of # the condition to do project filter in DB API. 
And it's also used # by periodic tasks. So the could be removed in the future and # a new parameter 'project_id' would be added so that admin user # can list clusters for a particular project. context.all_tenants = True limit = api_utils.validate_limit(limit) sort_dir = api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.ClusterTemplate.get_by_uuid( pecan.request.context, marker) cluster_templates = objects.ClusterTemplate.list( pecan.request.context, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir) return ClusterTemplateCollection.convert_with_links(cluster_templates, limit, url=resource_url, sort_key=sort_key, sort_dir=sort_dir) @expose.expose(ClusterTemplateCollection, types.uuid, int, wtypes.text, wtypes.text) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of ClusterTemplates. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'clustertemplate:get_all', action='clustertemplate:get_all') return self._get_cluster_templates_collection(marker, limit, sort_key, sort_dir) @expose.expose(ClusterTemplateCollection, types.uuid, int, wtypes.text, wtypes.text) def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of ClusterTemplates with detail. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. 
""" context = pecan.request.context policy.enforce(context, 'clustertemplate:detail', action='clustertemplate:detail') # NOTE(lucasagomes): /detail should only work against collections parent = pecan.request.path.split('/')[:-1][-1] if parent != "clustertemplates": raise exception.HTTPNotFound resource_url = '/'.join(['clustertemplates', 'detail']) return self._get_cluster_templates_collection(marker, limit, sort_key, sort_dir, resource_url) @expose.expose(ClusterTemplate, types.uuid_or_name) def get_one(self, cluster_template_ident): """Retrieve information about the given ClusterTemplate. :param cluster_template_ident: UUID or logical name of a ClusterTemplate. """ context = pecan.request.context if context.is_admin: policy.enforce(context, "clustertemplate:get_one_all_projects", action="clustertemplate:get_one_all_projects") # TODO(flwang): Instead of asking an extra 'all_project's # parameter, currently the design is allowing admin user to list # all clusters from all projects. But the all_tenants is one of # the condition to do project filter in DB API. And it's also used # by periodic tasks. So the could be removed in the future and # a new parameter 'project_id' would be added so that admin user # can list clusters for a particular project. context.all_tenants = True cluster_template = api_utils.get_resource('ClusterTemplate', cluster_template_ident) if not cluster_template.public: policy.enforce(context, 'clustertemplate:get', cluster_template.as_dict(), action='clustertemplate:get') return ClusterTemplate.convert_with_links(cluster_template) @expose.expose(ClusterTemplate, body=ClusterTemplate, status_code=201) @validation.enforce_server_type() @validation.enforce_network_driver_types_create() @validation.enforce_volume_driver_types_create() @validation.enforce_volume_storage_size_create() @validation.enforce_driver_supported() def post(self, cluster_template): """Create a new ClusterTemplate. 
:param cluster_template: a ClusterTemplate within the request body. """ context = pecan.request.context policy.enforce(context, 'clustertemplate:create', action='clustertemplate:create') cluster_template_dict = cluster_template.as_dict() cli = clients.OpenStackClients(context) attr_validator.validate_os_resources(context, cluster_template_dict) image_data = attr_validator.validate_image(cli, cluster_template_dict[ 'image_id']) cluster_template_dict['cluster_distro'] = image_data['os_distro'] cluster_template_dict['project_id'] = context.project_id cluster_template_dict['user_id'] = context.user_id # check permissions for making cluster_template public if cluster_template_dict['public']: if not policy.enforce(context, "clustertemplate:publish", None, do_raise=False): raise exception.ClusterTemplatePublishDenied() # NOTE(yuywz): We will generate a random human-readable name for # cluster_template if the name is not specified by user. arg_name = cluster_template_dict.get('name') name = arg_name or self._generate_name_for_cluster_template(context) cluster_template_dict['name'] = name new_cluster_template = objects.ClusterTemplate(context, **cluster_template_dict) new_cluster_template.create() # Set the HTTP Location Header pecan.response.location = link.build_url('clustertemplates', new_cluster_template.uuid) return ClusterTemplate.convert_with_links(new_cluster_template) @wsme.validate(types.uuid_or_name, [ClusterTemplatePatchType]) @expose.expose(ClusterTemplate, types.uuid_or_name, body=[ClusterTemplatePatchType]) @validation.enforce_network_driver_types_update() @validation.enforce_volume_driver_types_update() def patch(self, cluster_template_ident, patch): """Update an existing ClusterTemplate. :param cluster_template_ident: UUID or logic name of a ClusterTemplate. :param patch: a json PATCH document to apply to this ClusterTemplate. 
""" context = pecan.request.context cluster_template = api_utils.get_resource('ClusterTemplate', cluster_template_ident) policy.enforce(context, 'clustertemplate:update', cluster_template.as_dict(), action='clustertemplate:update') try: cluster_template_dict = cluster_template.as_dict() new_cluster_template = ClusterTemplate(**api_utils.apply_jsonpatch( cluster_template_dict, patch)) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) new_cluster_template_dict = new_cluster_template.as_dict() attr_validator.validate_os_resources(context, new_cluster_template_dict) # check permissions when updating ClusterTemplate public flag if cluster_template.public != new_cluster_template.public: if not policy.enforce(context, "clustertemplate:publish", None, do_raise=False): raise exception.ClusterTemplatePublishDenied() # Update only the fields that have changed for field in objects.ClusterTemplate.fields: try: patch_val = getattr(new_cluster_template, field) except AttributeError: # Ignore fields that aren't exposed in the API continue if patch_val == wtypes.Unset: patch_val = None if cluster_template[field] != patch_val: cluster_template[field] = patch_val cluster_template.save() return ClusterTemplate.convert_with_links(cluster_template) @expose.expose(None, types.uuid_or_name, status_code=204) def delete(self, cluster_template_ident): """Delete a ClusterTemplate. :param cluster_template_ident: UUID or logical name of a ClusterTemplate. 
""" context = pecan.request.context if context.is_admin: policy.enforce(context, 'clustertemplate:delete_all_projects', action='clustertemplate:delete_all_projects') context.all_tenants = True cluster_template = api_utils.get_resource('ClusterTemplate', cluster_template_ident) policy.enforce(context, 'clustertemplate:delete', cluster_template.as_dict(), action='clustertemplate:delete') cluster_template.destroy() magnum-6.1.0/magnum/api/controllers/v1/magnum_services.py0000666000175100017510000000707213244017334023531 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import pecan import wsme from wsme import types as wtypes from magnum.api.controllers import base from magnum.api.controllers.v1 import collection from magnum.api.controllers.v1 import types from magnum.api import expose from magnum.api import servicegroup as svcgrp_api from magnum.common import policy from magnum import objects from magnum.objects import fields class MagnumService(base.APIBase): host = wtypes.StringType(min_length=1, max_length=255) """Name of the host """ binary = wtypes.Enum(str, *fields.MagnumServiceBinary.ALL) """Name of the binary""" state = wtypes.Enum(str, *fields.MagnumServiceState.ALL) """State of the binary""" id = wsme.wsattr(wtypes.IntegerType(minimum=1)) """The id for the healthcheck record """ report_count = wsme.wsattr(wtypes.IntegerType(minimum=0)) """The number of times the heartbeat was reported """ disabled = wsme.wsattr(types.boolean, default=False) """If the service is 'disabled' administratively """ disabled_reason = wtypes.StringType(min_length=0, max_length=255) """Reason for disabling """ def __init__(self, state, **kwargs): super(MagnumService, self).__init__() self.fields = ['state'] setattr(self, 'state', state) for field in objects.MagnumService.fields: self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) class MagnumServiceCollection(collection.Collection): mservices = [MagnumService] """A list containing service objects""" def __init__(self, **kwargs): super(MagnumServiceCollection, self).__init__() self._type = 'mservices' @staticmethod def convert_db_rec_list_to_collection(servicegroup_api, rpc_msvcs, **kwargs): collection = MagnumServiceCollection() collection.mservices = [] for p in rpc_msvcs: alive = servicegroup_api.service_is_up(p) state = 'up' if alive else 'down' msvc = MagnumService(state, **p.as_dict()) collection.mservices.append(msvc) collection.next = collection.get_next(limit=None, url=None, **kwargs) return collection class MagnumServiceController(base.Controller): """REST 
controller for magnum-services.""" def __init__(self, **kwargs): super(MagnumServiceController, self).__init__() self.servicegroup_api = svcgrp_api.ServiceGroup() @expose.expose(MagnumServiceCollection) @policy.enforce_wsgi("magnum-service") def get_all(self): """Retrieve a list of magnum-services. """ msvcs = objects.MagnumService.list(pecan.request.context, limit=None, marker=None, sort_key='id', sort_dir='asc') return MagnumServiceCollection.convert_db_rec_list_to_collection( self.servicegroup_api, msvcs) magnum-6.1.0/magnum/api/controllers/v1/__init__.py0000666000175100017510000002432013244017334022074 0ustar zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Version 1 of the Magnum API NOTE: IN PROGRESS AND NOT FULLY IMPLEMENTED. 
""" from oslo_log import log as logging import pecan from wsme import types as wtypes from magnum.api.controllers import base as controllers_base from magnum.api.controllers import link from magnum.api.controllers.v1 import bay from magnum.api.controllers.v1 import baymodel from magnum.api.controllers.v1 import certificate from magnum.api.controllers.v1 import cluster from magnum.api.controllers.v1 import cluster_template from magnum.api.controllers.v1 import federation from magnum.api.controllers.v1 import magnum_services from magnum.api.controllers.v1 import quota from magnum.api.controllers.v1 import stats from magnum.api.controllers import versions as ver from magnum.api import expose from magnum.api import http_error from magnum.i18n import _ LOG = logging.getLogger(__name__) BASE_VERSION = 1 MIN_VER_STR = '%s %s' % (ver.Version.service_string, ver.BASE_VER) MAX_VER_STR = '%s %s' % (ver.Version.service_string, ver.CURRENT_MAX_VER) MIN_VER = ver.Version({ver.Version.string: MIN_VER_STR}, MIN_VER_STR, MAX_VER_STR) MAX_VER = ver.Version({ver.Version.string: MAX_VER_STR}, MIN_VER_STR, MAX_VER_STR) class MediaType(controllers_base.APIBase): """A media type representation.""" base = wtypes.text type = wtypes.text def __init__(self, base, type): self.base = base self.type = type class V1(controllers_base.APIBase): """The representation of the version 1 of the API.""" id = wtypes.text """The ID of the version, also acts as the release number""" media_types = [MediaType] """An array of supcontainersed media types for this version""" links = [link.Link] """Links that point to a specific URL for this version and documentation""" baymodels = [link.Link] """Links to the baymodels resource""" bays = [link.Link] """Links to the bays resource""" clustertemplates = [link.Link] """Links to the clustertemplates resource""" clusters = [link.Link] """Links to the clusters resource""" quotas = [link.Link] """Links to the quotas resource""" certificates = [link.Link] """Links to the 
certificates resource""" mservices = [link.Link] """Links to the magnum-services resource""" stats = [link.Link] """Links to the stats resource""" # Links to the federations resources federations = [link.Link] @staticmethod def convert(): v1 = V1() v1.id = "v1" v1.links = [link.Link.make_link('self', pecan.request.host_url, 'v1', '', bookmark=True), link.Link.make_link('describedby', 'http://docs.openstack.org', 'developer/magnum/dev', 'api-spec-v1.html', bookmark=True, type='text/html')] v1.media_types = [MediaType('application/json', 'application/vnd.openstack.magnum.v1+json')] v1.baymodels = [link.Link.make_link('self', pecan.request.host_url, 'baymodels', ''), link.Link.make_link('bookmark', pecan.request.host_url, 'baymodels', '', bookmark=True)] v1.bays = [link.Link.make_link('self', pecan.request.host_url, 'bays', ''), link.Link.make_link('bookmark', pecan.request.host_url, 'bays', '', bookmark=True)] v1.clustertemplates = [link.Link.make_link('self', pecan.request.host_url, 'clustertemplates', ''), link.Link.make_link('bookmark', pecan.request.host_url, 'clustertemplates', '', bookmark=True)] v1.clusters = [link.Link.make_link('self', pecan.request.host_url, 'clusters', ''), link.Link.make_link('bookmark', pecan.request.host_url, 'clusters', '', bookmark=True)] v1.quotas = [link.Link.make_link('self', pecan.request.host_url, 'quotas', ''), link.Link.make_link('bookmark', pecan.request.host_url, 'quotas', '', bookmark=True)] v1.certificates = [link.Link.make_link('self', pecan.request.host_url, 'certificates', ''), link.Link.make_link('bookmark', pecan.request.host_url, 'certificates', '', bookmark=True)] v1.mservices = [link.Link.make_link('self', pecan.request.host_url, 'mservices', ''), link.Link.make_link('bookmark', pecan.request.host_url, 'mservices', '', bookmark=True)] v1.stats = [link.Link.make_link('self', pecan.request.host_url, 'stats', ''), link.Link.make_link('bookmark', pecan.request.host_url, 'stats', '', bookmark=True)] v1.federations = 
[link.Link.make_link('self', pecan.request.host_url, 'federations', ''), link.Link.make_link('bookmark', pecan.request.host_url, 'federations', '', bookmark=True)] return v1 class Controller(controllers_base.Controller): """Version 1 API controller root.""" bays = bay.BaysController() baymodels = baymodel.BayModelsController() clusters = cluster.ClustersController() clustertemplates = cluster_template.ClusterTemplatesController() quotas = quota.QuotaController() certificates = certificate.CertificateController() mservices = magnum_services.MagnumServiceController() stats = stats.StatsController() federations = federation.FederationsController() @expose.expose(V1) def get(self): # NOTE: The reason why convert() it's being called for every # request is because we need to get the host url from # the request object to make the links. return V1.convert() def _check_version(self, version, headers=None): if headers is None: headers = {} # ensure that major version in the URL matches the header if version.major != BASE_VERSION: raise http_error.HTTPNotAcceptableAPIVersion(_( "Mutually exclusive versions requested. Version %(ver)s " "requested but not supported by this service." "The supported version range is: " "[%(min)s, %(max)s].") % {'ver': version, 'min': MIN_VER_STR, 'max': MAX_VER_STR}, headers=headers, max_version=str(MAX_VER), min_version=str(MIN_VER)) # ensure the minor version is within the supported range if version < MIN_VER or version > MAX_VER: raise http_error.HTTPNotAcceptableAPIVersion(_( "Version %(ver)s was requested but the minor version is not " "supported by this service. 
The supported version range is: " "[%(min)s, %(max)s].") % {'ver': version, 'min': MIN_VER_STR, 'max': MAX_VER_STR}, headers=headers, max_version=str(MAX_VER), min_version=str(MIN_VER)) @pecan.expose() def _route(self, args): version = ver.Version( pecan.request.headers, MIN_VER_STR, MAX_VER_STR) # Always set the basic version headers pecan.response.headers[ver.Version.min_string] = MIN_VER_STR pecan.response.headers[ver.Version.max_string] = MAX_VER_STR pecan.response.headers[ver.Version.string] = " ".join( [ver.Version.service_string, str(version)]) pecan.response.headers["vary"] = ver.Version.string # assert that requested version is supported self._check_version(version, pecan.response.headers) pecan.request.version = version if pecan.request.body: msg = ("Processing request: url: %(url)s, %(method)s, " "body: %(body)s" % {'url': pecan.request.url, 'method': pecan.request.method, 'body': pecan.request.body}) LOG.debug(msg) return super(Controller, self)._route(args) __all__ = (Controller) magnum-6.1.0/magnum/api/controllers/v1/baymodel.py0000666000175100017510000004045613244017334022141 0ustar zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_utils import timeutils import pecan import wsme from wsme import types as wtypes from magnum.api import attr_validator from magnum.api.controllers import base from magnum.api.controllers import link from magnum.api.controllers.v1 import collection from magnum.api.controllers.v1 import types from magnum.api import expose from magnum.api import utils as api_utils from magnum.api import validation from magnum.common import clients from magnum.common import exception from magnum.common import name_generator from magnum.common import policy from magnum import objects from magnum.objects import fields class BayModel(base.APIBase): """API representation of a Baymodel. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a Baymodel. """ uuid = types.uuid """Unique UUID for this Baymodel""" name = wtypes.StringType(min_length=1, max_length=255) """The name of the Baymodel""" coe = wtypes.Enum(str, *fields.ClusterType.ALL, mandatory=True) """The Container Orchestration Engine for this bay model""" image_id = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255), mandatory=True) """The image name or UUID to use as a base image for this Baymodel""" flavor_id = wtypes.StringType(min_length=1, max_length=255) """The flavor of this Baymodel""" master_flavor_id = wtypes.StringType(min_length=1, max_length=255) """The flavor of the master node for this Baymodel""" dns_nameserver = wtypes.IPv4AddressType() """The DNS nameserver address""" keypair_id = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255), mandatory=True) """The name of the nova ssh keypair""" external_network_id = wtypes.StringType(min_length=1, max_length=255) """The external network to attach to the Bay""" fixed_network = wtypes.StringType(min_length=1, max_length=255) """The fixed network name to attach to the Bay""" fixed_subnet = wtypes.StringType(min_length=1, max_length=255) """The fixed subnet name to 
attach to the Bay""" network_driver = wtypes.StringType(min_length=1, max_length=255) """The name of the driver used for instantiating container networks""" apiserver_port = wtypes.IntegerType(minimum=1024, maximum=65535) """The API server port for k8s""" docker_volume_size = wtypes.IntegerType(minimum=1) """The size in GB of the docker volume""" cluster_distro = wtypes.StringType(min_length=1, max_length=255) """The Cluster distro for the bay, e.g. coreos, fedora-atomic, etc.""" links = wsme.wsattr([link.Link], readonly=True) """A list containing a self link and associated Baymodel links""" http_proxy = wtypes.StringType(min_length=1, max_length=255) """Address of a proxy that will receive all HTTP requests and relay them. The format is a URL including a port number. """ https_proxy = wtypes.StringType(min_length=1, max_length=255) """Address of a proxy that will receive all HTTPS requests and relay them. The format is a URL including a port number. """ no_proxy = wtypes.StringType(min_length=1, max_length=255) """A comma separated list of IPs for which proxies should not be used in the bay """ volume_driver = wtypes.StringType(min_length=1, max_length=255) """The name of the driver used for instantiating container volumes""" registry_enabled = wsme.wsattr(types.boolean, default=False) """Indicates whether the docker registry is enabled""" labels = wtypes.DictType(str, str) """One or more key/value pairs""" tls_disabled = wsme.wsattr(types.boolean, default=False) """Indicates whether TLS should be disabled""" public = wsme.wsattr(types.boolean, default=False) """Indicates whether the Baymodel is public or not.""" server_type = wsme.wsattr(wtypes.Enum(str, *fields.ServerType.ALL), default='vm') """Server type for this bay model""" insecure_registry = wtypes.StringType(min_length=1, max_length=255) """Insecure registry URL when creating a Baymodel""" docker_storage_driver = wtypes.StringType(min_length=1, max_length=255) """Docker storage driver""" master_lb_enabled 
= wsme.wsattr(types.boolean, default=False) """Indicates whether created bays should have a load balancer for master nodes or not. """ floating_ip_enabled = wsme.wsattr(types.boolean, default=True) """Indicates whether created bays should have a floating ip or not.""" def __init__(self, **kwargs): self.fields = [] for field in objects.ClusterTemplate.fields: # Skip fields we do not expose. if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) @staticmethod def _convert_with_links(baymodel, url): baymodel.links = [link.Link.make_link('self', url, 'baymodels', baymodel.uuid), link.Link.make_link('bookmark', url, 'baymodels', baymodel.uuid, bookmark=True)] return baymodel @classmethod def convert_with_links(cls, rpc_baymodel): baymodel = BayModel(**rpc_baymodel.as_dict()) return cls._convert_with_links(baymodel, pecan.request.host_url) @classmethod def sample(cls): sample = cls( uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', name='example', image_id='Fedora-k8s', flavor_id='m1.small', master_flavor_id='m1.small', dns_nameserver='8.8.1.1', keypair_id='keypair1', external_network_id='ffc44e4a-2319-4062-bce0-9ae1c38b05ba', fixed_network='private', fixed_subnet='private-subnet', network_driver='libnetwork', volume_driver='cinder', apiserver_port=8080, docker_volume_size=25, docker_storage_driver='devicemapper', cluster_distro='fedora-atomic', coe=fields.ClusterType.KUBERNETES, http_proxy='http://proxy.com:123', https_proxy='https://proxy.com:123', no_proxy='192.168.0.1,192.168.0.2,192.168.0.3', labels={'key1': 'val1', 'key2': 'val2'}, server_type='vm', insecure_registry='10.238.100.100:5000', created_at=timeutils.utcnow(), updated_at=timeutils.utcnow(), public=False, master_lb_enabled=False, floating_ip_enabled=True, ) return cls._convert_with_links(sample, 'http://localhost:9511') class BayModelPatchType(types.JsonPatchType): _api_base = BayModel _extra_non_removable_attrs = {'/network_driver', 
'/external_network_id', '/tls_disabled', '/public', '/server_type', '/coe', '/registry_enabled', '/cluster_distro'} class BayModelCollection(collection.Collection): """API representation of a collection of Baymodels.""" baymodels = [BayModel] """A list containing Baymodel objects""" def __init__(self, **kwargs): self._type = 'baymodels' @staticmethod def convert_with_links(rpc_baymodels, limit, url=None, **kwargs): collection = BayModelCollection() collection.baymodels = [BayModel.convert_with_links(p) for p in rpc_baymodels] collection.next = collection.get_next(limit, url=url, **kwargs) return collection @classmethod def sample(cls): sample = cls() sample.baymodels = [BayModel.sample()] return sample class BayModelsController(base.Controller): """REST controller for Baymodels.""" _custom_actions = { 'detail': ['GET'], } def _generate_name_for_baymodel(self, context): '''Generate a random name like: zeta-22-model.''' name_gen = name_generator.NameGenerator() name = name_gen.generate() return name + '-model' def _get_baymodels_collection(self, marker, limit, sort_key, sort_dir, resource_url=None): limit = api_utils.validate_limit(limit) sort_dir = api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.ClusterTemplate.get_by_uuid( pecan.request.context, marker) baymodels = objects.ClusterTemplate.list(pecan.request.context, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir) return BayModelCollection.convert_with_links(baymodels, limit, url=resource_url, sort_key=sort_key, sort_dir=sort_dir) @expose.expose(BayModelCollection, types.uuid, int, wtypes.text, wtypes.text) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of Baymodels. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. 
""" context = pecan.request.context policy.enforce(context, 'baymodel:get_all', action='baymodel:get_all') return self._get_baymodels_collection(marker, limit, sort_key, sort_dir) @expose.expose(BayModelCollection, types.uuid, int, wtypes.text, wtypes.text) def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of Baymodels with detail. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'baymodel:detail', action='baymodel:detail') # NOTE(lucasagomes): /detail should only work against collections parent = pecan.request.path.split('/')[:-1][-1] if parent != "baymodels": raise exception.HTTPNotFound resource_url = '/'.join(['baymodels', 'detail']) return self._get_baymodels_collection(marker, limit, sort_key, sort_dir, resource_url) @expose.expose(BayModel, types.uuid_or_name) def get_one(self, baymodel_ident): """Retrieve information about the given Baymodel. :param baymodel_ident: UUID or logical name of a baymodel. """ context = pecan.request.context baymodel = api_utils.get_resource('ClusterTemplate', baymodel_ident) if not baymodel.public: policy.enforce(context, 'baymodel:get', baymodel.as_dict(), action='baymodel:get') return BayModel.convert_with_links(baymodel) @expose.expose(BayModel, body=BayModel, status_code=201) @validation.enforce_server_type() @validation.enforce_network_driver_types_create() @validation.enforce_volume_driver_types_create() @validation.enforce_volume_storage_size_create() def post(self, baymodel): """Create a new Baymodel. :param baymodel: a Baymodel within the request body. 
""" context = pecan.request.context policy.enforce(context, 'baymodel:create', action='baymodel:create') baymodel_dict = baymodel.as_dict() cli = clients.OpenStackClients(context) attr_validator.validate_os_resources(context, baymodel_dict) image_data = attr_validator.validate_image(cli, baymodel_dict['image_id']) baymodel_dict['cluster_distro'] = image_data['os_distro'] baymodel_dict['project_id'] = context.project_id baymodel_dict['user_id'] = context.user_id # check permissions for making baymodel public if baymodel_dict['public']: if not policy.enforce(context, "baymodel:publish", None, do_raise=False): raise exception.ClusterTemplatePublishDenied() # NOTE(yuywz): We will generate a random human-readable name for # baymodel if the name is not specified by user. arg_name = baymodel_dict.get('name') name = arg_name or self._generate_name_for_baymodel(context) baymodel_dict['name'] = name new_baymodel = objects.ClusterTemplate(context, **baymodel_dict) new_baymodel.create() # Set the HTTP Location Header pecan.response.location = link.build_url('baymodels', new_baymodel.uuid) return BayModel.convert_with_links(new_baymodel) @wsme.validate(types.uuid_or_name, [BayModelPatchType]) @expose.expose(BayModel, types.uuid_or_name, body=[BayModelPatchType]) @validation.enforce_network_driver_types_update() @validation.enforce_volume_driver_types_update() def patch(self, baymodel_ident, patch): """Update an existing Baymodel. :param baymodel_ident: UUID or logic name of a Baymodel. :param patch: a json PATCH document to apply to this Baymodel. 
""" context = pecan.request.context baymodel = api_utils.get_resource('ClusterTemplate', baymodel_ident) policy.enforce(context, 'baymodel:update', baymodel.as_dict(), action='baymodel:update') try: baymodel_dict = baymodel.as_dict() new_baymodel = BayModel(**api_utils.apply_jsonpatch( baymodel_dict, patch)) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) new_baymodel_dict = new_baymodel.as_dict() attr_validator.validate_os_resources(context, new_baymodel_dict) # check permissions when updating baymodel public flag if baymodel.public != new_baymodel.public: if not policy.enforce(context, "baymodel:publish", None, do_raise=False): raise exception.ClusterTemplatePublishDenied() # Update only the fields that have changed for field in objects.ClusterTemplate.fields: try: patch_val = getattr(new_baymodel, field) except AttributeError: # Ignore fields that aren't exposed in the API continue if patch_val == wtypes.Unset: patch_val = None if baymodel[field] != patch_val: baymodel[field] = patch_val baymodel.save() return BayModel.convert_with_links(baymodel) @expose.expose(None, types.uuid_or_name, status_code=204) def delete(self, baymodel_ident): """Delete a Baymodel. :param baymodel_ident: UUID or logical name of a Baymodel. """ context = pecan.request.context baymodel = api_utils.get_resource('ClusterTemplate', baymodel_ident) policy.enforce(context, 'baymodel:delete', baymodel.as_dict(), action='baymodel:delete') baymodel.destroy() magnum-6.1.0/magnum/api/controllers/v1/cluster.py0000777000175100017510000005526713244017334022037 0ustar zuulzuul00000000000000# Copyright 2013 UnitedStack Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from oslo_log import log as logging from oslo_utils import timeutils import pecan import wsme from wsme import types as wtypes from magnum.api import attr_validator from magnum.api.controllers import base from magnum.api.controllers import link from magnum.api.controllers.v1 import collection from magnum.api.controllers.v1 import types from magnum.api import expose from magnum.api import utils as api_utils from magnum.api import validation from magnum.common import clients from magnum.common import exception from magnum.common import name_generator from magnum.common import policy import magnum.conf from magnum.i18n import _ from magnum import objects from magnum.objects import fields LOG = logging.getLogger(__name__) CONF = magnum.conf.CONF class ClusterID(wtypes.Base): """API representation of a cluster ID This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a cluster ID. """ uuid = types.uuid """Unique UUID for this cluster""" def __init__(self, uuid): self.uuid = uuid class Cluster(base.APIBase): """API representation of a cluster. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a Cluster. 
""" _cluster_template_id = None def _get_cluster_template_id(self): return self._cluster_template_id def _set_cluster_template_id(self, value): if value and self._cluster_template_id != value: try: cluster_template = api_utils.get_resource('ClusterTemplate', value) self._cluster_template_id = cluster_template.uuid except exception.ClusterTemplateNotFound as e: # Change error code because 404 (NotFound) is inappropriate # response for a POST request to create a Cluster e.code = 400 # BadRequest raise elif value == wtypes.Unset: self._cluster_template_id = wtypes.Unset uuid = types.uuid """Unique UUID for this cluster""" name = wtypes.StringType(min_length=1, max_length=242, pattern='^[a-zA-Z][a-zA-Z0-9_.-]*$') """Name of this cluster, max length is limited to 242 because of heat stack requires max length limit to 255, and Magnum amend a uuid length""" cluster_template_id = wsme.wsproperty(wtypes.text, _get_cluster_template_id, _set_cluster_template_id, mandatory=True) """The cluster_template UUID""" keypair = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255), default=None) """The name of the nova ssh keypair""" node_count = wsme.wsattr(wtypes.IntegerType(minimum=1), default=1) """The node count for this cluster. Default to 1 if not set""" master_count = wsme.wsattr(wtypes.IntegerType(minimum=1), default=1) """The number of master nodes for this cluster. Default to 1 if not set""" docker_volume_size = wtypes.IntegerType(minimum=1) """The size in GB of the docker volume""" labels = wtypes.DictType(str, str) """One or more key/value pairs""" master_flavor_id = wtypes.StringType(min_length=1, max_length=255) """The flavor of the master node for this Cluster""" flavor_id = wtypes.StringType(min_length=1, max_length=255) """The flavor of this Cluster""" create_timeout = wsme.wsattr(wtypes.IntegerType(minimum=0), default=60) """Timeout for creating the cluster in minutes. 
Default to 60 if not set""" links = wsme.wsattr([link.Link], readonly=True) """A list containing a self link and associated cluster links""" stack_id = wsme.wsattr(wtypes.text, readonly=True) """Stack id of the heat stack""" status = wtypes.Enum(str, *fields.ClusterStatus.ALL) """Status of the cluster from the heat stack""" status_reason = wtypes.text """Status reason of the cluster from the heat stack""" discovery_url = wtypes.text """Url used for cluster node discovery""" api_address = wsme.wsattr(wtypes.text, readonly=True) """Api address of cluster master node""" coe_version = wsme.wsattr(wtypes.text, readonly=True) """Version of the COE software currently running in this cluster. Example: swarm version or kubernetes version.""" container_version = wsme.wsattr(wtypes.text, readonly=True) """Version of the container software. Example: docker version.""" project_id = wsme.wsattr(wtypes.text, readonly=True) """Project id of the cluster belongs to""" user_id = wsme.wsattr(wtypes.text, readonly=True) """User id of the cluster belongs to""" node_addresses = wsme.wsattr([wtypes.text], readonly=True) """IP addresses of cluster slave nodes""" master_addresses = wsme.wsattr([wtypes.text], readonly=True) """IP addresses of cluster master nodes""" faults = wsme.wsattr(wtypes.DictType(str, wtypes.text)) """Fault info collected from the heat resources of this cluster""" def __init__(self, **kwargs): super(Cluster, self).__init__() self.fields = [] for field in objects.Cluster.fields: # Skip fields we do not expose. 
if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) @staticmethod def _convert_with_links(cluster, url, expand=True): if not expand: cluster.unset_fields_except(['uuid', 'name', 'cluster_template_id', 'keypair', 'docker_volume_size', 'labels', 'node_count', 'status', 'master_flavor_id', 'flavor_id', 'create_timeout', 'master_count', 'stack_id']) cluster.links = [link.Link.make_link('self', url, 'clusters', cluster.uuid), link.Link.make_link('bookmark', url, 'clusters', cluster.uuid, bookmark=True)] return cluster @classmethod def convert_with_links(cls, rpc_cluster, expand=True): cluster = Cluster(**rpc_cluster.as_dict()) return cls._convert_with_links(cluster, pecan.request.host_url, expand) @classmethod def sample(cls, expand=True): temp_id = '4a96ac4b-2447-43f1-8ca6-9fd6f36d146d' sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', name='example', cluster_template_id=temp_id, keypair=None, node_count=2, master_count=1, docker_volume_size=1, labels={}, master_flavor_id='m1.small', flavor_id='m1.small', create_timeout=15, stack_id='49dc23f5-ffc9-40c3-9d34-7be7f9e34d63', status=fields.ClusterStatus.CREATE_COMPLETE, status_reason="CREATE completed successfully", api_address='172.24.4.3', node_addresses=['172.24.4.4', '172.24.4.5'], created_at=timeutils.utcnow(), updated_at=timeutils.utcnow(), coe_version=None, container_version=None) return cls._convert_with_links(sample, 'http://localhost:9511', expand) class ClusterPatchType(types.JsonPatchType): _api_base = Cluster @staticmethod def internal_attrs(): internal_attrs = ['/api_address', '/node_addresses', '/master_addresses', '/stack_id', '/ca_cert_ref', '/magnum_cert_ref', '/trust_id', '/trustee_user_name', '/trustee_password', '/trustee_user_id'] return types.JsonPatchType.internal_attrs() + internal_attrs class ClusterCollection(collection.Collection): """API representation of a collection of clusters.""" clusters = [Cluster] """A list 
containing cluster objects""" def __init__(self, **kwargs): self._type = 'clusters' @staticmethod def convert_with_links(rpc_clusters, limit, url=None, expand=False, **kwargs): collection = ClusterCollection() collection.clusters = [Cluster.convert_with_links(p, expand) for p in rpc_clusters] collection.next = collection.get_next(limit, url=url, **kwargs) return collection @classmethod def sample(cls): sample = cls() sample.clusters = [Cluster.sample(expand=False)] return sample class ClustersController(base.Controller): """REST controller for Clusters.""" def __init__(self): super(ClustersController, self).__init__() _custom_actions = { 'detail': ['GET'], } def _generate_name_for_cluster(self, context): """Generate a random name like: zeta-22-cluster.""" name_gen = name_generator.NameGenerator() name = name_gen.generate() return name + '-cluster' def _get_clusters_collection(self, marker, limit, sort_key, sort_dir, expand=False, resource_url=None): context = pecan.request.context if context.is_admin: if expand: policy.enforce(context, "cluster:detail_all_projects", action="cluster:detail_all_projects") else: policy.enforce(context, "cluster:get_all_all_projects", action="cluster:get_all_all_projects") # TODO(flwang): Instead of asking an extra 'all_project's # parameter, currently the design is allowing admin user to list # all clusters from all projects. But the all_tenants is one of # the condition to do project filter in DB API. And it's also used # by periodic tasks. So the could be removed in the future and # a new parameter 'project_id' would be added so that admin user # can list clusters for a particular project. 
            # Admin listing across projects was authorized above; setting
            # all_tenants widens the project filter in the DB API layer so
            # every project's clusters are returned.
            context.all_tenants = True

        # Clamp/normalize paging inputs before hitting the DB.
        limit = api_utils.validate_limit(limit)
        sort_dir = api_utils.validate_sort_dir(sort_dir)

        marker_obj = None
        if marker:
            # Resolve the pagination marker UUID to a Cluster object; raises
            # if the marker does not exist.
            marker_obj = objects.Cluster.get_by_uuid(pecan.request.context,
                                                     marker)

        clusters = objects.Cluster.list(pecan.request.context, limit,
                                        marker_obj, sort_key=sort_key,
                                        sort_dir=sort_dir)

        return ClusterCollection.convert_with_links(clusters, limit,
                                                    url=resource_url,
                                                    expand=expand,
                                                    sort_key=sort_key,
                                                    sort_dir=sort_dir)

    @expose.expose(ClusterCollection, types.uuid, int, wtypes.text,
                   wtypes.text)
    def get_all(self, marker=None, limit=None, sort_key='id',
                sort_dir='asc'):
        """Retrieve a list of clusters.

        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single
                      result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        """
        context = pecan.request.context
        policy.enforce(context, 'cluster:get_all',
                       action='cluster:get_all')
        return self._get_clusters_collection(marker, limit, sort_key,
                                             sort_dir)

    @expose.expose(ClusterCollection, types.uuid, int, wtypes.text,
                   wtypes.text)
    def detail(self, marker=None, limit=None, sort_key='id',
               sort_dir='asc'):
        """Retrieve a list of clusters with detail.

        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single
                      result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
""" context = pecan.request.context policy.enforce(context, 'cluster:detail', action='cluster:detail') # NOTE(lucasagomes): /detail should only work against collections parent = pecan.request.path.split('/')[:-1][-1] if parent != "clusters": raise exception.HTTPNotFound expand = True resource_url = '/'.join(['clusters', 'detail']) return self._get_clusters_collection(marker, limit, sort_key, sort_dir, expand, resource_url) def _collect_fault_info(self, context, cluster): """Collect fault info from heat resources of given cluster and store them into cluster.faults. """ osc = clients.OpenStackClients(context) filters = {'status': 'FAILED'} try: failed_resources = osc.heat().resources.list( cluster.stack_id, nested_depth=2, filters=filters) except Exception as e: failed_resources = [] LOG.warning("Failed to retrieve failed resources for " "cluster %(cluster)s from Heat stack " "%(stack)s due to error: %(e)s", {'cluster': cluster.uuid, 'stack': cluster.stack_id, 'e': e}, exc_info=True) return {res.resource_name: res.resource_status_reason for res in failed_resources} @expose.expose(Cluster, types.uuid_or_name) def get_one(self, cluster_ident): """Retrieve information about the given Cluster. :param cluster_ident: UUID or logical name of the Cluster. """ context = pecan.request.context if context.is_admin: policy.enforce(context, "cluster:get_one_all_projects", action="cluster:get_one_all_projects") # TODO(flwang): Instead of asking an extra 'all_project's # parameter, currently the design is allowing admin user to list # all clusters from all projects. But the all_tenants is one of # the condition to do project filter in DB API. And it's also used # by periodic tasks. So the could be removed in the future and # a new parameter 'project_id' would be added so that admin user # can list clusters for a particular project. 
context.all_tenants = True cluster = api_utils.get_resource('Cluster', cluster_ident) policy.enforce(context, 'cluster:get', cluster.as_dict(), action='cluster:get') cluster = Cluster.convert_with_links(cluster) if cluster.status in fields.ClusterStatus.STATUS_FAILED: cluster.faults = self._collect_fault_info(context, cluster) return cluster def _check_cluster_quota_limit(self, context): try: # Check if there is any explicit quota limit set in Quotas table quota = objects.Quota.get_quota_by_project_id_resource( context, context.project_id, 'Cluster') cluster_limit = quota.hard_limit except exception.QuotaNotFound: # If explicit quota was not set for the project, use default limit cluster_limit = CONF.quotas.max_clusters_per_project if objects.Cluster.get_count_all(context) >= cluster_limit: msg = _("You have reached the maximum clusters per project, " "%d. You may delete a cluster to make room for a new " "one.") % cluster_limit raise exception.ResourceLimitExceeded(msg=msg) @expose.expose(ClusterID, body=Cluster, status_code=202) @validation.enforce_cluster_type_supported() @validation.enforce_cluster_volume_storage_size() def post(self, cluster): """Create a new cluster. :param cluster: a cluster within the request body. 
""" context = pecan.request.context policy.enforce(context, 'cluster:create', action='cluster:create') self._check_cluster_quota_limit(context) temp_id = cluster.cluster_template_id cluster_template = objects.ClusterTemplate.get_by_uuid(context, temp_id) # If keypair not present, use cluster_template value if cluster.keypair is None: cluster.keypair = cluster_template.keypair_id # If docker_volume_size is not present, use cluster_template value if cluster.docker_volume_size == wtypes.Unset: cluster.docker_volume_size = cluster_template.docker_volume_size # If labels is not present, use cluster_template value if cluster.labels == wtypes.Unset: cluster.labels = cluster_template.labels # If master_flavor_id is not present, use cluster_template value if (cluster.master_flavor_id == wtypes.Unset or not cluster.master_flavor_id): cluster.master_flavor_id = cluster_template.master_flavor_id # If flavor_id is not present, use cluster_template value if cluster.flavor_id == wtypes.Unset or not cluster.flavor_id: cluster.flavor_id = cluster_template.flavor_id cluster_dict = cluster.as_dict() attr_validator.validate_os_resources(context, cluster_template.as_dict(), cluster_dict) attr_validator.validate_master_count(cluster_dict, cluster_template.as_dict()) cluster_dict['project_id'] = context.project_id cluster_dict['user_id'] = context.user_id # NOTE(yuywz): We will generate a random human-readable name for # cluster if the name is not specified by user. 
name = cluster_dict.get('name') or \ self._generate_name_for_cluster(context) cluster_dict['name'] = name cluster_dict['coe_version'] = None cluster_dict['container_version'] = None new_cluster = objects.Cluster(context, **cluster_dict) new_cluster.uuid = uuid.uuid4() pecan.request.rpcapi.cluster_create_async(new_cluster, cluster.create_timeout) return ClusterID(new_cluster.uuid) @base.Controller.api_version("1.1", "1.2") @wsme.validate(types.uuid, [ClusterPatchType]) @expose.expose(ClusterID, types.uuid_or_name, body=[ClusterPatchType], status_code=202) def patch(self, cluster_ident, patch): """Update an existing Cluster. :param cluster_ident: UUID or logical name of a cluster. :param patch: a json PATCH document to apply to this cluster. """ cluster = self._patch(cluster_ident, patch) pecan.request.rpcapi.cluster_update_async(cluster) return ClusterID(cluster.uuid) @base.Controller.api_version("1.3") # noqa @wsme.validate(types.uuid, bool, [ClusterPatchType]) @expose.expose(ClusterID, types.uuid_or_name, types.boolean, body=[ClusterPatchType], status_code=202) def patch(self, cluster_ident, rollback=False, patch=None): """Update an existing Cluster. :param cluster_ident: UUID or logical name of a cluster. :param rollback: whether to rollback cluster on update failure. :param patch: a json PATCH document to apply to this cluster. 
""" cluster = self._patch(cluster_ident, patch) pecan.request.rpcapi.cluster_update_async(cluster, rollback) return ClusterID(cluster.uuid) def _patch(self, cluster_ident, patch): context = pecan.request.context cluster = api_utils.get_resource('Cluster', cluster_ident) policy.enforce(context, 'cluster:update', cluster.as_dict(), action='cluster:update') try: cluster_dict = cluster.as_dict() new_cluster = Cluster(**api_utils.apply_jsonpatch(cluster_dict, patch)) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) # Update only the fields that have changed for field in objects.Cluster.fields: try: patch_val = getattr(new_cluster, field) except AttributeError: # Ignore fields that aren't exposed in the API continue if patch_val == wtypes.Unset: patch_val = None if cluster[field] != patch_val: cluster[field] = patch_val delta = cluster.obj_what_changed() validation.validate_cluster_properties(delta) return cluster @expose.expose(None, types.uuid_or_name, status_code=204) def delete(self, cluster_ident): """Delete a cluster. :param cluster_ident: UUID of cluster or logical name of the cluster. """ context = pecan.request.context if context.is_admin: policy.enforce(context, 'cluster:delete_all_projects', action='cluster:delete_all_projects') context.all_tenants = True cluster = api_utils.get_resource('Cluster', cluster_ident) policy.enforce(context, 'cluster:delete', cluster.as_dict(), action='cluster:delete') pecan.request.rpcapi.cluster_delete_async(cluster.uuid) magnum-6.1.0/magnum/api/controllers/v1/collection.py0000666000175100017510000000334313244017334022472 0ustar zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pecan from wsme import types as wtypes from magnum.api.controllers import base from magnum.api.controllers import link class Collection(base.APIBase): next = wtypes.text """A link to retrieve the next subset of the collection""" @property def collection(self): return getattr(self, self._type) def has_next(self, limit): """Return whether collection has more items.""" return len(self.collection) and len(self.collection) == limit def get_next(self, limit, url=None, marker_attribute='uuid', **kwargs): """Return a link to the next subset of the collection.""" if not self.has_next(limit): return wtypes.Unset resource_url = url or self._type q_args = ''.join(['%s=%s&' % (key, kwargs[key]) for key in kwargs]) next_args = '?%(args)slimit=%(limit)d&marker=%(marker)s' % { 'args': q_args, 'limit': limit, 'marker': getattr(self.collection[-1], marker_attribute)} return link.Link.make_link('next', pecan.request.host_url, resource_url, next_args).href magnum-6.1.0/magnum/api/controllers/v1/quota.py0000666000175100017510000001721713244017334021475 0ustar zuulzuul00000000000000# Copyright 2013 UnitedStack Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import pecan import wsme from wsme import types as wtypes from magnum.api.controllers import base from magnum.api.controllers.v1 import collection from magnum.api.controllers.v1 import types from magnum.api import expose from magnum.api import utils as api_utils from magnum.api import validation from magnum.common import exception from magnum.common import policy from magnum.i18n import _ from magnum import objects from magnum.objects import fields class Quota(base.APIBase): """API representation of a project Quota. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of Quota. """ id = wsme.wsattr(wtypes.IntegerType(minimum=1)) """unique id""" hard_limit = wsme.wsattr(wtypes.IntegerType(minimum=1), default=1) """The hard limit for total number of clusters. Default to 1 if not set""" project_id = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255), default=None) """The project id""" resource = wsme.wsattr(wtypes.Enum(str, *fields.QuotaResourceName.ALL), default='Cluster') """The resource name""" def __init__(self, **kwargs): super(Quota, self).__init__() self.fields = [] for field in objects.Quota.fields: # Skip fields we do not expose. 
if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) @classmethod def convert(cls, quota): return Quota(**quota.as_dict()) class QuotaCollection(collection.Collection): """API representation of a collection of quotas.""" quotas = [Quota] """A list containing quota objects""" def __init__(self, **kwargs): self._type = 'quotas' @staticmethod def convert(quotas, limit, **kwargs): collection = QuotaCollection() collection.quotas = [Quota.convert(p) for p in quotas] collection.next = collection.get_next(limit, marker_attribute='id', **kwargs) return collection class QuotaController(base.Controller): """REST controller for Quotas.""" def __init__(self): super(QuotaController, self).__init__() _custom_actions = { 'detail': ['GET'], } def _get_quota_collection(self, marker, limit, sort_key, sort_dir, filters): limit = api_utils.validate_limit(limit) sort_dir = api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.Quota.get_by_id(pecan.request.context, marker) quotas = objects.Quota.list(pecan.request.context, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir, filters=filters) return QuotaCollection.convert(quotas, limit, sort_key=sort_key, sort_dir=sort_dir) @expose.expose(QuotaCollection, int, int, wtypes.text, wtypes.text, types.boolean) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc', all_tenants=False): """Retrieve a list of quotas. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param all_tenants: a flag to indicate all or current tenant. 
""" context = pecan.request.context policy.enforce(context, 'quota:get_all', action='quota:get_all') filters = {} if not context.is_admin or not all_tenants: filters = {"project_id": context.project_id} return self._get_quota_collection(marker, limit, sort_key, sort_dir, filters) @expose.expose(Quota, wtypes.text, wtypes.text) def get_one(self, project_id, resource): """Retrieve Quota information for the given project_id. :param id: project id. :param resource: resource name. """ context = pecan.request.context policy.enforce(context, 'quota:get', action='quota:get') if not context.is_admin and project_id != context.project_id: raise exception.NotAuthorized() quota = objects.Quota.get_quota_by_project_id_resource(context, project_id, resource) return Quota.convert(quota) @expose.expose(Quota, body=Quota, status_code=201) @validation.enforce_valid_project_id_on_create() def post(self, quota): """Create Quota. :param quota: a json document to create this Quota. """ context = pecan.request.context policy.enforce(context, 'quota:create', action='quota:create') quota_dict = quota.as_dict() if 'project_id'not in quota_dict or not quota_dict['project_id']: msg = _('Must provide a valid project ID.') raise exception.InvalidParameterValue(message=msg) new_quota = objects.Quota(context, **quota_dict) new_quota.create() return Quota.convert(new_quota) @expose.expose(Quota, wtypes.text, wtypes.text, body=Quota, status_code=202) def patch(self, project_id, resource, quotapatch): """Update Quota for a given project_id. :param project_id: project id. :param resource: resource name. :param quotapatch: a json document to update Quota. 
""" context = pecan.request.context policy.enforce(context, 'quota:update', action='quota:update') quota_dict = quotapatch.as_dict() quota_dict['project_id'] = project_id quota_dict['resource'] = resource db_quota = objects.Quota.update_quota(context, project_id, quota_dict) return Quota.convert(db_quota) @expose.expose(None, wtypes.text, wtypes.text, status_code=204) def delete(self, project_id, resource): """Delete Quota for a given project_id and resource. :param project_id: project id. :param resource: resource name. """ context = pecan.request.context policy.enforce(context, 'quota:delete', action='quota:delete') quota_dict = {"project_id": project_id, "resource": resource} quota = objects.Quota(context, **quota_dict) quota.delete() magnum-6.1.0/magnum/api/controllers/base.py0000666000175100017510000001746113244017334020731 0ustar zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import operator import six from magnum.api.controllers import versions from magnum.api import versioned_method from magnum.common import exception from magnum.i18n import _ from pecan import rest from webob import exc import wsme from wsme import types as wtypes # name of attribute to keep version method information VER_METHOD_ATTR = 'versioned_methods' class APIBase(wtypes.Base): created_at = wsme.wsattr(datetime.datetime, readonly=True) """The time in UTC at which the object is created""" updated_at = wsme.wsattr(datetime.datetime, readonly=True) """The time in UTC at which the object is updated""" def as_dict(self): """Render this object as a dict of its fields.""" return {k: getattr(self, k) for k in self.fields if hasattr(self, k) and getattr(self, k) != wsme.Unset} def unset_fields_except(self, except_list=None): """Unset fields so they don't appear in the message body. :param except_list: A list of fields that won't be touched. """ if except_list is None: except_list = [] for k in self.as_dict(): if k not in except_list: setattr(self, k, wsme.Unset) class ControllerMetaclass(type): """Controller metaclass. This metaclass automates the task of assembling a dictionary mapping action keys to method names. """ def __new__(mcs, name, bases, cls_dict): """Adds version function dictionary to the class.""" versioned_methods = None for base in bases: if base.__name__ == "Controller": # NOTE(cyeoh): This resets the VER_METHOD_ATTR attribute # between API controller class creations. This allows us # to use a class decorator on the API methods that doesn't # require naming explicitly what method is being versioned as # it can be implicit based on the method decorated. It is a bit # ugly. 
                # Take over (and detach) any version map the base
                # Controller accumulated, so the next Controller subclass
                # starts with a clean slate.
                if VER_METHOD_ATTR in base.__dict__:
                    versioned_methods = getattr(base, VER_METHOD_ATTR)
                    delattr(base, VER_METHOD_ATTR)

        if versioned_methods:
            cls_dict[VER_METHOD_ATTR] = versioned_methods

        return super(ControllerMetaclass, mcs).__new__(mcs, name, bases,
                                                       cls_dict)


@six.add_metaclass(ControllerMetaclass)
class Controller(rest.RestController):
    """Base Rest Controller"""

    def __getattribute__(self, key):

        def version_select():
            """Select the correct method based on version

            @return: Returns the correct versioned method
            @raises: HTTPNotAcceptable if there is no method which
                     matches the name and version constraints
            """
            from pecan import request
            ver = request.version
            func_list = self.versioned_methods[key]
            # func_list is sorted newest-first, so the first match is the
            # latest method supporting the requested version.
            for func in func_list:
                if ver.matches(func.start_version, func.end_version):
                    return func.func

            raise exc.HTTPNotAcceptable(_(
                "Version %(ver)s was requested but the requested API %(api)s "
                "is not supported for this version.") %
                {'ver': ver, 'api': key})

        try:
            version_meth_dict = object.__getattribute__(self,
                                                        VER_METHOD_ATTR)
        except AttributeError:
            # No versioning on this class
            return object.__getattribute__(self, key)
        if version_meth_dict and key in version_meth_dict:
            # Bind the selected plain function to this instance so it
            # behaves like a normal method for the caller.
            return version_select().__get__(self, self.__class__)

        return object.__getattribute__(self, key)

    # NOTE: This decorator MUST appear first (the outermost
    # decorator) on an API method for it to work correctly
    @classmethod
    def api_version(cls, min_ver, max_ver=None):
        """Decorator for versioning api methods.

        Add the decorator to any pecan method that has been exposed.
        This decorator will store the method, min version, and max
        version in a list for each api. It will check that there is no
        overlap between versions and methods. When the api is called the
        controller will use the list for each api to determine which
        method to call.
Example: @base.Controller.api_version("1.1", "1.2") @expose.expose(Cluster, types.uuid_or_name) def get_one(self, cluster_ident): {...code for versions 1.1 to 1.2...} @base.Controller.api_version("1.3") @expose.expose(Cluster, types.uuid_or_name) def get_one(self, cluster_ident): {...code for versions 1.3 to latest} @min_ver: string representing minimum version @max_ver: optional string representing maximum version @raises: ApiVersionsIntersect if an version overlap is found between method versions. """ def decorator(f): obj_min_ver = versions.Version('', '', '', min_ver) if max_ver: obj_max_ver = versions.Version('', '', '', max_ver) else: obj_max_ver = versions.Version('', '', '', versions.CURRENT_MAX_VER) # Add to list of versioned methods registered func_name = f.__name__ new_func = versioned_method.VersionedMethod( func_name, obj_min_ver, obj_max_ver, f) func_dict = getattr(cls, VER_METHOD_ATTR, {}) if not func_dict: setattr(cls, VER_METHOD_ATTR, func_dict) func_list = func_dict.get(func_name, []) if not func_list: func_dict[func_name] = func_list func_list.append(new_func) is_intersect = Controller.check_for_versions_intersection( func_list) if is_intersect: raise exception.ApiVersionsIntersect( name=new_func.name, min_ver=new_func.start_version, max_ver=new_func.end_version ) # Ensure the list is sorted by minimum version (reversed) # so later when we work through the list in order we find # the method which has the latest version which supports # the version requested. 
func_list.sort(key=lambda f: f.start_version, reverse=True) return f return decorator @staticmethod def check_for_versions_intersection(func_list): """Determines whether function list intersections General algorithm: https://en.wikipedia.org/wiki/Intersection_algorithm :param func_list: list of VersionedMethod objects :return: boolean """ pairs = [] counter = 0 for f in func_list: pairs.append((f.start_version, 1)) pairs.append((f.end_version, -1)) pairs.sort(key=operator.itemgetter(1), reverse=True) pairs.sort(key=operator.itemgetter(0)) for p in pairs: counter += p[1] if counter > 1: return True return False magnum-6.1.0/magnum/api/controllers/__init__.py0000666000175100017510000000000013244017334021533 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/api/controllers/root.py0000666000175100017510000000637613244017334021005 0ustar zuulzuul00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import pecan from pecan import rest from wsme import types as wtypes from magnum.api.controllers import base from magnum.api.controllers import link from magnum.api.controllers import v1 from magnum.api.controllers import versions from magnum.api import expose class Version(base.APIBase): """An API version representation.""" id = wtypes.text """The ID of the version, also acts as the release number""" links = [link.Link] """A Link that point to a specific version of the API""" status = wtypes.text """The current status of the version: CURRENT, SUPPORTED, UNSUPPORTED""" max_version = wtypes.text """The max microversion supported by this version""" min_version = wtypes.text """The min microversion supported by this version""" @staticmethod def convert(id, status, max, min): version = Version() version.id = id version.links = [link.Link.make_link('self', pecan.request.host_url, id, '', bookmark=True)] version.status = status version.max_version = max version.min_version = min return version class Root(base.APIBase): name = wtypes.text """The name of the API""" description = wtypes.text """Some information about this API""" versions = [Version] """Links to all the versions available in this API""" @staticmethod def convert(): root = Root() root.name = "OpenStack Magnum API" root.description = ("Magnum is an OpenStack project which aims to " "provide container cluster management.") root.versions = [Version.convert('v1', "CURRENT", versions.CURRENT_MAX_VER, versions.BASE_VER)] return root class RootController(rest.RestController): _versions = ['v1'] """All supported API versions""" _default_version = 'v1' """The default API version""" v1 = v1.Controller() @expose.expose(Root) def get(self): # NOTE: The reason why convert() it's being called for every # request is because we need to get the host url from # the request object to make the links. return Root.convert() @pecan.expose() def _route(self, args): """Overrides the default routing behavior. 
It redirects the request to the default version of the magnum API if the version number is not specified in the url. """ if args[0] and args[0] not in self._versions: args = [self._default_version] + args return super(RootController, self)._route(args) magnum-6.1.0/magnum/api/controllers/link.py0000666000175100017510000000375513244017334020755 0ustar zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pecan from wsme import types as wtypes from magnum.api.controllers import base def build_url(resource, resource_args, bookmark=False, base_url=None): if base_url is None: base_url = pecan.request.host_url template = '%(url)s/%(res)s' if bookmark else '%(url)s/v1/%(res)s' # FIXME(lucasagomes): I'm getting a 404 when doing a GET on # a nested resource that the URL ends with a '/'. 
# https://groups.google.com/forum/#!topic/pecan-dev/QfSeviLg5qs template += '%(args)s' if resource_args.startswith('?') else '/%(args)s' return template % {'url': base_url, 'res': resource, 'args': resource_args} class Link(base.APIBase): """A link representation.""" href = wtypes.text """The url of a link.""" rel = wtypes.text """The name of a link.""" type = wtypes.text """Indicates the type of document/link.""" @staticmethod def make_link(rel_name, url, resource, resource_args, bookmark=False, type=wtypes.Unset): href = build_url(resource, resource_args, bookmark=bookmark, base_url=url) return Link(href=href, rel=rel_name, type=type) @classmethod def sample(cls): sample = cls(href="http://localhost:9511/clusters/" "eaaca217-e7d8-47b4-bb41-3f99f20eed89", rel="bookmark") return sample magnum-6.1.0/magnum/api/controllers/versions.py0000666000175100017510000001223613244017334021662 0ustar zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from webob import exc from magnum.i18n import _ # NOTE(yuntong): v1.0 is reserved to indicate Kilo's API, but is not presently # supported by the API service. All changes between Kilo and the # point where we added microversioning are considered backwards- # compatible, but are not specifically discoverable at this time. 
# # The v1.1 version indicates this "initial" version as being # different from Kilo (v1.0), and includes the following changes: # # Add details of new api versions here: # # For each newly added microversion change, update the API version history # string below with a one or two line description. Also update # rest_api_version_history.rst for extra information on microversion. REST_API_VERSION_HISTORY = """REST API Version History: * 1.1 - Initial version * 1.2 - Async bay operations support * 1.3 - Add bay rollback support * 1.4 - Add stats API * 1.5 - Add cluster CA certificate rotation support * 1.6 - Add quotas API """ BASE_VER = '1.1' CURRENT_MAX_VER = '1.6' class Version(object): """API Version object.""" string = 'OpenStack-API-Version' """HTTP Header string carrying the requested version""" min_string = 'OpenStack-API-Minimum-Version' """HTTP response header""" max_string = 'OpenStack-API-Maximum-Version' """HTTP response header""" service_string = 'container-infra' def __init__(self, headers, default_version, latest_version, from_string=None): """Create an API Version object from the supplied headers. :param headers: webob headers :param default_version: version to use if not specified in headers :param latest_version: version to use if latest is requested :param from_string: create the version from string not headers :raises: webob.HTTPNotAcceptable """ if from_string: (self.major, self.minor) = tuple(int(i) for i in from_string.split('.')) else: (self.major, self.minor) = Version.parse_headers(headers, default_version, latest_version) def __repr__(self): return '%s.%s' % (self.major, self.minor) @staticmethod def parse_headers(headers, default_version, latest_version): """Determine the API version requested based on the headers supplied. 
:param headers: webob headers :param default_version: version to use if not specified in headers :param latest_version: version to use if latest is requested :returns: a tuple of (major, minor) version numbers :raises: webob.HTTPNotAcceptable """ version_hdr = headers.get(Version.string, default_version) try: version_service, version_str = version_hdr.split() except ValueError: raise exc.HTTPNotAcceptable(_( "Invalid service type for %s header") % Version.string) if version_str.lower() == 'latest': version_service, version_str = latest_version.split() if version_service != Version.service_string: raise exc.HTTPNotAcceptable(_( "Invalid service type for %s header") % Version.string) try: version = tuple(int(i) for i in version_str.split('.')) except ValueError: version = () if len(version) != 2: raise exc.HTTPNotAcceptable(_( "Invalid value for %s header") % Version.string) return version def is_null(self): return self.major == 0 and self.minor == 0 def matches(self, start_version, end_version): if self.is_null(): raise ValueError return start_version <= self <= end_version def __lt__(self, other): if self.major < other.major: return True if self.major == other.major and self.minor < other.minor: return True return False def __gt__(self, other): if self.major > other.major: return True if self.major == other.major and self.minor > other.minor: return True return False def __eq__(self, other): return self.major == other.major and self.minor == other.minor def __le__(self, other): return self < other or self == other def __ne__(self, other): return not self.__eq__(other) def __ge__(self, other): return self > other or self == other magnum-6.1.0/magnum/api/__init__.py0000666000175100017510000000000013244017334017165 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/api/versioned_method.py0000666000175100017510000000234413244017334021001 0ustar zuulzuul00000000000000# Copyright 2014 IBM Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class VersionedMethod(object): def __init__(self, name, start_version, end_version, func): """Versioning information for a single method @name: Name of the method @start_version: Minimum acceptable version @end_version: Maximum acceptable version @func: Method to call Minimum and maximum are inclusive """ self.name = name self.start_version = start_version self.end_version = end_version self.func = func def __str__(self): return ("Version Method %s: min: %s, max: %s" % (self.name, self.start_version, self.end_version)) magnum-6.1.0/magnum/api/app.py0000777000175100017510000000355213244017334016230 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import os from oslo_config import cfg from oslo_log import log from paste import deploy import pecan from magnum.api import config as api_config from magnum.api import middleware from magnum.common import config as common_config import magnum.conf CONF = magnum.conf.CONF LOG = log.getLogger(__name__) def get_pecan_config(): # Set up the pecan configuration filename = api_config.__file__.replace('.pyc', '.py') return pecan.configuration.conf_from_file(filename) def setup_app(config=None): if not config: config = get_pecan_config() app_conf = dict(config.app) common_config.set_config_defaults() app = pecan.make_app( app_conf.pop('root'), logging=getattr(config, 'logging', {}), wrap_app=middleware.ParsableErrorMiddleware, **app_conf ) return app def load_app(): cfg_file = None cfg_path = CONF.api.api_paste_config if not os.path.isabs(cfg_path): cfg_file = CONF.find_file(cfg_path) elif os.path.exists(cfg_path): cfg_file = cfg_path if not cfg_file: raise cfg.ConfigFilesNotFoundError([CONF.api.api_paste_config]) LOG.info("Full WSGI config used: %s", cfg_file) return deploy.loadapp("config:" + cfg_file) def app_factory(global_config, **local_conf): return setup_app() magnum-6.1.0/magnum/api/validation.py0000666000175100017510000003007513244017334017577 0ustar zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import decorator import pecan from keystoneauth1 import exceptions as ka_exception from magnum.api import utils as api_utils from magnum.common import clients from magnum.common import exception import magnum.conf from magnum.drivers.common import driver from magnum.i18n import _ from magnum import objects CONF = magnum.conf.CONF cluster_update_allowed_properties = set(['node_count']) federation_update_allowed_properties = set(['member_ids', 'properties']) def enforce_cluster_type_supported(): @decorator.decorator def wrapper(func, *args, **kwargs): cluster = args[1] cluster_template = objects.ClusterTemplate.get_by_uuid( pecan.request.context, cluster.cluster_template_id) cluster_type = (cluster_template.server_type, cluster_template.cluster_distro, cluster_template.coe) driver.Driver.get_driver(*cluster_type) return func(*args, **kwargs) return wrapper def enforce_driver_supported(): @decorator.decorator def wrapper(func, *args, **kwargs): cluster_template = args[1] cluster_distro = cluster_template.cluster_distro if not cluster_distro: try: cli = clients.OpenStackClients(pecan.request.context) image_id = cluster_template.image_id image = api_utils.get_openstack_resource(cli.glance().images, image_id, 'images') cluster_distro = image.get('os_distro') except Exception: pass cluster_type = (cluster_template.server_type, cluster_distro, cluster_template.coe) driver.Driver.get_driver(*cluster_type) return func(*args, **kwargs) return wrapper def enforce_cluster_volume_storage_size(): @decorator.decorator def wrapper(func, *args, **kwargs): cluster = args[1] cluster_template = objects.ClusterTemplate.get_by_uuid( pecan.request.context, cluster.cluster_template_id) _enforce_volume_storage_size( cluster_template.as_dict(), cluster.as_dict()) return func(*args, **kwargs) return wrapper def enforce_valid_project_id_on_create(): @decorator.decorator def wrapper(func, *args, **kwargs): quota = args[1] _validate_project_id(quota.project_id) return func(*args, **kwargs) 
return wrapper def _validate_project_id(project_id): try: context = pecan.request.context osc = clients.OpenStackClients(context) osc.keystone().domain_admin_client.projects.get(project_id) except ka_exception.http.NotFound: raise exception.ProjectNotFound(name='project_id', id=project_id) def enforce_network_driver_types_create(): @decorator.decorator def wrapper(func, *args, **kwargs): cluster_template = args[1] _enforce_network_driver_types(cluster_template) return func(*args, **kwargs) return wrapper def enforce_network_driver_types_update(): @decorator.decorator def wrapper(func, *args, **kwargs): cluster_template_ident = args[1] patch = args[2] cluster_template = api_utils.get_resource('ClusterTemplate', cluster_template_ident) try: cluster_template_dict = api_utils.apply_jsonpatch( cluster_template.as_dict(), patch) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) cluster_template = objects.ClusterTemplate(pecan.request.context, **cluster_template_dict) _enforce_network_driver_types(cluster_template) return func(*args, **kwargs) return wrapper def _enforce_network_driver_types(cluster_template): validator = Validator.get_coe_validator(cluster_template.coe) if not cluster_template.network_driver: cluster_template.network_driver = validator.default_network_driver validator.validate_network_driver(cluster_template.network_driver) def enforce_server_type(): @decorator.decorator def wrapper(func, *args, **kwargs): cluster_template = args[1] _enforce_server_type(cluster_template) return func(*args, **kwargs) return wrapper def _enforce_server_type(cluster_template): validator = Validator.get_coe_validator(cluster_template.coe) validator.validate_server_type(cluster_template.server_type) def enforce_volume_driver_types_create(): @decorator.decorator def wrapper(func, *args, **kwargs): cluster_template = args[1] _enforce_volume_driver_types(cluster_template.as_dict()) return func(*args, **kwargs) return wrapper def 
enforce_volume_storage_size_create(): @decorator.decorator def wrapper(func, *args, **kwargs): cluster_template = args[1] _enforce_volume_storage_size(cluster_template.as_dict(), {}) return func(*args, **kwargs) return wrapper def enforce_volume_driver_types_update(): @decorator.decorator def wrapper(func, *args, **kwargs): cluster_template_ident = args[1] patch = args[2] cluster_template = api_utils.get_resource('ClusterTemplate', cluster_template_ident) try: cluster_template_dict = api_utils.apply_jsonpatch( cluster_template.as_dict(), patch) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) _enforce_volume_driver_types(cluster_template_dict) return func(*args, **kwargs) return wrapper def _enforce_volume_driver_types(cluster_template): validator = Validator.get_coe_validator(cluster_template['coe']) if not cluster_template.get('volume_driver'): return validator.validate_volume_driver(cluster_template['volume_driver']) def _enforce_volume_storage_size(cluster_template, cluster): volume_size = cluster.get('docker_volume_size') \ or cluster_template.get('docker_volume_size') if not volume_size: return storage_driver = cluster_template.get('docker_storage_driver') if storage_driver == 'devicemapper': if volume_size < 3: raise exception.InvalidParameterValue( 'docker volume size %s GB is not valid, ' 'expecting minimum value 3GB for %s storage ' 'driver.' 
% (volume_size, storage_driver)) def validate_cluster_properties(delta): update_disallowed_properties = delta - cluster_update_allowed_properties if update_disallowed_properties: err = (_("cannot change cluster property(ies) %s.") % ", ".join(update_disallowed_properties)) raise exception.InvalidParameterValue(err=err) def validate_federation_properties(delta): update_disallowed_properties = delta - federation_update_allowed_properties if update_disallowed_properties: err = (_("cannot change federation property(ies) %s.") % ", ".join(update_disallowed_properties)) raise exception.InvalidParameterValue(err=err) class Validator(object): @classmethod def get_coe_validator(cls, coe): if coe == 'kubernetes': return K8sValidator() elif coe == 'swarm' or coe == 'swarm-mode': return SwarmValidator() elif coe == 'mesos': return MesosValidator() else: raise exception.InvalidParameterValue( _('Requested COE type %s is not supported.') % coe) @classmethod def validate_network_driver(cls, driver): cls._validate_network_driver_supported(driver) cls._validate_network_driver_allowed(driver) @classmethod def _validate_network_driver_supported(cls, driver): """Confirm that driver is supported by Magnum for this COE.""" if driver not in cls.supported_network_drivers: raise exception.InvalidParameterValue(_( 'Network driver type %(driver)s is not supported, ' 'expecting a %(supported_drivers)s network driver.') % { 'driver': driver, 'supported_drivers': '/'.join( cls.supported_network_drivers + ['unspecified'])}) @classmethod def _validate_network_driver_allowed(cls, driver): """Confirm that driver is allowed via configuration for this COE.""" if ('all' not in cls.allowed_network_drivers and driver not in cls.allowed_network_drivers): raise exception.InvalidParameterValue(_( 'Network driver type %(driver)s is not allowed, ' 'expecting a %(allowed_drivers)s network driver. 
') % { 'driver': driver, 'allowed_drivers': '/'.join( cls.allowed_network_drivers + ['unspecified'])}) @classmethod def validate_volume_driver(cls, driver): cls._validate_volume_driver_supported(driver) @classmethod def _validate_volume_driver_supported(cls, driver): """Confirm that volume driver is supported by Magnum for this COE.""" if driver not in cls.supported_volume_driver: raise exception.InvalidParameterValue(_( 'Volume driver type %(driver)s is not supported, ' 'expecting a %(supported_volume_driver)s volume driver.') % { 'driver': driver, 'supported_volume_driver': '/'.join( cls.supported_volume_driver + ['unspecified'])}) @classmethod def validate_server_type(cls, server_type): cls._validate_server_type(server_type) @classmethod def _validate_server_type(cls, server_type): """Confirm that server type is supported by Magnum for this COE.""" if server_type not in cls.supported_server_types: raise exception.InvalidParameterValue(_( 'Server type %(server_type)s is not supported, ' 'expecting a %(supported_server_types)s server type.') % { 'server_type': server_type, 'supported_server_types': '/'.join( cls.supported_server_types + ['unspecified'])}) class K8sValidator(Validator): supported_network_drivers = ['flannel', 'calico'] supported_server_types = ['vm', 'bm'] allowed_network_drivers = ( CONF.cluster_template.kubernetes_allowed_network_drivers) default_network_driver = ( CONF.cluster_template.kubernetes_default_network_driver) supported_volume_driver = ['cinder'] class SwarmValidator(Validator): supported_network_drivers = ['docker', 'flannel'] supported_server_types = ['vm', 'bm'] allowed_network_drivers = (CONF.cluster_template. swarm_allowed_network_drivers) default_network_driver = (CONF.cluster_template. swarm_default_network_driver) supported_volume_driver = ['rexray'] class MesosValidator(Validator): supported_network_drivers = ['docker'] supported_server_types = ['vm', 'bm'] allowed_network_drivers = (CONF.cluster_template. 
mesos_allowed_network_drivers) default_network_driver = (CONF.cluster_template. mesos_default_network_driver) supported_volume_driver = ['rexray'] magnum-6.1.0/magnum/api/utils.py0000666000175100017510000001175513244017334016611 0ustar zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ast import jsonpatch from oslo_utils import uuidutils import pecan import wsme from magnum.common import exception from magnum.common import utils import magnum.conf from magnum.i18n import _ from magnum import objects CONF = magnum.conf.CONF JSONPATCH_EXCEPTIONS = (jsonpatch.JsonPatchException, jsonpatch.JsonPointerException, KeyError) DOCKER_MINIMUM_MEMORY = 4 * 1024 * 1024 def validate_limit(limit): if limit is not None and limit <= 0: raise wsme.exc.ClientSideError(_("Limit must be positive")) if limit is not None: return min(CONF.api.max_limit, limit) else: return CONF.api.max_limit def validate_sort_dir(sort_dir): if sort_dir not in ['asc', 'desc']: raise wsme.exc.ClientSideError(_("Invalid sort direction: %s. " "Acceptable values are " "'asc' or 'desc'") % sort_dir) return sort_dir def validate_docker_memory(mem_str): """Docker require that Minimum memory limit >= 4M.""" try: mem = utils.get_docker_quantity(mem_str) except exception.UnsupportedDockerQuantityFormat: raise wsme.exc.ClientSideError(_("Invalid docker memory specified. 
" "Acceptable values are format: " "[]," "where unit = b, k, m or g")) if mem < DOCKER_MINIMUM_MEMORY: raise wsme.exc.ClientSideError(_("Docker Minimum memory limit" "allowed is %d B.") % DOCKER_MINIMUM_MEMORY) def apply_jsonpatch(doc, patch): for p in patch: if p['op'] == 'add' and p['path'].count('/') == 1: attr = p['path'].lstrip('/') if attr not in doc: msg = _("Adding a new attribute %s to the root of " "the resource is not allowed.") % p['path'] raise wsme.exc.ClientSideError(msg) if doc[attr] is not None: msg = _("The attribute %s has existed, please use " "'replace' operation instead.") % p['path'] raise wsme.exc.ClientSideError(msg) if p['op'] == 'replace' and p['path'] == '/labels': try: val = p['value'] dict_val = val if type(val) == dict else ast.literal_eval(val) p['value'] = dict_val except (SyntaxError, ValueError, AssertionError) as e: raise exception.PatchError(patch=patch, reason=e) return jsonpatch.apply_patch(doc, patch) def get_resource(resource, resource_ident): """Get the resource from the uuid or logical name. :param resource: the resource type. :param resource_ident: the UUID or logical name of the resource. :returns: The resource. """ resource = getattr(objects, resource) if uuidutils.is_uuid_like(resource_ident): return resource.get_by_uuid(pecan.request.context, resource_ident) return resource.get_by_name(pecan.request.context, resource_ident) def get_openstack_resource(manager, resource_ident, resource_type): """Get the openstack resource from the uuid or logical name. :param manager: the resource manager class. :param resource_ident: the UUID or logical name of the resource. :param resource_type: the type of the resource :returns: The openstack resource. :raises: ResourceNotFound if the openstack resource is not exist. Conflict if multi openstack resources have same name. 
""" if uuidutils.is_uuid_like(resource_ident): resource_data = manager.get(resource_ident) else: filters = {'name': resource_ident} matches = list(manager.list(filters=filters)) if len(matches) == 0: raise exception.ResourceNotFound(name=resource_type, id=resource_ident) if len(matches) > 1: msg = ("Multiple %(resource_type)s exist with same name " "%(resource_ident)s. Please use the resource id " "instead." % {'resource_type': resource_type, 'resource_ident': resource_ident}) raise exception.Conflict(msg) resource_data = matches[0] return resource_data magnum-6.1.0/magnum/api/rest_api_version_history.rst0000666000175100017510000000412513244017334022756 0ustar zuulzuul00000000000000REST API Version History ======================== This documents the changes made to the REST API with every microversion change. The description for each version should be a verbose one which has enough information to be suitable for use in user documentation. 1.1 --- This is the initial version of the v1.1 API which supports microversions. The v1.1 API is from the REST API users's point of view exactly the same as v1.0 except with strong input validation. A user can specify a header in the API request:: OpenStack-API-Version: where ```` is any valid api version for this API. If no version is specified then the API will behave as if a version request of v1.1 was requested. 1.2 --- Support for async cluster (previously known as bay) operations Before v1.2 all magnum bay operations were synchronous and as a result API requests were blocked until response from HEAT service is received. With this change cluster-create/bay-create, cluster-update/bay-update and cluster-delete/bay-delete calls will be asynchronous. 1.3 --- Rollback cluster (previously known as bay) on update failure User can enable rollback on bay update failure by specifying microversion 1.3 in header({'OpenStack-API-Version': 'container-infra 1.3'}) and passing 'rollback=True' when issuing cluster/bay update request. 
For example:- - http://XXX/v1/clusters/XXX/?rollback=True or - http://XXX/v1/bays/XXX/?rollback=True 1.4 --- Add stats API An admin user can get total number of clusters and nodes for a specified tenant or for all the tenants and also a non-admin user can get self stats. For example:- - http://XXX/v1/stats or - http://XXX/v1/stats?project_id= or - http://XXX/v1/stats?project_id=&type= 1.5 --- Support for cluster CA certificate rotation This gives admins a way to revoke access to an existing cluster once a user has been granted access. 1.6 --- Add quotas API An admin user can set/update/delete/list quotas for the given tenant. A non-admin user can get self quota information. magnum-6.1.0/magnum/conf/0000775000175100017510000000000013244017675015250 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/conf/profiler.py0000666000175100017510000000150713244017334017441 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_utils import importutils profiler_opts = importutils.try_import('osprofiler.opts') def register_opts(conf): if profiler_opts: profiler_opts.set_defaults(conf) def list_opts(): return { profiler_opts._profiler_opt_group: profiler_opts._PROFILER_OPTS } magnum-6.1.0/magnum/conf/x509.py0000666000175100017510000000455013244017334016325 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. 
You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from magnum.common.x509 import extensions from magnum.i18n import _ ALLOWED_EXTENSIONS = ['"%s"' % e.value for e in extensions.Extensions] DEFAULT_ALLOWED_EXTENSIONS = [ extensions.Extensions.KEY_USAGE.value, extensions.Extensions.EXTENDED_KEY_USAGE.value, extensions.Extensions.SUBJECT_ALTERNATIVE_NAME.value, extensions.Extensions.BASIC_CONSTRAINTS.value, extensions.Extensions.SUBJECT_KEY_IDENTIFIER.value] ALLOWED_KEY_USAGE = ['"%s"' % e.value[0] for e in extensions.KeyUsages] DEFAULT_ALLOWED_KEY_USAGE = [ extensions.KeyUsages.DIGITAL_SIGNATURE.value[0], extensions.KeyUsages.KEY_ENCIPHERMENT.value[0], extensions.KeyUsages.CONTENT_COMMITMENT.value[0]] x509_group = cfg.OptGroup(name='x509', title='Options for X509 in Magnum') x509_opts = [ cfg.BoolOpt('allow_ca', default=False, help=_('Certificate can get the CA flag in x509 extensions.')), cfg.ListOpt('allowed_extensions', default=DEFAULT_ALLOWED_EXTENSIONS, help=_('List of allowed x509 extensions. Available values: ' '%s') % ', '.join(ALLOWED_EXTENSIONS)), cfg.ListOpt('allowed_key_usage', default=DEFAULT_ALLOWED_KEY_USAGE, help=_('List of allowed x509 key usage. Available values: ' '%s') % ', '.join(ALLOWED_KEY_USAGE)), cfg.IntOpt('term_of_validity', default=365 * 5, help=_('Number of days for which a certificate is valid.')), cfg.IntOpt('rsa_key_size', default=2048, help=_('Size of generated private key. 
'))] def register_opts(conf): conf.register_group(x509_group) conf.register_opts(x509_opts, group=x509_group) def list_opts(): return { x509_group: x509_opts } magnum-6.1.0/magnum/conf/docker.py0000666000175100017510000000350613244017334017067 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg docker_group = cfg.OptGroup(name='docker', title='Options for Docker engine') docker_opts = [ cfg.StrOpt('docker_remote_api_version', default='1.21', help='Docker remote api version. 
Override it according to ' 'specific docker api version in your environment.'), cfg.IntOpt('default_timeout', default=60, help='Default timeout in seconds for docker client ' 'operations.'), cfg.BoolOpt('api_insecure', default=False, help='If set, ignore any SSL validation issues'), cfg.StrOpt('ca_file', help='Location of CA certificates file for ' 'securing docker api requests (tlscacert).'), cfg.StrOpt('cert_file', help='Location of TLS certificate file for ' 'securing docker api requests (tlscert).'), cfg.StrOpt('key_file', help='Location of TLS private key file for ' 'securing docker api requests (tlskey).'), ] def register_opts(conf): conf.register_group(docker_group) conf.register_opts(docker_opts, group=docker_group) def list_opts(): return { docker_group: docker_opts } magnum-6.1.0/magnum/conf/drivers.py0000666000175100017510000000417613244017334017302 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg drivers_group = cfg.OptGroup(name='drivers', title='Options for the Drivers') drivers_opts = [ cfg.BoolOpt('verify_ca', default=True, help='Indicates whether the cluster nodes validate the ' 'Certificate Authority when making requests to the ' 'OpenStack APIs (Keystone, Magnum, Heat). 
If you have ' 'self-signed certificates for the OpenStack APIs or ' 'you have your own Certificate Authority and you ' 'have not installed the Certificate Authority to all ' 'nodes, you may need to disable CA validation by ' 'setting this flag to False.'), cfg.StrOpt('openstack_ca_file', default="", help='Path to the OpenStack CA-bundle file to pass and ' 'install in all cluster nodes.'), cfg.BoolOpt('send_cluster_metrics', default=True, help='Allow periodic tasks to pull COE data and send to ' 'ceilometer.'), cfg.ListOpt('disabled_drivers', default=[], help='Disabled driver entry points. The default value is []. ' ' Means if not specified, then all available drivers ' 'are enabled.' ), ] def register_opts(conf): conf.register_group(drivers_group) conf.register_opts(drivers_opts, group=drivers_group) def list_opts(): return { drivers_group: drivers_opts, } magnum-6.1.0/magnum/conf/rpc.py0000666000175100017510000000177413244017334016411 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg periodic_opts = [ cfg.BoolOpt('periodic_enable', default=True, help='Enable periodic tasks.'), cfg.IntOpt('periodic_interval_max', default=60, help='Max interval size between periodic tasks execution in ' 'seconds.'), ] def register_opts(conf): conf.register_opts(periodic_opts) def list_opts(): return { "DEFAULT": periodic_opts } magnum-6.1.0/magnum/conf/services.py0000666000175100017510000000215013244017334017435 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from magnum.i18n import _ service_opts = [ cfg.StrOpt('host', help=_('Name of this node. This can be an opaque identifier. ' 'It is not necessarily a hostname, FQDN, or IP address. ' 'However, the node name must be valid within ' 'an AMQP key, and if using ZeroMQ, a valid ' 'hostname, FQDN, or IP address.')), ] def register_opts(conf): conf.register_opts(service_opts) def list_opts(): return { "DEFAULT": service_opts } magnum-6.1.0/magnum/conf/database.py0000666000175100017510000000227313244017334017364 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. 
You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from oslo_db import options from magnum.conf import paths _DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('magnum.sqlite') database_group = cfg.OptGroup(name='database', title='Options for Magnum Database') sql_opts = [ cfg.StrOpt('mysql_engine', default='InnoDB', help='MySQL engine to use.') ] def register_opts(conf): conf.register_group(database_group) conf.register_opts(sql_opts, group=database_group) options.set_defaults(conf, connection=_DEFAULT_SQL_CONNECTION) def list_opts(): return { database_group: sql_opts } magnum-6.1.0/magnum/conf/certificates.py0000666000175100017510000000313413244017334020262 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools from oslo_config import cfg DEFAULT_CERT_MANAGER = 'barbican' TLS_STORAGE_DEFAULT = '/var/lib/magnum/certificates/' certificates_group = cfg.OptGroup(name='certificates', title='Certificate options for the ' 'cert manager.') cert_manager_opts = [ cfg.StrOpt('cert_manager_type', default=DEFAULT_CERT_MANAGER, help='Certificate Manager plugin. 
' 'Defaults to {0}.'.format(DEFAULT_CERT_MANAGER)) ] local_cert_manager_opts = [ cfg.StrOpt('storage_path', default=TLS_STORAGE_DEFAULT, help='Absolute path of the certificate storage directory. ' 'Defaults to /var/lib/magnum/certificates/.') ] ALL_OPTS = list(itertools.chain( cert_manager_opts, local_cert_manager_opts )) def register_opts(conf): conf.register_group(certificates_group) conf.register_opts(ALL_OPTS, group=certificates_group) def list_opts(): return { certificates_group: ALL_OPTS } magnum-6.1.0/magnum/conf/conductor.py0000666000175100017510000000272413244017334017621 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg conductor_group = cfg.OptGroup(name='conductor', title='Options for the magnum-conductor ' 'service') conductor_service_opts = [ cfg.StrOpt('topic', default='magnum-conductor', help='The queue to add conductor tasks to.'), cfg.IntOpt('conductor_life_check_timeout', default=4, help=('RPC timeout for the conductor liveness check that is ' 'used for cluster locking.')), cfg.IntOpt('workers', help='Number of magnum-conductor processes to fork and run. 
' 'Default to number of CPUs on the host.') ] def register_opts(conf): conf.register_group(conductor_group) conf.register_opts(conductor_service_opts, group=conductor_group) def list_opts(): return { conductor_group: conductor_service_opts } magnum-6.1.0/magnum/conf/keystone.py0000666000175100017510000000347513244017334017466 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keystoneauth1 import loading as ka_loading from oslo_config import cfg CFG_GROUP = 'keystone_auth' CFG_LEGACY_GROUP = 'keystone_authtoken' legacy_session_opts = { 'certfile': [cfg.DeprecatedOpt('certfile', CFG_LEGACY_GROUP)], 'keyfile': [cfg.DeprecatedOpt('keyfile', CFG_LEGACY_GROUP)], 'cafile': [cfg.DeprecatedOpt('cafile', CFG_LEGACY_GROUP)], 'insecure': [cfg.DeprecatedOpt('insecure', CFG_LEGACY_GROUP)], 'timeout': [cfg.DeprecatedOpt('timeout', CFG_LEGACY_GROUP)], } keystone_auth_group = cfg.OptGroup(name=CFG_GROUP, title='Options for Keystone in Magnum') def register_opts(conf): # FIXME(pauloewerton): remove import of authtoken group and legacy options # after deprecation period conf.import_group(CFG_LEGACY_GROUP, 'keystonemiddleware.auth_token') ka_loading.register_auth_conf_options(conf, CFG_GROUP) ka_loading.register_session_conf_options( conf, CFG_GROUP, deprecated_opts=legacy_session_opts) conf.set_default('auth_type', default='password', group=CFG_GROUP) def list_opts(): keystone_auth_opts = (ka_loading.get_auth_common_conf_options() + 
ka_loading.get_auth_plugin_conf_options('password')) return { keystone_auth_group: keystone_auth_opts } magnum-6.1.0/magnum/conf/glance.py0000666000175100017510000000405413244017334017050 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools from oslo_config import cfg from magnum.i18n import _ glance_group = cfg.OptGroup(name='glance_client', title='Options for the Glance client') glance_client_opts = [ cfg.StrOpt('region_name', help=_('Region in Identity service catalog to use for ' 'communication with the OpenStack service.')), cfg.StrOpt('endpoint_type', default='publicURL', help=_('Type of endpoint in Identity service catalog to use ' 'for communication with the OpenStack service.')), cfg.StrOpt('api_version', default='2', help=_('Version of Glance API to use in glanceclient.'))] common_security_opts = [ cfg.StrOpt('ca_file', help=_('Optional CA cert file to use in SSL connections.')), cfg.StrOpt('cert_file', help=_('Optional PEM-formatted certificate chain file.')), cfg.StrOpt('key_file', help=_('Optional PEM-formatted file that contains the ' 'private key.')), cfg.BoolOpt('insecure', default=False, help=_("If set, then the server's certificate will not " "be verified."))] ALL_OPTS = list(itertools.chain( glance_client_opts, common_security_opts )) def register_opts(conf): conf.register_group(glance_group) conf.register_opts(ALL_OPTS, group=glance_group) def list_opts(): return { glance_group: ALL_OPTS } 
magnum-6.1.0/magnum/conf/api.py0000666000175100017510000000413113244017334016364 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg api_group = cfg.OptGroup(name='api', title='Options for the magnum-api service') api_service_opts = [ cfg.PortOpt('port', default=9511, help='The port for the Magnum API server.'), cfg.IPOpt('host', default='127.0.0.1', help='The listen IP for the Magnum API server.'), cfg.IntOpt('max_limit', default=1000, help='The maximum number of items returned in a single ' 'response from a collection resource.'), cfg.StrOpt('api_paste_config', default="api-paste.ini", help="Configuration file for WSGI definition of API." ), cfg.StrOpt('ssl_cert_file', help="This option allows setting path to the SSL certificate " "of API server. "), cfg.StrOpt('ssl_key_file', help="This option specifies the path to the file where SSL " "private key of API server is stored when SSL is in " "effect. "), cfg.BoolOpt('enabled_ssl', default=False, help='Enable SSL Magnum API service'), cfg.IntOpt('workers', help='The maximum number of magnum-api processes to ' 'fork and run. 
Default to number of CPUs on the host.') ] def register_opts(conf): conf.register_group(api_group) conf.register_opts(api_service_opts, group=api_group) def list_opts(): return { api_group: api_service_opts } magnum-6.1.0/magnum/conf/cinder.py0000666000175100017510000000324713244017334017066 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from magnum.i18n import _ cinder_group = cfg.OptGroup( name='cinder', title='Options for the Cinder configuration') cinder_client_group = cfg.OptGroup( name='cinder_client', title='Options for the Cinder client') cinder_opts = [ cfg.StrOpt('default_docker_volume_type', default='', help=_('The default docker volume_type to use for volumes ' 'used for docker storage. 
To use the cinder volumes ' 'for docker storage, you need to select a default ' 'value.'))] cinder_client_opts = [ cfg.StrOpt('region_name', help=_('Region in Identity service catalog to use for ' 'communication with the OpenStack service.'))] def register_opts(conf): conf.register_group(cinder_group) conf.register_group(cinder_client_group) conf.register_opts(cinder_opts, group=cinder_group) conf.register_opts(cinder_client_opts, group=cinder_client_group) def list_opts(): return { cinder_group: cinder_opts, cinder_client_group: cinder_client_opts } magnum-6.1.0/magnum/conf/heat.py0000666000175100017510000000402613244017334016537 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import itertools from oslo_config import cfg from magnum.i18n import _ heat_group = cfg.OptGroup(name='heat_client', title='Options for the Heat client') heat_client_opts = [ cfg.StrOpt('region_name', help=_('Region in Identity service catalog to use for ' 'communication with the OpenStack service.')), cfg.StrOpt('endpoint_type', default='publicURL', help=_('Type of endpoint in Identity service catalog to use ' 'for communication with the OpenStack service.')), cfg.StrOpt('api_version', default='1', help=_('Version of Heat API to use in heatclient.'))] common_security_opts = [ cfg.StrOpt('ca_file', help=_('Optional CA cert file to use in SSL connections.')), cfg.StrOpt('cert_file', help=_('Optional PEM-formatted certificate chain file.')), cfg.StrOpt('key_file', help=_('Optional PEM-formatted file that contains the ' 'private key.')), cfg.BoolOpt('insecure', default=False, help=_("If set, then the server's certificate will not " "be verified."))] ALL_OPTS = list(itertools.chain( heat_client_opts, common_security_opts )) def register_opts(conf): conf.register_group(heat_group) conf.register_opts(ALL_OPTS, group=heat_group) def list_opts(): return { heat_group: ALL_OPTS } magnum-6.1.0/magnum/conf/neutron.py0000666000175100017510000000366213244017334017315 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import itertools from oslo_config import cfg from magnum.i18n import _ neutron_group = cfg.OptGroup(name='neutron_client', title='Options for the neutron client') neutron_client_opts = [ cfg.StrOpt('region_name', help=_('Region in Identity service catalog to use for ' 'communication with the OpenStack service.')), cfg.StrOpt('endpoint_type', default='publicURL', help=_('Type of endpoint in Identity service catalog to use ' 'for communication with the OpenStack service.'))] common_security_opts = [ cfg.StrOpt('ca_file', help=_('Optional CA cert file to use in SSL connections.')), cfg.StrOpt('cert_file', help=_('Optional PEM-formatted certificate chain file.')), cfg.StrOpt('key_file', help=_('Optional PEM-formatted file that contains the ' 'private key.')), cfg.BoolOpt('insecure', default=False, help=_("If set, then the server's certificate will not " "be verified."))] ALL_OPTS = list(itertools.chain( neutron_client_opts, common_security_opts )) def register_opts(conf): conf.register_group(neutron_group) conf.register_opts(ALL_OPTS, group=neutron_group) def list_opts(): return { neutron_group: ALL_OPTS } magnum-6.1.0/magnum/conf/docker_registry.py0000666000175100017510000000240613244017334021015 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg from magnum.i18n import _ docker_registry_group = cfg.OptGroup(name='docker_registry', title='Options for Docker Registry') docker_registry_opts = [ cfg.StrOpt('swift_region', help=_('Region name of Swift')), cfg.StrOpt('swift_registry_container', default='docker_registry', help=_('Name of the container in Swift which docker registry ' 'stores images in')) ] def register_opts(conf): conf.register_group(docker_registry_group) conf.register_opts(docker_registry_opts, group=docker_registry_group) def list_opts(): return { docker_registry_group: docker_registry_opts } magnum-6.1.0/magnum/conf/barbican.py0000666000175100017510000000251413244017334017357 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg from magnum.i18n import _ barbican_group = cfg.OptGroup(name='barbican_client', title='Options for the Barbican client') barbican_client_opts = [ cfg.StrOpt('region_name', help=_('Region in Identity service catalog to use for ' 'communication with the OpenStack service.')), cfg.StrOpt('endpoint_type', default='publicURL', help=_('Type of endpoint in Identity service catalog to use ' 'for communication with the OpenStack service.'))] def register_opts(conf): conf.register_group(barbican_group) conf.register_opts(barbican_client_opts, group=barbican_group) def list_opts(): return { barbican_group: barbican_client_opts } magnum-6.1.0/magnum/conf/magnum_client.py0000666000175100017510000000253313244017334020441 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg from magnum.i18n import _ magnum_client_group = cfg.OptGroup(name='magnum_client', title='Options for the Magnum client') magnum_client_opts = [ cfg.StrOpt('region_name', help=_('Region in Identity service catalog to use for ' 'communication with the OpenStack service.')), cfg.StrOpt('endpoint_type', default='publicURL', help=_('Type of endpoint in Identity service catalog to use ' 'for communication with the OpenStack service.'))] def register_opts(conf): conf.register_group(magnum_client_group) conf.register_opts(magnum_client_opts, group=magnum_client_group) def list_opts(): return { magnum_client_group: magnum_client_opts } magnum-6.1.0/magnum/conf/cluster_templates.py0000666000175100017510000000562513244017334021363 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from magnum.i18n import _ cluster_template_group = cfg.OptGroup(name='cluster_template', title='Options for cluster_template') cluster_template_opts = [ cfg.ListOpt('kubernetes_allowed_network_drivers', default=['all'], help=_("Allowed network drivers for kubernetes " "cluster-templates. Use 'all' keyword to allow all " "drivers supported for kubernetes cluster-templates. 
" "Supported network drivers include flannel."), deprecated_group='baymodel'), cfg.StrOpt('kubernetes_default_network_driver', default='flannel', help=_("Default network driver for kubernetes " "cluster-templates."), deprecated_group='baymodel'), cfg.ListOpt('swarm_allowed_network_drivers', default=['all'], help=_("Allowed network drivers for docker swarm " "cluster-templates. Use 'all' keyword to allow all " "drivers supported for swarm cluster-templates. " "Supported network drivers include docker and flannel." ), deprecated_group='baymodel'), cfg.StrOpt('swarm_default_network_driver', default='docker', help=_("Default network driver for docker swarm " "cluster-templates."), deprecated_group='baymodel'), cfg.ListOpt('mesos_allowed_network_drivers', default=['all'], help=_("Allowed network drivers for mesos cluster-templates. " "Use 'all' keyword to allow all drivers supported " "for mesos cluster-templates. Supported network " "drivers include docker."), deprecated_group='baymodel'), cfg.StrOpt('mesos_default_network_driver', default='docker', help=_("Default network driver for mesos cluster-templates."), deprecated_group='baymodel'), ] def register_opts(conf): conf.register_group(cluster_template_group) conf.register_opts(cluster_template_opts, group=cluster_template_group) def list_opts(): return { cluster_template_group: cluster_template_opts } magnum-6.1.0/magnum/conf/__init__.py0000666000175100017510000000432513244017334017357 0ustar zuulzuul00000000000000# Copyright 2016 Fujitsu Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from magnum.conf import api from magnum.conf import barbican from magnum.conf import certificates from magnum.conf import cinder from magnum.conf import cluster from magnum.conf import cluster_heat from magnum.conf import cluster_templates from magnum.conf import conductor from magnum.conf import database from magnum.conf import docker from magnum.conf import docker_registry from magnum.conf import drivers from magnum.conf import glance from magnum.conf import heat from magnum.conf import keystone from magnum.conf import magnum_client from magnum.conf import neutron from magnum.conf import nova from magnum.conf import paths from magnum.conf import profiler from magnum.conf import quota from magnum.conf import rpc from magnum.conf import services from magnum.conf import trust from magnum.conf import utils from magnum.conf import x509 CONF = cfg.CONF api.register_opts(CONF) barbican.register_opts(CONF) cluster.register_opts(CONF) cluster_templates.register_opts(CONF) cluster_heat.register_opts(CONF) certificates.register_opts(CONF) cinder.register_opts(CONF) conductor.register_opts(CONF) database.register_opts(CONF) docker.register_opts(CONF) docker_registry.register_opts(CONF) drivers.register_opts(CONF) glance.register_opts(CONF) heat.register_opts(CONF) keystone.register_opts(CONF) magnum_client.register_opts(CONF) neutron.register_opts(CONF) nova.register_opts(CONF) paths.register_opts(CONF) quota.register_opts(CONF) rpc.register_opts(CONF) services.register_opts(CONF) trust.register_opts(CONF) utils.register_opts(CONF) x509.register_opts(CONF) profiler.register_opts(CONF) magnum-6.1.0/magnum/conf/nova.py0000666000175100017510000000402613244017334016561 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. 
You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools from oslo_config import cfg from magnum.i18n import _ nova_group = cfg.OptGroup(name='nova_client', title='Options for the nova client') nova_client_opts = [ cfg.StrOpt('region_name', help=_('Region in Identity service catalog to use for ' 'communication with the OpenStack service.')), cfg.StrOpt('endpoint_type', default='publicURL', help=_('Type of endpoint in Identity service catalog to use ' 'for communication with the OpenStack service.')), cfg.StrOpt('api_version', default='2', help=_('Version of Nova API to use in novaclient.'))] common_security_opts = [ cfg.StrOpt('ca_file', help=_('Optional CA cert file to use in SSL connections.')), cfg.StrOpt('cert_file', help=_('Optional PEM-formatted certificate chain file.')), cfg.StrOpt('key_file', help=_('Optional PEM-formatted file that contains the ' 'private key.')), cfg.BoolOpt('insecure', default=False, help=_("If set, then the server's certificate will not " "be verified."))] ALL_OPTS = list(itertools.chain( nova_client_opts, common_security_opts )) def register_opts(conf): conf.register_group(nova_group) conf.register_opts(ALL_OPTS, group=nova_group) def list_opts(): return { nova_group: ALL_OPTS } magnum-6.1.0/magnum/conf/cluster_heat.py0000666000175100017510000000406413244017334020302 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. 
You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg cluster_heat_group = cfg.OptGroup(name='cluster_heat', title='Heat options for Cluster ' 'configuration') cluster_heat_opts = [ cfg.IntOpt('max_attempts', default=2000, help=('Number of attempts to query the Heat stack for ' 'finding out the status of the created stack and ' 'getting template outputs. This value is ignored ' 'during cluster creation if timeout is set as the poll ' 'will continue until cluster creation either ends ' 'or times out.'), deprecated_group='bay_heat'), cfg.IntOpt('wait_interval', default=1, help=('Sleep time interval between two attempts of querying ' 'the Heat stack. This interval is in seconds.'), deprecated_group='bay_heat'), cfg.IntOpt('create_timeout', default=60, help=('The length of time to let cluster creation continue. ' 'This interval is in minutes. The default is 60 minutes.' ), deprecated_group='bay_heat', deprecated_name='bay_create_timeout') ] def register_opts(conf): conf.register_group(cluster_heat_group) conf.register_opts(cluster_heat_opts, group=cluster_heat_group) def list_opts(): return { cluster_heat_group: cluster_heat_opts } magnum-6.1.0/magnum/conf/trust.py0000666000175100017510000000567713244017334017014 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. 
You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from magnum.i18n import _ trust_group = cfg.OptGroup(name='trust', title='Trustee options for the magnum services') trust_opts = [ cfg.BoolOpt('cluster_user_trust', default=False, help=_('This setting controls whether to assign a trust to' ' the cluster user or not. You will need to set it to' ' True for clusters with volume_driver=cinder or' ' registry_enabled=true in the underlying cluster' ' template to work. This is a potential security risk' ' since the trust gives instances OpenStack API access' " to the cluster's project. Note that this setting" ' does not affect per-cluster trusts assigned to the' 'Magnum service user.')), cfg.StrOpt('trustee_domain_id', help=_('Id of the domain to create trustee for clusters')), cfg.StrOpt('trustee_domain_name', help=_('Name of the domain to create trustee for s')), cfg.StrOpt('trustee_domain_admin_id', help=_('Id of the admin with roles sufficient to manage users' ' in the trustee_domain')), cfg.StrOpt('trustee_domain_admin_name', help=_('Name of the admin with roles sufficient to manage users' ' in the trustee_domain')), cfg.StrOpt('trustee_domain_admin_domain_id', help=_('Id of the domain admin user\'s domain.' ' trustee_domain_id is used by default')), cfg.StrOpt('trustee_domain_admin_domain_name', help=_('Name of the domain admin user\'s domain.' 
' trustee_domain_name is used by default')), cfg.StrOpt('trustee_domain_admin_password', secret=True, help=_('Password of trustee_domain_admin')), cfg.ListOpt('roles', default=[], help=_('The roles which are delegated to the trustee ' 'by the trustor')), cfg.StrOpt('trustee_keystone_interface', default='public', help=_('Auth interface used by instances/trustee')) ] def register_opts(conf): conf.register_group(trust_group) conf.register_opts(trust_opts, group=trust_group) def list_opts(): return { trust_group: trust_opts } magnum-6.1.0/magnum/conf/cluster.py0000666000175100017510000000410413244017334017274 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from magnum.i18n import _ cluster_group = cfg.OptGroup(name='cluster', title='Options for Cluster configuration') cluster_def_opts = [ cfg.StrOpt('etcd_discovery_service_endpoint_format', default='https://discovery.etcd.io/new?size=%(size)d', help=_('Url for etcd public discovery endpoint.'), deprecated_group='bay'), cfg.ListOpt('enabled_definitions', deprecated_for_removal=True, deprecated_reason=_('This configuration option is no longer ' 'used. 
Installing a new driver enables ' 'it for use automatically.'), default=['magnum_vm_atomic_k8s', 'magnum_bm_fedora_k8s', 'magnum_vm_coreos_k8s', 'magnum_vm_atomic_swarm', 'magnum_vm_ubuntu_mesos'], help=_('Enabled cluster definition entry points.'), deprecated_group='bay'), cfg.StrOpt('nodes_affinity_policy', default='soft-anti-affinity', help=_('Affinity policy for server group of cluster nodes.' 'Possible values include "affinity", "anti-affinity",' '"soft-affinity" and "soft-anti-affinity".') ), ] def register_opts(conf): conf.register_group(cluster_group) conf.register_opts(cluster_def_opts, group=cluster_group) def list_opts(): return { cluster_group: cluster_def_opts } magnum-6.1.0/magnum/conf/paths.py0000666000175100017510000000352413244017334016737 0ustar zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os from oslo_config import cfg path_opts = [ cfg.StrOpt('pybasedir', default=os.path.abspath(os.path.join(os.path.dirname(__file__), '../')), help='Directory where the magnum python module is installed.'), cfg.StrOpt('bindir', default='$pybasedir/bin', help='Directory where magnum binaries are installed.'), cfg.StrOpt('state_path', default='$pybasedir', help="Top-level directory for maintaining magnum's state."), ] def basedir_def(*args): """Return an uninterpolated path relative to $pybasedir.""" return os.path.join('$pybasedir', *args) def bindir_def(*args): """Return an uninterpolated path relative to $bindir.""" return os.path.join('$bindir', *args) def state_path_def(*args): """Return an uninterpolated path relative to $state_path.""" return os.path.join('$state_path', *args) def register_opts(conf): conf.register_opts(path_opts) def list_opts(): return { "DEFAULT": path_opts } magnum-6.1.0/magnum/conf/opts.py0000666000175100017510000000522713244017334016607 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This is the single point of entry to generate the sample configuration file for Magnum. It collects all the necessary info from the other modules in this package. 
It is assumed that: * every other module in this package has a 'list_opts' function which return a dict where * the keys are strings which are the group names * the value of each key is a list of config options for that group * the magnum.conf package doesn't have further packages with config options * this module is only used in the context of sample file generation """ import collections import importlib import os import pkgutil LIST_OPTS_FUNC_NAME = "list_opts" def _tupleize(dct): """Take the dict of options and convert to the 2-tuple format.""" return [(key, val) for key, val in dct.items()] def list_opts(): opts = collections.defaultdict(list) module_names = _list_module_names() imported_modules = _import_modules(module_names) _append_config_options(imported_modules, opts) return _tupleize(opts) def _list_module_names(): module_names = [] package_path = os.path.dirname(os.path.abspath(__file__)) for _, modname, ispkg in pkgutil.iter_modules(path=[package_path]): if modname == "opts" or ispkg: continue else: module_names.append(modname) return module_names def _import_modules(module_names): imported_modules = [] for modname in module_names: mod = importlib.import_module("magnum.conf." + modname) if not hasattr(mod, LIST_OPTS_FUNC_NAME): msg = "The module 'magnum.conf.%s' should have a '%s' "\ "function which returns the config options." % \ (modname, LIST_OPTS_FUNC_NAME) raise AttributeError(msg) else: imported_modules.append(mod) return imported_modules def _append_config_options(imported_modules, config_options): for mod in imported_modules: configs = mod.list_opts() for key, val in configs.items(): config_options[key].extend(val) magnum-6.1.0/magnum/conf/utils.py0000666000175100017510000000363713244017334016765 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. 
You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools from oslo_config import cfg from magnum.i18n import _ # Default symbols to use for passwords. Avoids visually confusing characters. # ~6 bits per symbol DEFAULT_PASSWORD_SYMBOLS = ['23456789', # Removed: 0,1 'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O 'abcdefghijkmnopqrstuvwxyz'] # Removed: l utils_opts = [ cfg.StrOpt('rootwrap_config', default="/etc/magnum/rootwrap.conf", help='Path to the rootwrap configuration file to use for ' 'running commands as root.'), cfg.StrOpt('tempdir', help='Explicitly specify the temporary working directory.'), cfg.ListOpt('password_symbols', default=DEFAULT_PASSWORD_SYMBOLS, help='Symbols to use for passwords') ] periodic_opts = [ cfg.IntOpt('service_down_time', default=180, help='Max interval size between periodic tasks execution in ' 'seconds.'), ] urlfetch_opts = [ cfg.IntOpt('max_manifest_size', default=524288, help=_('Maximum raw byte size of any manifest.')) ] ALL_OPTS = list(itertools.chain( utils_opts, periodic_opts, urlfetch_opts )) def register_opts(conf): conf.register_opts(ALL_OPTS) def list_opts(): return { "DEFAULT": ALL_OPTS } magnum-6.1.0/magnum/conf/quota.py0000666000175100017510000000240413244017334016745 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. 
You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from magnum.i18n import _ quotas_group = cfg.OptGroup(name='quotas', title='Options for quota configuration') quotas_def_opts = [ cfg.IntOpt('max_clusters_per_project', default=20, help=_('Max number of clusters allowed per project. Admin can ' 'override this default quota for a project by setting ' 'explicit limit in quotas DB table (using /quotas REST ' 'API endpoint).')), ] def register_opts(conf): conf.register_group(quotas_group) conf.register_opts(quotas_def_opts, group=quotas_group) def list_opts(): return { quotas_group: quotas_def_opts } magnum-6.1.0/magnum/drivers/0000775000175100017510000000000013244017675016001 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/k8s_fedora_atomic_v1/0000775000175100017510000000000013244017675021770 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/k8s_fedora_atomic_v1/version.py0000666000175100017510000000126213244017334024022 0ustar zuulzuul00000000000000# Copyright 2016 - Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
version = '1.0.0' driver = 'k8s_fedora_atomic_v1' container_version = '1.12.6' magnum-6.1.0/magnum/drivers/k8s_fedora_atomic_v1/driver.py0000666000175100017510000000254713244017334023637 0ustar zuulzuul00000000000000# Copyright 2016 Rackspace Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.drivers.common import k8s_monitor from magnum.drivers.heat import driver from magnum.drivers.k8s_fedora_atomic_v1 import template_def class Driver(driver.HeatDriver): @property def provides(self): return [ {'server_type': 'vm', 'os': 'fedora-atomic', 'coe': 'kubernetes'}, ] def get_template_definition(self): return template_def.AtomicK8sTemplateDefinition() def get_monitor(self, context, cluster): return k8s_monitor.K8sMonitor(context, cluster) def get_scale_manager(self, context, osclient, cluster): # FIXME: Until the kubernetes client is fixed, remove # the scale_manager. 
# https://bugs.launchpad.net/magnum/+bug/1746510 return None magnum-6.1.0/magnum/drivers/k8s_fedora_atomic_v1/tools/0000775000175100017510000000000013244017675023130 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/k8s_fedora_atomic_v1/tools/grafana-prometheus-dashboard.json0000666000175100017510000016324613244017334031546 0ustar zuulzuul00000000000000{ "__inputs": [ { "name": "DS_PROMETHEUS", "label": "Prometheus", "description": "Compliant with Prometheus 1.5.2", "type": "datasource", "pluginId": "prometheus", "pluginName": "Prometheus" } ], "__requires": [ { "type": "panel", "id": "graph", "name": "Graph", "version": "" }, { "type": "panel", "id": "singlestat", "name": "Singlestat", "version": "" }, { "type": "grafana", "id": "grafana", "name": "Grafana", "version": "3.1.1" }, { "type": "datasource", "id": "prometheus", "name": "Prometheus", "version": "1.3.0" } ], "id": null, "title": "Kubernetes cluster monitoring (via Prometheus)", "description": "Monitors Kubernetes cluster using Prometheus. Shows overall cluster CPU / Memory / Filesystem usage as well as individual pod, containers, systemd services statistics. 
Uses cAdvisor metrics only.", "tags": [ "kubernetes" ], "style": "dark", "timezone": "browser", "editable": true, "hideControls": false, "sharedCrosshair": false, "rows": [ { "collapse": false, "editable": true, "height": "200px", "panels": [ { "aliasColors": {}, "bars": false, "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "fill": 1, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)", "thresholdLine": false }, "height": "200px", "id": 32, "isNew": true, "legend": { "alignAsTable": false, "avg": true, "current": true, "max": false, "min": false, "rightSide": false, "show": false, "sideWidth": 200, "sort": "current", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 12, "stack": false, "steppedLine": false, "targets": [ { "expr": "sum (rate (container_network_receive_bytes_total{kubernetes_io_hostname=~\"^$Node$\"}[1m]))", "interval": "10s", "intervalFactor": 1, "legendFormat": "Received", "metric": "network", "refId": "A", "step": 10 }, { "expr": "- sum (rate (container_network_transmit_bytes_total{kubernetes_io_hostname=~\"^$Node$\"}[1m]))", "interval": "10s", "intervalFactor": 1, "legendFormat": "Sent", "metric": "network", "refId": "B", "step": 10 } ], "timeFrom": null, "timeShift": null, "title": "Network I/O pressure", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "cumulative" }, "transparent": false, "type": "graph", "xaxis": { "show": true }, "yaxes": [ { "format": "Bps", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "Bps", "label": null, "logBase": 1, "max": null, "min": null, "show": false } ] } ], "title": "Network I/O pressure" }, { "collapse": false, "editable": true, "height": 
"250px", "panels": [ { "cacheTimeout": null, "colorBackground": false, "colorValue": true, "colors": [ "rgba(50, 172, 45, 0.97)", "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)" ], "datasource": "${DS_PROMETHEUS}", "editable": true, "error": false, "format": "percent", "gauge": { "maxValue": 100, "minValue": 0, "show": true, "thresholdLabels": false, "thresholdMarkers": true }, "height": "180px", "id": 4, "interval": null, "isNew": true, "links": [], "mappingType": 1, "mappingTypes": [ { "name": "value to text", "value": 1 }, { "name": "range to text", "value": 2 } ], "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, "postfix": "", "postfixFontSize": "50%", "prefix": "", "prefixFontSize": "50%", "rangeMaps": [ { "from": "null", "text": "N/A", "to": "null" } ], "span": 4, "sparkline": { "fillColor": "rgba(31, 118, 189, 0.18)", "full": false, "lineColor": "rgb(31, 120, 193)", "show": false }, "targets": [ { "expr": "sum (container_memory_working_set_bytes{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) / sum (machine_memory_bytes{kubernetes_io_hostname=~\"^$Node$\"}) * 100", "interval": "10s", "intervalFactor": 1, "refId": "A", "step": 10 } ], "thresholds": "65, 90", "title": "Cluster memory usage", "transparent": false, "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ { "op": "=", "text": "N/A", "value": "null" } ], "valueName": "current" }, { "cacheTimeout": null, "colorBackground": false, "colorValue": true, "colors": [ "rgba(50, 172, 45, 0.97)", "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)" ], "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "format": "percent", "gauge": { "maxValue": 100, "minValue": 0, "show": true, "thresholdLabels": false, "thresholdMarkers": true }, "height": "180px", "id": 6, "interval": null, "isNew": true, "links": [], "mappingType": 1, "mappingTypes": [ { "name": "value to text", "value": 1 }, { "name": "range to text", "value": 2 } ], "maxDataPoints": 100, 
"nullPointMode": "connected", "nullText": null, "postfix": "", "postfixFontSize": "50%", "prefix": "", "prefixFontSize": "50%", "rangeMaps": [ { "from": "null", "text": "N/A", "to": "null" } ], "span": 4, "sparkline": { "fillColor": "rgba(31, 118, 189, 0.18)", "full": false, "lineColor": "rgb(31, 120, 193)", "show": false }, "targets": [ { "expr": "sum (rate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) / sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"}) * 100", "interval": "10s", "intervalFactor": 1, "refId": "A", "step": 10 } ], "thresholds": "65, 90", "title": "Cluster CPU usage (1m avg)", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ { "op": "=", "text": "N/A", "value": "null" } ], "valueName": "current" }, { "cacheTimeout": null, "colorBackground": false, "colorValue": true, "colors": [ "rgba(50, 172, 45, 0.97)", "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)" ], "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "format": "percent", "gauge": { "maxValue": 100, "minValue": 0, "show": true, "thresholdLabels": false, "thresholdMarkers": true }, "height": "180px", "id": 7, "interval": null, "isNew": true, "links": [], "mappingType": 1, "mappingTypes": [ { "name": "value to text", "value": 1 }, { "name": "range to text", "value": 2 } ], "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, "postfix": "", "postfixFontSize": "50%", "prefix": "", "prefixFontSize": "50%", "rangeMaps": [ { "from": "null", "text": "N/A", "to": "null" } ], "span": 4, "sparkline": { "fillColor": "rgba(31, 118, 189, 0.18)", "full": false, "lineColor": "rgb(31, 120, 193)", "show": false }, "targets": [ { "expr": "sum (container_fs_usage_bytes{device=~\"^/dev/.*$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) / sum (container_fs_limit_bytes{device=~\"^/dev/.*$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) * 100", "interval": "10s", "intervalFactor": 1, "legendFormat": 
"", "metric": "", "refId": "A", "step": 10 } ], "thresholds": "65, 90", "title": "Cluster filesystem usage", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ { "op": "=", "text": "N/A", "value": "null" } ], "valueName": "current" }, { "cacheTimeout": null, "colorBackground": false, "colorValue": false, "colors": [ "rgba(50, 172, 45, 0.97)", "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)" ], "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "format": "bytes", "gauge": { "maxValue": 100, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": true }, "height": "1px", "id": 9, "interval": null, "isNew": true, "links": [], "mappingType": 1, "mappingTypes": [ { "name": "value to text", "value": 1 }, { "name": "range to text", "value": 2 } ], "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, "postfix": "", "postfixFontSize": "20%", "prefix": "", "prefixFontSize": "20%", "rangeMaps": [ { "from": "null", "text": "N/A", "to": "null" } ], "span": 2, "sparkline": { "fillColor": "rgba(31, 118, 189, 0.18)", "full": false, "lineColor": "rgb(31, 120, 193)", "show": false }, "targets": [ { "expr": "sum (container_memory_working_set_bytes{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})", "interval": "10s", "intervalFactor": 1, "refId": "A", "step": 10 } ], "thresholds": "", "title": "Used", "type": "singlestat", "valueFontSize": "50%", "valueMaps": [ { "op": "=", "text": "N/A", "value": "null" } ], "valueName": "current" }, { "cacheTimeout": null, "colorBackground": false, "colorValue": false, "colors": [ "rgba(50, 172, 45, 0.97)", "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)" ], "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "format": "bytes", "gauge": { "maxValue": 100, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": true }, "height": "1px", "id": 10, "interval": null, "isNew": true, "links": [], "mappingType": 1, 
"mappingTypes": [ { "name": "value to text", "value": 1 }, { "name": "range to text", "value": 2 } ], "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, "postfix": "", "postfixFontSize": "50%", "prefix": "", "prefixFontSize": "50%", "rangeMaps": [ { "from": "null", "text": "N/A", "to": "null" } ], "span": 2, "sparkline": { "fillColor": "rgba(31, 118, 189, 0.18)", "full": false, "lineColor": "rgb(31, 120, 193)", "show": false }, "targets": [ { "expr": "sum (machine_memory_bytes{kubernetes_io_hostname=~\"^$Node$\"})", "interval": "10s", "intervalFactor": 1, "refId": "A", "step": 10 } ], "thresholds": "", "title": "Total", "type": "singlestat", "valueFontSize": "50%", "valueMaps": [ { "op": "=", "text": "N/A", "value": "null" } ], "valueName": "current" }, { "cacheTimeout": null, "colorBackground": false, "colorValue": false, "colors": [ "rgba(50, 172, 45, 0.97)", "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)" ], "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "format": "none", "gauge": { "maxValue": 100, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": true }, "height": "1px", "id": 11, "interval": null, "isNew": true, "links": [], "mappingType": 1, "mappingTypes": [ { "name": "value to text", "value": 1 }, { "name": "range to text", "value": 2 } ], "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, "postfix": " cores", "postfixFontSize": "30%", "prefix": "", "prefixFontSize": "50%", "rangeMaps": [ { "from": "null", "text": "N/A", "to": "null" } ], "span": 2, "sparkline": { "fillColor": "rgba(31, 118, 189, 0.18)", "full": false, "lineColor": "rgb(31, 120, 193)", "show": false }, "targets": [ { "expr": "sum (rate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[1m]))", "interval": "10s", "intervalFactor": 1, "refId": "A", "step": 10 } ], "thresholds": "", "title": "Used", "type": "singlestat", "valueFontSize": "50%", "valueMaps": [ 
{ "op": "=", "text": "N/A", "value": "null" } ], "valueName": "current" }, { "cacheTimeout": null, "colorBackground": false, "colorValue": false, "colors": [ "rgba(50, 172, 45, 0.97)", "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)" ], "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "format": "none", "gauge": { "maxValue": 100, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": true }, "height": "1px", "id": 12, "interval": null, "isNew": true, "links": [], "mappingType": 1, "mappingTypes": [ { "name": "value to text", "value": 1 }, { "name": "range to text", "value": 2 } ], "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, "postfix": " cores", "postfixFontSize": "30%", "prefix": "", "prefixFontSize": "50%", "rangeMaps": [ { "from": "null", "text": "N/A", "to": "null" } ], "span": 2, "sparkline": { "fillColor": "rgba(31, 118, 189, 0.18)", "full": false, "lineColor": "rgb(31, 120, 193)", "show": false }, "targets": [ { "expr": "sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"})", "interval": "10s", "intervalFactor": 1, "refId": "A", "step": 10 } ], "thresholds": "", "title": "Total", "type": "singlestat", "valueFontSize": "50%", "valueMaps": [ { "op": "=", "text": "N/A", "value": "null" } ], "valueName": "current" }, { "cacheTimeout": null, "colorBackground": false, "colorValue": false, "colors": [ "rgba(50, 172, 45, 0.97)", "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)" ], "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "format": "bytes", "gauge": { "maxValue": 100, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": true }, "height": "1px", "id": 13, "interval": null, "isNew": true, "links": [], "mappingType": 1, "mappingTypes": [ { "name": "value to text", "value": 1 }, { "name": "range to text", "value": 2 } ], "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, "postfix": "", 
"postfixFontSize": "50%", "prefix": "", "prefixFontSize": "50%", "rangeMaps": [ { "from": "null", "text": "N/A", "to": "null" } ], "span": 2, "sparkline": { "fillColor": "rgba(31, 118, 189, 0.18)", "full": false, "lineColor": "rgb(31, 120, 193)", "show": false }, "targets": [ { "expr": "sum (container_fs_usage_bytes{device=~\"^/dev/.*$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})", "interval": "10s", "intervalFactor": 1, "refId": "A", "step": 10 } ], "thresholds": "", "title": "Used", "type": "singlestat", "valueFontSize": "50%", "valueMaps": [ { "op": "=", "text": "N/A", "value": "null" } ], "valueName": "current" }, { "cacheTimeout": null, "colorBackground": false, "colorValue": false, "colors": [ "rgba(50, 172, 45, 0.97)", "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)" ], "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "format": "bytes", "gauge": { "maxValue": 100, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": true }, "height": "1px", "id": 14, "interval": null, "isNew": true, "links": [], "mappingType": 1, "mappingTypes": [ { "name": "value to text", "value": 1 }, { "name": "range to text", "value": 2 } ], "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, "postfix": "", "postfixFontSize": "50%", "prefix": "", "prefixFontSize": "50%", "rangeMaps": [ { "from": "null", "text": "N/A", "to": "null" } ], "span": 2, "sparkline": { "fillColor": "rgba(31, 118, 189, 0.18)", "full": false, "lineColor": "rgb(31, 120, 193)", "show": false }, "targets": [ { "expr": "sum (container_fs_limit_bytes{device=~\"^/dev/.*$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})", "interval": "10s", "intervalFactor": 1, "refId": "A", "step": 10 } ], "thresholds": "", "title": "Total", "type": "singlestat", "valueFontSize": "50%", "valueMaps": [ { "op": "=", "text": "N/A", "value": "null" } ], "valueName": "current" } ], "showTitle": false, "title": "Total usage" }, { "collapse": false, "editable": 
true, "height": "250px", "panels": [ { "aliasColors": {}, "bars": false, "datasource": "${DS_PROMETHEUS}", "decimals": 3, "editable": true, "error": false, "fill": 0, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "height": "", "id": 17, "isNew": true, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": false, "min": false, "rightSide": true, "show": true, "sort": "current", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 12, "stack": false, "steppedLine": true, "targets": [ { "expr": "sum (rate (container_cpu_usage_seconds_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (pod_name)", "interval": "10s", "intervalFactor": 1, "legendFormat": "{{ pod_name }}", "metric": "container_cpu", "refId": "A", "step": 10 } ], "timeFrom": null, "timeShift": null, "title": "Pods CPU usage (1m avg)", "tooltip": { "msResolution": true, "shared": true, "sort": 2, "value_type": "cumulative" }, "transparent": false, "type": "graph", "xaxis": { "show": true }, "yaxes": [ { "format": "none", "label": "cores", "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": false } ] } ], "showTitle": false, "title": "Pods CPU usage" }, { "collapse": true, "editable": true, "height": "250px", "panels": [ { "aliasColors": {}, "bars": false, "datasource": "${DS_PROMETHEUS}", "decimals": 3, "editable": true, "error": false, "fill": 0, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "height": "", "id": 23, "isNew": true, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": 
false, "min": false, "rightSide": true, "show": true, "sort": "current", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 12, "stack": false, "steppedLine": true, "targets": [ { "expr": "sum (rate (container_cpu_usage_seconds_total{systemd_service_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (systemd_service_name)", "hide": false, "interval": "10s", "intervalFactor": 1, "legendFormat": "{{ systemd_service_name }}", "metric": "container_cpu", "refId": "A", "step": 10 } ], "timeFrom": null, "timeShift": null, "title": "System services CPU usage (1m avg)", "tooltip": { "msResolution": true, "shared": true, "sort": 2, "value_type": "cumulative" }, "type": "graph", "xaxis": { "show": true }, "yaxes": [ { "format": "none", "label": "cores", "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": false } ] } ], "title": "System services CPU usage" }, { "collapse": true, "editable": true, "height": "250px", "panels": [ { "aliasColors": {}, "bars": false, "datasource": "${DS_PROMETHEUS}", "decimals": 3, "editable": true, "error": false, "fill": 0, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "height": "", "id": 24, "isNew": true, "legend": { "alignAsTable": true, "avg": true, "current": true, "hideEmpty": false, "hideZero": false, "max": false, "min": false, "rightSide": true, "show": true, "sideWidth": null, "sort": "current", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 12, "stack": false, "steppedLine": true, 
"targets": [ { "expr": "sum (rate (container_cpu_usage_seconds_total{image!=\"\",name=~\"^k8s_.*\",container_name!=\"POD\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (container_name, pod_name)", "hide": false, "interval": "10s", "intervalFactor": 1, "legendFormat": "pod: {{ pod_name }} | {{ container_name }}", "metric": "container_cpu", "refId": "A", "step": 10 }, { "expr": "sum (rate (container_cpu_usage_seconds_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (kubernetes_io_hostname, name, image)", "hide": false, "interval": "10s", "intervalFactor": 1, "legendFormat": "docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})", "metric": "container_cpu", "refId": "B", "step": 10 }, { "expr": "sum (rate (container_cpu_usage_seconds_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (kubernetes_io_hostname, rkt_container_name)", "interval": "10s", "intervalFactor": 1, "legendFormat": "rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}", "metric": "container_cpu", "refId": "C", "step": 10 } ], "timeFrom": null, "timeShift": null, "title": "Containers CPU usage (1m avg)", "tooltip": { "msResolution": true, "shared": true, "sort": 2, "value_type": "cumulative" }, "type": "graph", "xaxis": { "show": true }, "yaxes": [ { "format": "none", "label": "cores", "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": false } ] } ], "title": "Containers CPU usage" }, { "collapse": true, "editable": true, "height": "500px", "panels": [ { "aliasColors": {}, "bars": false, "datasource": "${DS_PROMETHEUS}", "decimals": 3, "editable": true, "error": false, "fill": 0, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "id": 20, "isNew": true, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": false, "min": 
false, "rightSide": false, "show": true, "sort": "current", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 12, "stack": false, "steppedLine": true, "targets": [ { "expr": "sum (rate (container_cpu_usage_seconds_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (id)", "hide": false, "interval": "10s", "intervalFactor": 1, "legendFormat": "{{ id }}", "metric": "container_cpu", "refId": "A", "step": 10 } ], "timeFrom": null, "timeShift": null, "title": "All processes CPU usage (1m avg)", "tooltip": { "msResolution": true, "shared": true, "sort": 2, "value_type": "cumulative" }, "type": "graph", "xaxis": { "show": true }, "yaxes": [ { "format": "none", "label": "cores", "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": false } ] } ], "repeat": null, "showTitle": false, "title": "All processes CPU usage" }, { "collapse": false, "editable": true, "height": "250px", "panels": [ { "aliasColors": {}, "bars": false, "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "fill": 0, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "id": 25, "isNew": true, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": false, "min": false, "rightSide": true, "show": true, "sideWidth": 200, "sort": "current", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 12, "stack": false, "steppedLine": true, "targets": [ { "expr": "sum 
(container_memory_working_set_bytes{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}) by (pod_name)", "interval": "10s", "intervalFactor": 1, "legendFormat": "{{ pod_name }}", "metric": "container_memory_usage:sort_desc", "refId": "A", "step": 10 } ], "timeFrom": null, "timeShift": null, "title": "Pods memory usage", "tooltip": { "msResolution": false, "shared": true, "sort": 2, "value_type": "cumulative" }, "type": "graph", "xaxis": { "show": true }, "yaxes": [ { "format": "bytes", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": false } ] } ], "title": "Pods memory usage" }, { "collapse": true, "editable": true, "height": "250px", "panels": [ { "aliasColors": {}, "bars": false, "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "fill": 0, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "id": 26, "isNew": true, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": false, "min": false, "rightSide": true, "show": true, "sideWidth": 200, "sort": "current", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 12, "stack": false, "steppedLine": true, "targets": [ { "expr": "sum (container_memory_working_set_bytes{systemd_service_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}) by (systemd_service_name)", "interval": "10s", "intervalFactor": 1, "legendFormat": "{{ systemd_service_name }}", "metric": "container_memory_usage:sort_desc", "refId": "A", "step": 10 } ], "timeFrom": null, "timeShift": null, "title": "System services memory usage", "tooltip": { "msResolution": false, "shared": true, "sort": 2, "value_type": 
"cumulative" }, "type": "graph", "xaxis": { "show": true }, "yaxes": [ { "format": "bytes", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": false } ] } ], "title": "System services memory usage" }, { "collapse": true, "editable": true, "height": "250px", "panels": [ { "aliasColors": {}, "bars": false, "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "fill": 0, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "id": 27, "isNew": true, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": false, "min": false, "rightSide": true, "show": true, "sideWidth": 200, "sort": "current", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 12, "stack": false, "steppedLine": true, "targets": [ { "expr": "sum (container_memory_working_set_bytes{image!=\"\",name=~\"^k8s_.*\",container_name!=\"POD\",kubernetes_io_hostname=~\"^$Node$\"}) by (container_name, pod_name)", "interval": "10s", "intervalFactor": 1, "legendFormat": "pod: {{ pod_name }} | {{ container_name }}", "metric": "container_memory_usage:sort_desc", "refId": "A", "step": 10 }, { "expr": "sum (container_memory_working_set_bytes{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}) by (kubernetes_io_hostname, name, image)", "interval": "10s", "intervalFactor": 1, "legendFormat": "docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})", "metric": "container_memory_usage:sort_desc", "refId": "B", "step": 10 }, { "expr": "sum (container_memory_working_set_bytes{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}) by (kubernetes_io_hostname, rkt_container_name)", 
"interval": "10s", "intervalFactor": 1, "legendFormat": "rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}", "metric": "container_memory_usage:sort_desc", "refId": "C", "step": 10 } ], "timeFrom": null, "timeShift": null, "title": "Containers memory usage", "tooltip": { "msResolution": false, "shared": true, "sort": 2, "value_type": "cumulative" }, "type": "graph", "xaxis": { "show": true }, "yaxes": [ { "format": "bytes", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": false } ] } ], "title": "Containers memory usage" }, { "collapse": true, "editable": true, "height": "500px", "panels": [ { "aliasColors": {}, "bars": false, "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "fill": 0, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "id": 28, "isNew": true, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": false, "min": false, "rightSide": false, "show": true, "sideWidth": 200, "sort": "current", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 12, "stack": false, "steppedLine": true, "targets": [ { "expr": "sum (container_memory_working_set_bytes{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) by (id)", "interval": "10s", "intervalFactor": 1, "legendFormat": "{{ id }}", "metric": "container_memory_usage:sort_desc", "refId": "A", "step": 10 } ], "timeFrom": null, "timeShift": null, "title": "All processes memory usage", "tooltip": { "msResolution": false, "shared": true, "sort": 2, "value_type": "cumulative" }, "type": "graph", "xaxis": { "show": true }, "yaxes": [ { "format": "bytes", "label": null, "logBase": 1, 
"max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": false } ] } ], "title": "All processes memory usage" }, { "collapse": false, "editable": true, "height": "250px", "panels": [ { "aliasColors": {}, "bars": false, "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "fill": 1, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "id": 16, "isNew": true, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": false, "min": false, "rightSide": true, "show": true, "sideWidth": 200, "sort": "current", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 12, "stack": false, "steppedLine": false, "targets": [ { "expr": "sum (rate (container_network_receive_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (pod_name)", "interval": "10s", "intervalFactor": 1, "legendFormat": "-> {{ pod_name }}", "metric": "network", "refId": "A", "step": 10 }, { "expr": "- sum (rate (container_network_transmit_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (pod_name)", "interval": "10s", "intervalFactor": 1, "legendFormat": "<- {{ pod_name }}", "metric": "network", "refId": "B", "step": 10 } ], "timeFrom": null, "timeShift": null, "title": "Pods network I/O (1m avg)", "tooltip": { "msResolution": false, "shared": true, "sort": 2, "value_type": "cumulative" }, "type": "graph", "xaxis": { "show": true }, "yaxes": [ { "format": "Bps", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": false } ] } ], "title": "Pods network I/O" 
}, { "collapse": true, "editable": true, "height": "250px", "panels": [ { "aliasColors": {}, "bars": false, "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "fill": 1, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "id": 30, "isNew": true, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": false, "min": false, "rightSide": true, "show": true, "sideWidth": 200, "sort": "current", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 12, "stack": false, "steppedLine": false, "targets": [ { "expr": "sum (rate (container_network_receive_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (container_name, pod_name)", "hide": false, "interval": "10s", "intervalFactor": 1, "legendFormat": "-> pod: {{ pod_name }} | {{ container_name }}", "metric": "network", "refId": "B", "step": 10 }, { "expr": "- sum (rate (container_network_transmit_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (container_name, pod_name)", "hide": false, "interval": "10s", "intervalFactor": 1, "legendFormat": "<- pod: {{ pod_name }} | {{ container_name }}", "metric": "network", "refId": "D", "step": 10 }, { "expr": "sum (rate (container_network_receive_bytes_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (kubernetes_io_hostname, name, image)", "hide": false, "interval": "10s", "intervalFactor": 1, "legendFormat": "-> docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})", "metric": "network", "refId": "A", "step": 10 }, { "expr": "- sum (rate (container_network_transmit_bytes_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by 
(kubernetes_io_hostname, name, image)", "hide": false, "interval": "10s", "intervalFactor": 1, "legendFormat": "<- docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})", "metric": "network", "refId": "C", "step": 10 }, { "expr": "sum (rate (container_network_transmit_bytes_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (kubernetes_io_hostname, rkt_container_name)", "hide": false, "interval": "10s", "intervalFactor": 1, "legendFormat": "-> rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}", "metric": "network", "refId": "E", "step": 10 }, { "expr": "- sum (rate (container_network_transmit_bytes_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (kubernetes_io_hostname, rkt_container_name)", "hide": false, "interval": "10s", "intervalFactor": 1, "legendFormat": "<- rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}", "metric": "network", "refId": "F", "step": 10 } ], "timeFrom": null, "timeShift": null, "title": "Containers network I/O (1m avg)", "tooltip": { "msResolution": false, "shared": true, "sort": 2, "value_type": "cumulative" }, "type": "graph", "xaxis": { "show": true }, "yaxes": [ { "format": "Bps", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": false } ] } ], "title": "Containers network I/O" }, { "collapse": true, "editable": true, "height": "500px", "panels": [ { "aliasColors": {}, "bars": false, "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "fill": 1, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "id": 29, "isNew": true, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": false, "min": false, "rightSide": false, "show": true, "sideWidth": 200, "sort": "current", "sortDesc": true, "total": false, "values": 
true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 12, "stack": false, "steppedLine": false, "targets": [ { "expr": "sum (rate (container_network_receive_bytes_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (id)", "interval": "10s", "intervalFactor": 1, "legendFormat": "-> {{ id }}", "metric": "network", "refId": "A", "step": 10 }, { "expr": "- sum (rate (container_network_transmit_bytes_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (id)", "interval": "10s", "intervalFactor": 1, "legendFormat": "<- {{ id }}", "metric": "network", "refId": "B", "step": 10 } ], "timeFrom": null, "timeShift": null, "title": "All processes network I/O (1m avg)", "tooltip": { "msResolution": false, "shared": true, "sort": 2, "value_type": "cumulative" }, "type": "graph", "xaxis": { "show": true }, "yaxes": [ { "format": "Bps", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": false } ] } ], "title": "All processes network I/O" } ], "time": { "from": "now-5m", "to": "now" }, "timepicker": { "refresh_intervals": [ "5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d" ], "time_options": [ "5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d" ] }, "templating": { "list": [ { "allValue": ".*", "current": {}, "datasource": "${DS_PROMETHEUS}", "hide": 0, "includeAll": true, "multi": false, "name": "Node", "options": [], "query": "label_values(kubernetes_io_hostname)", "refresh": 1, "type": "query" } ] }, "annotations": { "list": [] }, "refresh": "10s", "schemaVersion": 12, "version": 13, "links": [], "gnetId": 1621 }magnum-6.1.0/magnum/drivers/k8s_fedora_atomic_v1/template_def.py0000666000175100017510000000213413244017334024765 0ustar zuulzuul00000000000000# Copyright 2016 Rackspace Inc. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import magnum.conf from magnum.drivers.heat import k8s_fedora_template_def as kftd CONF = magnum.conf.CONF class AtomicK8sTemplateDefinition(kftd.K8sFedoraTemplateDefinition): """Kubernetes template for a Fedora Atomic VM.""" @property def driver_module_path(self): return __name__[:__name__.rindex('.')] @property def template_path(self): return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates/kubecluster.yaml') magnum-6.1.0/magnum/drivers/k8s_fedora_atomic_v1/__init__.py0000666000175100017510000000000013244017334024061 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/k8s_fedora_atomic_v1/templates/0000775000175100017510000000000013244017675023766 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/k8s_fedora_atomic_v1/templates/kubeminion.yaml0000666000175100017510000003621213244017334027010 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is a nested stack that defines a single Kubernetes minion, This stack is included by an AutoScalingGroup resource in the parent template (kubecluster.yaml). 
parameters: name: type: string description: server name server_image: type: string description: glance image used to boot the server minion_flavor: type: string description: flavor to use when booting the server ssh_key_name: type: string description: name of ssh key to be provisioned on our server external_network: type: string description: uuid/name of a network to use for floating ip addresses kube_allow_priv: type: string description: > whether or not kubernetes should permit privileged containers. constraints: - allowed_values: ["true", "false"] docker_volume_size: type: number description: > size of a cinder volume to allocate to docker for container/image storage docker_volume_type: type: string description: > type of a cinder volume to allocate to docker for container/image storage docker_storage_driver: type: string description: docker storage driver name default: "devicemapper" tls_disabled: type: boolean description: whether or not to enable TLS verify_ca: type: boolean description: whether or not to validate certificate authority kubernetes_port: type: number description: > The port which are used by kube-apiserver to provide Kubernetes service. cluster_uuid: type: string description: identifier for the cluster this template is generating magnum_url: type: string description: endpoint to retrieve TLS certs from prometheus_monitoring: type: boolean description: > whether or not to have the node-exporter running on the node kube_master_ip: type: string description: IP address of the Kubernetes master server. etcd_server_ip: type: string description: IP address of the Etcd server. fixed_network: type: string description: Network from which to allocate fixed addresses. fixed_subnet: type: string description: Subnet from which to allocate fixed addresses. 
network_driver: type: string description: network driver to use for instantiating container networks flannel_network_cidr: type: string description: network range for flannel overlay network wait_condition_timeout: type: number description : > timeout for the Wait Conditions registry_enabled: type: boolean description: > Indicates whether the docker registry is enabled. registry_port: type: number description: port of registry service swift_region: type: string description: region of swift service registry_container: type: string description: > name of swift container which docker registry stores images in registry_insecure: type: boolean description: > indicates whether to skip TLS verification between registry and backend storage registry_chunksize: type: number description: > size fo the data segments for the swift dynamic large objects secgroup_kube_minion_id: type: string description: ID of the security group for kubernetes minion. volume_driver: type: string description: volume driver to use for container storage region_name: type: string description: A logically separate section of the cluster username: type: string description: > user account password: type: string description: > user password, not set in current implementation, only used to fill in for Kubernetes config file hidden: true http_proxy: type: string description: http proxy address for docker https_proxy: type: string description: https proxy address for docker no_proxy: type: string description: no proxies for docker kube_tag: type: string description: tag of the k8s containers used to provision the kubernetes cluster flannel_tag: type: string description: tag of the flannel system containers kube_version: type: string description: version of kubernetes used for kubernetes cluster trustee_domain_id: type: string description: domain id of the trustee trustee_user_id: type: string description: user id of the trustee trustee_username: type: string description: username of the trustee 
trustee_password: type: string description: password of the trustee hidden: true trust_id: type: string description: id of the trust which is used by the trustee hidden: true auth_url: type: string description: > url for keystone, must be v2 since k8s backend only support v2 at this point insecure_registry_url: type: string description: insecure registry url container_infra_prefix: type: string description: > prefix of container images used in the cluster, kubernetes components, kubernetes-dashboard, coredns etc dns_service_ip: type: string description: > address used by Kubernetes DNS service dns_cluster_domain: type: string description: > domain name for cluster DNS openstack_ca: type: string description: The OpenStack CA certificate to install on the node. nodes_server_group_id: type: string description: ID of the server group for kubernetes cluster nodes. availability_zone: type: string description: > availability zone for master and nodes default: "" pods_network_cidr: type: string description: Configure the IP pool/range from which pod IPs will be chosen kubelet_options: type: string description: > additional options to be passed to the kubelet kubeproxy_options: type: string description: > additional options to be passed to the kube proxy resources: minion_wait_handle: type: OS::Heat::WaitConditionHandle minion_wait_condition: type: OS::Heat::WaitCondition depends_on: kube-minion properties: handle: {get_resource: minion_wait_handle} timeout: {get_param: wait_condition_timeout} ###################################################################### # # software configs. these are components that are combined into # a multipart MIME user-data archive. 
# write_heat_params: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: template: {get_file: ../../common/templates/kubernetes/fragments/write-heat-params.yaml} params: $PROMETHEUS_MONITORING: {get_param: prometheus_monitoring} $KUBE_ALLOW_PRIV: {get_param: kube_allow_priv} $KUBE_MASTER_IP: {get_param: kube_master_ip} $KUBE_API_PORT: {get_param: kubernetes_port} $KUBE_NODE_PUBLIC_IP: {get_attr: [kube_minion_floating, floating_ip_address]} $KUBE_NODE_IP: {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]} $ETCD_SERVER_IP: {get_param: etcd_server_ip} $DOCKER_VOLUME: {get_resource: docker_volume} $DOCKER_VOLUME_SIZE: {get_param: docker_volume_size} $DOCKER_STORAGE_DRIVER: {get_param: docker_storage_driver} $NETWORK_DRIVER: {get_param: network_driver} $REGISTRY_ENABLED: {get_param: registry_enabled} $REGISTRY_PORT: {get_param: registry_port} $SWIFT_REGION: {get_param: swift_region} $REGISTRY_CONTAINER: {get_param: registry_container} $REGISTRY_INSECURE: {get_param: registry_insecure} $REGISTRY_CHUNKSIZE: {get_param: registry_chunksize} $TLS_DISABLED: {get_param: tls_disabled} $VERIFY_CA: {get_param: verify_ca} $CLUSTER_UUID: {get_param: cluster_uuid} $MAGNUM_URL: {get_param: magnum_url} $USERNAME: {get_param: username} $PASSWORD: {get_param: password} $VOLUME_DRIVER: {get_param: volume_driver} $REGION_NAME: {get_param: region_name} $HTTP_PROXY: {get_param: http_proxy} $HTTPS_PROXY: {get_param: https_proxy} $NO_PROXY: {get_param: no_proxy} $KUBE_TAG: {get_param: kube_tag} $FLANNEL_TAG: {get_param: flannel_tag} $KUBE_VERSION: {get_param: kube_version} $WAIT_CURL: {get_attr: [minion_wait_handle, curl_cli]} $TRUSTEE_USER_ID: {get_param: trustee_user_id} $TRUSTEE_PASSWORD: {get_param: trustee_password} $TRUST_ID: {get_param: trust_id} $AUTH_URL: {get_param: auth_url} $INSECURE_REGISTRY_URL: {get_param: insecure_registry_url} $CONTAINER_INFRA_PREFIX: {get_param: container_infra_prefix} $DNS_SERVICE_IP: {get_param: dns_service_ip} 
$DNS_CLUSTER_DOMAIN: {get_param: dns_cluster_domain} $KUBELET_OPTIONS: {get_param: kubelet_options} $KUBEPROXY_OPTIONS: {get_param: kubeproxy_options} install_openstack_ca: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: params: $OPENSTACK_CA: {get_param: openstack_ca} template: {get_file: ../../common/templates/fragments/atomic-install-openstack-ca.sh} write_kube_os_config: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/write-kube-os-config.sh} make_cert: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/make-cert-client.sh} configure_docker_storage: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: params: $configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_atomic.sh} template: {get_file: ../../common/templates/fragments/configure-docker-storage.sh} configure_docker_registry: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/fragments/configure-docker-registry.sh} configure_kubernetes_minion: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/configure-kubernetes-minion.sh} network_service: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/network-service.sh} enable_services: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/enable-services-minion.sh} enable_docker_registry: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/fragments/enable-docker-registry.sh} enable_node_exporter: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: 
../../common/templates/kubernetes/fragments/enable-node-exporter.sh} minion_wc_notify: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: template: | #!/bin/bash -v if [ "verify_ca" == "True" ]; then VERIFY_CA="" else VERIFY_CA="-k" fi wc_notify $VERIFY_CA --data-binary '{"status": "SUCCESS"}' params: wc_notify: {get_attr: [minion_wait_handle, curl_cli]} verify_ca: {get_param: verify_ca} disable_selinux: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/disable-selinux.sh} add_proxy: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/add-proxy.sh} kube_minion_init: type: OS::Heat::MultipartMime properties: parts: - config: {get_resource: install_openstack_ca} - config: {get_resource: disable_selinux} - config: {get_resource: write_heat_params} - config: {get_resource: write_kube_os_config} - config: {get_resource: make_cert} - config: {get_resource: configure_docker_storage} - config: {get_resource: configure_docker_registry} - config: {get_resource: configure_kubernetes_minion} - config: {get_resource: network_service} - config: {get_resource: add_proxy} - config: {get_resource: enable_services} - config: {get_resource: enable_node_exporter} - config: {get_resource: enable_docker_registry} - config: {get_resource: minion_wc_notify} ###################################################################### # # a single kubernetes minion. 
# # do NOT use "_" (underscore) in the Nova server name # it creates a mismatch between the generated Nova name and its hostname # which can lead to weird problems kube-minion: type: OS::Nova::Server properties: name: {get_param: name} image: {get_param: server_image} flavor: {get_param: minion_flavor} key_name: {get_param: ssh_key_name} user_data_format: RAW user_data: {get_resource: kube_minion_init} networks: - port: {get_resource: kube_minion_eth0} scheduler_hints: { group: { get_param: nodes_server_group_id }} availability_zone: {get_param: availability_zone} kube_minion_eth0: type: OS::Neutron::Port properties: network: {get_param: fixed_network} security_groups: - get_param: secgroup_kube_minion_id fixed_ips: - subnet: {get_param: fixed_subnet} allowed_address_pairs: - ip_address: {get_param: pods_network_cidr} replacement_policy: AUTO kube_minion_floating: type: Magnum::Optional::KubeMinion::Neutron::FloatingIP properties: floating_network: {get_param: external_network} port_id: {get_resource: kube_minion_eth0} ###################################################################### # # docker storage. This allocates a cinder volume and attaches it # to the minion. # docker_volume: type: Magnum::Optional::Cinder::Volume properties: size: {get_param: docker_volume_size} volume_type: {get_param: docker_volume_type} docker_volume_attach: type: Magnum::Optional::Cinder::VolumeAttachment properties: instance_uuid: {get_resource: kube-minion} volume_id: {get_resource: docker_volume} mountpoint: /dev/vdb outputs: kube_minion_ip: value: {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]} description: > This is the "public" IP address of the Kubernetes minion node. kube_minion_external_ip: value: {get_attr: [kube_minion_floating, floating_ip_address]} description: > This is the "public" IP address of the Kubernetes minion node. OS::stack_id: value: {get_param: "OS::stack_id"} description: > This is a id of the stack which creates from this template. 
magnum-6.1.0/magnum/drivers/k8s_fedora_atomic_v1/templates/kubecluster.yaml0000666000175100017510000006017013244017334027200 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This template will boot a Kubernetes cluster with one or more minions (as specified by the number_of_minions parameter, which defaults to 1). parameters: ssh_key_name: type: string description: name of ssh key to be provisioned on our server external_network: type: string description: uuid/name of a network to use for floating ip addresses default: public fixed_network: type: string description: uuid/name of an existing network to use to provision machines default: "" fixed_subnet: type: string description: uuid/name of an existing subnet to use to provision machines default: "" server_image: type: string description: glance image used to boot the server master_flavor: type: string default: m1.small description: flavor to use when booting the server for master nodes minion_flavor: type: string default: m1.small description: flavor to use when booting the server for minions prometheus_monitoring: type: boolean default: false description: > whether or not to have the grafana-prometheus-cadvisor monitoring setup grafana_admin_passwd: type: string default: admin hidden: true description: > admin user password for the Grafana monitoring interface dns_nameserver: type: string description: address of a DNS nameserver reachable in your environment default: 8.8.8.8 number_of_masters: type: number description: how many kubernetes masters to spawn default: 1 number_of_minions: type: number description: how many kubernetes minions to spawn default: 1 fixed_network_cidr: type: string description: network range for fixed ip network default: 10.0.0.0/24 portal_network_cidr: type: string description: > address range used by kubernetes for service portals default: 10.254.0.0/16 network_driver: type: string description: network driver to use for instantiating container networks 
default: flannel flannel_network_cidr: type: string description: network range for flannel overlay network default: 10.100.0.0/16 flannel_network_subnetlen: type: number description: size of subnet assigned to each minion default: 24 flannel_backend: type: string description: > specify the backend for flannel, default udp backend default: "udp" constraints: - allowed_values: ["udp", "vxlan", "host-gw"] system_pods_initial_delay: type: number description: > health check, time to wait for system pods (podmaster, scheduler) to boot (in seconds) default: 30 system_pods_timeout: type: number description: > health check, timeout for system pods (podmaster, scheduler) to answer. (in seconds) default: 5 admission_control_list: type: string description: > List of admission control plugins to activate default: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota" kube_allow_priv: type: string description: > whether or not kubernetes should permit privileged containers. default: "true" constraints: - allowed_values: ["true", "false"] etcd_volume_size: type: number description: > size of the cinder volume for etcd storage default: 0 docker_volume_size: type: number description: > size of a cinder volume to allocate to docker for container/image storage default: 0 docker_volume_type: type: string description: > type of a cinder volume to allocate to docker for container/image storage docker_storage_driver: type: string description: docker storage driver name default: "devicemapper" wait_condition_timeout: type: number description: > timeout for the Wait Conditions default: 6000 minions_to_remove: type: comma_delimited_list description: > List of minions to be removed when doing an update. Individual minion may be referenced several ways: (1) The resource name (e.g. ['1', '3']), (2) The private IP address ['10.0.0.4', '10.0.0.6']. Note: the list should be empty when doing an create. 
default: [] discovery_url: type: string description: > Discovery URL used for bootstrapping the etcd cluster. registry_enabled: type: boolean description: > Indicates whether the docker registry is enabled. default: false registry_port: type: number description: port of registry service default: 5000 swift_region: type: string description: region of swift service default: "" registry_container: type: string description: > name of swift container which docker registry stores images in default: "container" registry_insecure: type: boolean description: > indicates whether to skip TLS verification between registry and backend storage default: true registry_chunksize: type: number description: > size fo the data segments for the swift dynamic large objects default: 5242880 volume_driver: type: string description: volume driver to use for container storage default: "" region_name: type: string description: A logically separate section of the cluster username: type: string description: > user account password: type: string description: > user password, not set in current implementation, only used to fill in for Kubernetes config file default: ChangeMe hidden: true loadbalancing_protocol: type: string description: > The protocol which is used for load balancing. If you want to change tls_disabled option to 'True', please change this to "HTTP". default: TCP constraints: - allowed_values: ["TCP", "HTTP"] tls_disabled: type: boolean description: whether or not to disable TLS default: False kube_dashboard_enabled: type: boolean description: whether or not to enable kubernetes dashboard default: True influx_grafana_dashboard_enabled: type: boolean description: Enable influxdb with grafana dashboard for data from heapster default: False verify_ca: type: boolean description: whether or not to validate certificate authority kubernetes_port: type: number description: > The port which are used by kube-apiserver to provide Kubernetes service. 
default: 6443 cluster_uuid: type: string description: identifier for the cluster this template is generating magnum_url: type: string description: endpoint to retrieve TLS certs from http_proxy: type: string description: http proxy address for docker default: "" https_proxy: type: string description: https proxy address for docker default: "" no_proxy: type: string description: no proxies for docker default: "" trustee_domain_id: type: string description: domain id of the trustee trustee_user_id: type: string description: user id of the trustee trustee_username: type: string description: username of the trustee trustee_password: type: string description: password of the trustee hidden: true trust_id: type: string description: id of the trust which is used by the trustee hidden: true auth_url: type: string description: url for keystone kube_tag: type: string description: tag of the k8s containers used to provision the kubernetes cluster default: v1.9.3 etcd_tag: type: string description: tag of the etcd system container default: v3.2.7 flannel_tag: type: string description: tag of the flannel system containers default: v0.9.0 kube_version: type: string description: version of kubernetes used for kubernetes cluster default: v1.9.3 kube_dashboard_version: type: string description: version of kubernetes dashboard used for kubernetes cluster default: v1.8.3 insecure_registry_url: type: string description: insecure registry url default: "" container_infra_prefix: type: string description: > prefix of container images used in the cluster, kubernetes components, kubernetes-dashboard, coredns etc constraints: - allowed_pattern: "^$|.*/" default: "" dns_service_ip: type: string description: > address used by Kubernetes DNS service default: 10.254.0.10 dns_cluster_domain: type: string description: > domain name for cluster DNS default: "cluster.local" openstack_ca: type: string hidden: true description: The OpenStack CA certificate to install on the node. 
nodes_affinity_policy: type: string description: > affinity policy for nodes server group constraints: - allowed_values: ["affinity", "anti-affinity", "soft-affinity", "soft-anti-affinity"] availability_zone: type: string description: > availability zone for master and nodes default: "" cert_manager_api: type: boolean description: true if the kubernetes cert api manager should be enabled default: false ca_key: type: string description: key of internal ca for the kube certificate api manager default: "" hidden: true calico_tag: type: string description: tag of the calico containers used to provision the calico node default: v2.6.7 calico_cni_tag: type: string description: tag of the cni used to provision the calico node default: v1.11.2 calico_kube_controllers_tag: type: string description: tag of the kube_controllers used to provision the calico node default: v1.0.3 calico_ipv4pool: type: string description: Configure the IP pool from which Pod IPs will be chosen default: "192.168.0.0/16" pods_network_cidr: type: string description: Configure the IP pool/range from which pod IPs will be chosen ingress_controller: type: string description: > ingress controller backend to use default: "" ingress_controller_role: type: string description: > node role where the ingress controller backend should run default: "ingress" kubelet_options: type: string description: > additional options to be passed to the kubelet default: "" kubeapi_options: type: string description: > additional options to be passed to the api default: "" kubecontroller_options: type: string description: > additional options to be passed to the controller manager default: "" kubeproxy_options: type: string description: > additional options to be passed to the kube proxy default: "" kubescheduler_options: type: string description: > additional options to be passed to the scheduler default: "" resources: ###################################################################### # # network resources. 
allocate a network and router for our server. # Important: the Load Balancer feature in Kubernetes requires that # the name for the fixed_network must be "private" for the # address lookup in Kubernetes to work properly # network: type: ../../common/templates/network.yaml properties: existing_network: {get_param: fixed_network} existing_subnet: {get_param: fixed_subnet} private_network_cidr: {get_param: fixed_network_cidr} dns_nameserver: {get_param: dns_nameserver} external_network: {get_param: external_network} private_network_name: private api_lb: type: ../../common/templates/lb.yaml properties: fixed_subnet: {get_attr: [network, fixed_subnet]} external_network: {get_param: external_network} protocol: {get_param: loadbalancing_protocol} port: {get_param: kubernetes_port} etcd_lb: type: ../../common/templates/lb.yaml properties: fixed_subnet: {get_attr: [network, fixed_subnet]} external_network: {get_param: external_network} protocol: {get_param: loadbalancing_protocol} port: 2379 ###################################################################### # # security groups. we need to permit network traffic of various # sorts. # secgroup_kube_master: type: OS::Neutron::SecurityGroup properties: rules: - protocol: icmp - protocol: tcp port_range_min: 22 port_range_max: 22 - protocol: tcp port_range_min: 7080 port_range_max: 7080 - protocol: tcp port_range_min: 8080 port_range_max: 8080 - protocol: tcp port_range_min: 2379 port_range_max: 2379 - protocol: tcp port_range_min: 2380 port_range_max: 2380 - protocol: tcp port_range_min: 6443 port_range_max: 6443 - protocol: tcp port_range_min: 30000 port_range_max: 32767 secgroup_kube_minion: type: OS::Neutron::SecurityGroup properties: rules: - protocol: icmp - protocol: tcp - protocol: udp ###################################################################### # # resources that expose the IPs of either the kube master or a given # LBaaS pool depending on whether LBaaS is enabled for the cluster. 
# api_address_lb_switch: type: Magnum::ApiGatewaySwitcher properties: pool_public_ip: {get_attr: [api_lb, floating_address]} pool_private_ip: {get_attr: [api_lb, address]} master_public_ip: {get_attr: [kube_masters, resource.0.kube_master_external_ip]} master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]} etcd_address_lb_switch: type: Magnum::ApiGatewaySwitcher properties: pool_private_ip: {get_attr: [etcd_lb, address]} master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]} ###################################################################### # # resources that expose the IPs of either floating ip or a given # fixed ip depending on whether FloatingIP is enabled for the cluster. # api_address_floating_switch: type: Magnum::FloatingIPAddressSwitcher properties: public_ip: {get_attr: [api_address_lb_switch, public_ip]} private_ip: {get_attr: [api_address_lb_switch, private_ip]} ###################################################################### # # resources that expose the server group for all nodes include master # and minions. # nodes_server_group: type: OS::Nova::ServerGroup properties: policies: [{get_param: nodes_affinity_policy}] ###################################################################### # # kubernetes masters. This is a resource group that will create # masters. 
# kube_masters: type: OS::Heat::ResourceGroup depends_on: - network properties: count: {get_param: number_of_masters} resource_def: type: kubemaster.yaml properties: name: list_join: - '-' - [{ get_param: 'OS::stack_name' }, 'master', '%index%'] prometheus_monitoring: {get_param: prometheus_monitoring} grafana_admin_passwd: {get_param: grafana_admin_passwd} api_public_address: {get_attr: [api_lb, floating_address]} api_private_address: {get_attr: [api_lb, address]} ssh_key_name: {get_param: ssh_key_name} server_image: {get_param: server_image} master_flavor: {get_param: master_flavor} external_network: {get_param: external_network} kube_allow_priv: {get_param: kube_allow_priv} etcd_volume_size: {get_param: etcd_volume_size} docker_volume_size: {get_param: docker_volume_size} docker_volume_type: {get_param: docker_volume_type} docker_storage_driver: {get_param: docker_storage_driver} wait_condition_timeout: {get_param: wait_condition_timeout} network_driver: {get_param: network_driver} flannel_network_cidr: {get_param: flannel_network_cidr} flannel_network_subnetlen: {get_param: flannel_network_subnetlen} flannel_backend: {get_param: flannel_backend} system_pods_initial_delay: {get_param: system_pods_initial_delay} system_pods_timeout: {get_param: system_pods_timeout} portal_network_cidr: {get_param: portal_network_cidr} admission_control_list: {get_param: admission_control_list} discovery_url: {get_param: discovery_url} cluster_uuid: {get_param: cluster_uuid} magnum_url: {get_param: magnum_url} volume_driver: {get_param: volume_driver} fixed_network: {get_attr: [network, fixed_network]} fixed_subnet: {get_attr: [network, fixed_subnet]} api_pool_id: {get_attr: [api_lb, pool_id]} etcd_pool_id: {get_attr: [etcd_lb, pool_id]} username: {get_param: username} password: {get_param: password} kubernetes_port: {get_param: kubernetes_port} tls_disabled: {get_param: tls_disabled} kube_dashboard_enabled: {get_param: kube_dashboard_enabled} influx_grafana_dashboard_enabled: 
{get_param: influx_grafana_dashboard_enabled} verify_ca: {get_param: verify_ca} secgroup_kube_master_id: {get_resource: secgroup_kube_master} http_proxy: {get_param: http_proxy} https_proxy: {get_param: https_proxy} no_proxy: {get_param: no_proxy} kube_tag: {get_param: kube_tag} kube_version: {get_param: kube_version} etcd_tag: {get_param: etcd_tag} kube_dashboard_version: {get_param: kube_dashboard_version} trustee_user_id: {get_param: trustee_user_id} trustee_password: {get_param: trustee_password} trust_id: {get_param: trust_id} auth_url: {get_param: auth_url} insecure_registry_url: {get_param: insecure_registry_url} container_infra_prefix: {get_param: container_infra_prefix} etcd_lb_vip: {get_attr: [etcd_lb, address]} dns_service_ip: {get_param: dns_service_ip} dns_cluster_domain: {get_param: dns_cluster_domain} openstack_ca: {get_param: openstack_ca} nodes_server_group_id: {get_resource: nodes_server_group} availability_zone: {get_param: availability_zone} ca_key: {get_param: ca_key} cert_manager_api: {get_param: cert_manager_api} calico_tag: {get_param: calico_tag} calico_cni_tag: {get_param: calico_cni_tag} calico_kube_controllers_tag: {get_param: calico_kube_controllers_tag} calico_ipv4pool: {get_param: calico_ipv4pool} pods_network_cidr: {get_param: pods_network_cidr} ingress_controller: {get_param: ingress_controller} ingress_controller_role: {get_param: ingress_controller_role} kubelet_options: {get_param: kubelet_options} kubeapi_options: {get_param: kubeapi_options} kubeproxy_options: {get_param: kubeproxy_options} kubecontroller_options: {get_param: kubecontroller_options} kubescheduler_options: {get_param: kubescheduler_options} ###################################################################### # # kubernetes minions. This is a resource group that will initially # create minions, and needs to be manually scaled. 
# kube_minions: type: OS::Heat::ResourceGroup depends_on: - network properties: count: {get_param: number_of_minions} removal_policies: [{resource_list: {get_param: minions_to_remove}}] resource_def: type: kubeminion.yaml properties: name: list_join: - '-' - [{ get_param: 'OS::stack_name' }, 'minion', '%index%'] prometheus_monitoring: {get_param: prometheus_monitoring} ssh_key_name: {get_param: ssh_key_name} server_image: {get_param: server_image} minion_flavor: {get_param: minion_flavor} fixed_network: {get_attr: [network, fixed_network]} fixed_subnet: {get_attr: [network, fixed_subnet]} network_driver: {get_param: network_driver} flannel_network_cidr: {get_param: flannel_network_cidr} kube_master_ip: {get_attr: [api_address_lb_switch, private_ip]} etcd_server_ip: {get_attr: [etcd_address_lb_switch, private_ip]} external_network: {get_param: external_network} kube_allow_priv: {get_param: kube_allow_priv} docker_volume_size: {get_param: docker_volume_size} docker_volume_type: {get_param: docker_volume_type} docker_storage_driver: {get_param: docker_storage_driver} wait_condition_timeout: {get_param: wait_condition_timeout} registry_enabled: {get_param: registry_enabled} registry_port: {get_param: registry_port} swift_region: {get_param: swift_region} registry_container: {get_param: registry_container} registry_insecure: {get_param: registry_insecure} registry_chunksize: {get_param: registry_chunksize} cluster_uuid: {get_param: cluster_uuid} magnum_url: {get_param: magnum_url} volume_driver: {get_param: volume_driver} region_name: {get_param: region_name} auth_url: {get_param: auth_url} username: {get_param: username} password: {get_param: password} kubernetes_port: {get_param: kubernetes_port} tls_disabled: {get_param: tls_disabled} verify_ca: {get_param: verify_ca} secgroup_kube_minion_id: {get_resource: secgroup_kube_minion} http_proxy: {get_param: http_proxy} https_proxy: {get_param: https_proxy} no_proxy: {get_param: no_proxy} kube_tag: {get_param: kube_tag} 
kube_version: {get_param: kube_version} flannel_tag: {get_param: flannel_tag} trustee_user_id: {get_param: trustee_user_id} trustee_username: {get_param: trustee_username} trustee_password: {get_param: trustee_password} trustee_domain_id: {get_param: trustee_domain_id} trust_id: {get_param: trust_id} insecure_registry_url: {get_param: insecure_registry_url} container_infra_prefix: {get_param: container_infra_prefix} dns_service_ip: {get_param: dns_service_ip} dns_cluster_domain: {get_param: dns_cluster_domain} openstack_ca: {get_param: openstack_ca} nodes_server_group_id: {get_resource: nodes_server_group} availability_zone: {get_param: availability_zone} pods_network_cidr: {get_param: pods_network_cidr} kubelet_options: {get_param: kubelet_options} kubeproxy_options: {get_param: kubeproxy_options} outputs: api_address: value: str_replace: template: api_ip_address params: api_ip_address: {get_attr: [api_address_floating_switch, ip_address]} description: > This is the API endpoint of the Kubernetes cluster. Use this to access the Kubernetes API. registry_address: value: str_replace: template: localhost:port params: port: {get_param: registry_port} description: This is the url of docker registry server where you can store docker images. kube_masters_private: value: {get_attr: [kube_masters, kube_master_ip]} description: > This is a list of the "private" IP addresses of all the Kubernetes masters. kube_masters: value: {get_attr: [kube_masters, kube_master_external_ip]} description: > This is a list of the "public" IP addresses of all the Kubernetes masters. Use these IP addresses to log in to the Kubernetes masters via ssh. kube_minions_private: value: {get_attr: [kube_minions, kube_minion_ip]} description: > This is a list of the "private" IP addresses of all the Kubernetes minions. kube_minions: value: {get_attr: [kube_minions, kube_minion_external_ip]} description: > This is a list of the "public" IP addresses of all the Kubernetes minions. 
Use these IP addresses to log in to the Kubernetes minions via ssh. magnum-6.1.0/magnum/drivers/k8s_fedora_atomic_v1/templates/kubemaster.yaml0000666000175100017510000005576413244017334027027 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is a nested stack that defines a single Kubernetes master, This stack is included by an ResourceGroup resource in the parent template (kubecluster.yaml). parameters: name: type: string description: server name server_image: type: string description: glance image used to boot the server master_flavor: type: string description: flavor to use when booting the server ssh_key_name: type: string description: name of ssh key to be provisioned on our server external_network: type: string description: uuid/name of a network to use for floating ip addresses portal_network_cidr: type: string description: > address range used by kubernetes for service portals kube_allow_priv: type: string description: > whether or not kubernetes should permit privileged containers. 
constraints: - allowed_values: ["true", "false"] etcd_volume_size: type: number description: > size of a cinder volume to allocate for etcd storage docker_volume_size: type: number description: > size of a cinder volume to allocate to docker for container/image storage docker_volume_type: type: string description: > type of a cinder volume to allocate to docker for container/image storage docker_storage_driver: type: string description: docker storage driver name default: "devicemapper" volume_driver: type: string description: volume driver to use for container storage flannel_network_cidr: type: string description: network range for flannel overlay network flannel_network_subnetlen: type: number description: size of subnet assigned to each master flannel_backend: type: string description: > specify the backend for flannel, default udp backend constraints: - allowed_values: ["udp", "vxlan", "host-gw"] system_pods_initial_delay: type: number description: > health check, time to wait for system pods (podmaster, scheduler) to boot (in seconds) default: 30 system_pods_timeout: type: number description: > health check, timeout for system pods (podmaster, scheduler) to answer. (in seconds) default: 5 admission_control_list: type: string description: > List of admission control plugins to activate discovery_url: type: string description: > Discovery URL used for bootstrapping the etcd cluster. tls_disabled: type: boolean description: whether or not to disable TLS kube_dashboard_enabled: type: boolean description: whether or not to enable kubernetes dashboard influx_grafana_dashboard_enabled: type: boolean description: Enable influxdb with grafana dashboard for data from heapster verify_ca: type: boolean description: whether or not to validate certificate authority kubernetes_port: type: number description: > The port which is used by kube-apiserver to provide Kubernetes service. 
cluster_uuid: type: string description: identifier for the cluster this template is generating magnum_url: type: string description: endpoint to retrieve TLS certs from prometheus_monitoring: type: boolean description: > whether or not to have prometheus and grafana deployed grafana_admin_passwd: type: string hidden: true description: > admin user password for the Grafana monitoring interface api_public_address: type: string description: Public IP address of the Kubernetes master server. default: "" api_private_address: type: string description: Private IP address of the Kubernetes master server. default: "" fixed_network: type: string description: Network from which to allocate fixed addresses. fixed_subnet: type: string description: Subnet from which to allocate fixed addresses. network_driver: type: string description: network driver to use for instantiating container networks wait_condition_timeout: type: number description : > timeout for the Wait Conditions secgroup_kube_master_id: type: string description: ID of the security group for kubernetes master. api_pool_id: type: string description: ID of the load balancer pool of k8s API server. etcd_pool_id: type: string description: ID of the load balancer pool of etcd server. 
auth_url: type: string description: > url for kubernetes to authenticate username: type: string description: > user account password: type: string description: > user password http_proxy: type: string description: http proxy address for docker https_proxy: type: string description: https proxy address for docker no_proxy: type: string description: no proxies for docker kube_tag: type: string description: tag of the k8s containers used to provision the kubernetes cluster etcd_tag: type: string description: tag of the etcd system container kube_version: type: string description: version of kubernetes used for kubernetes cluster kube_dashboard_version: type: string description: version of kubernetes dashboard used for kubernetes cluster trustee_user_id: type: string description: user id of the trustee trustee_password: type: string description: password of the trustee hidden: true trust_id: type: string description: id of the trust which is used by the trustee hidden: true insecure_registry_url: type: string description: insecure registry url container_infra_prefix: type: string description: > prefix of container images used in the cluster, kubernetes components, kubernetes-dashboard, coredns etc etcd_lb_vip: type: string description: > etcd lb vip private used to generate certs on master. default: "" dns_service_ip: type: string description: > address used by Kubernetes DNS service dns_cluster_domain: type: string description: > domain name for cluster DNS openstack_ca: type: string description: The OpenStack CA certificate to install on the node. nodes_server_group_id: type: string description: ID of the server group for kubernetes cluster nodes. 
availability_zone: type: string description: > availability zone for master and nodes default: "" ca_key: type: string description: key of internal ca for the kube certificate api manager hidden: true cert_manager_api: type: boolean description: true if the kubernetes cert api manager should be enabled default: false calico_tag: type: string description: tag of the calico containers used to provision the calico node calico_cni_tag: type: string description: tag of the cni used to provision the calico node calico_kube_controllers_tag: type: string description: tag of the kube_controllers used to provision the calico node calico_ipv4pool: type: string description: Configure the IP pool from which Pod IPs will be chosen pods_network_cidr: type: string description: Configure the IP pool/range from which pod IPs will be chosen ingress_controller: type: string description: > ingress controller backend to use ingress_controller_role: type: string description: > node role where the ingress controller should run kubelet_options: type: string description: > additional options to be passed to the kubelet kubeapi_options: type: string description: > additional options to be passed to the api kubecontroller_options: type: string description: > additional options to be passed to the controller manager kubeproxy_options: type: string description: > additional options to be passed to the kube proxy kubescheduler_options: type: string description: > additional options to be passed to the scheduler resources: master_wait_handle: type: OS::Heat::WaitConditionHandle master_wait_condition: type: OS::Heat::WaitCondition depends_on: kube-master properties: handle: {get_resource: master_wait_handle} timeout: {get_param: wait_condition_timeout} ###################################################################### # # resource that exposes the IPs of either the kube master or the API # LBaaS pool depending on whether LBaaS is enabled for the cluster. 
# api_address_switch: type: Magnum::ApiGatewaySwitcher properties: pool_public_ip: {get_param: api_public_address} pool_private_ip: {get_param: api_private_address} master_public_ip: {get_attr: [kube_master_floating, floating_ip_address]} master_private_ip: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} ###################################################################### # # software configs. these are components that are combined into # a multipart MIME user-data archive. # write_heat_params: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: template: {get_file: ../../common/templates/kubernetes/fragments/write-heat-params-master.yaml} params: "$PROMETHEUS_MONITORING": {get_param: prometheus_monitoring} "$KUBE_API_PUBLIC_ADDRESS": {get_attr: [api_address_switch, public_ip]} "$KUBE_API_PRIVATE_ADDRESS": {get_attr: [api_address_switch, private_ip]} "$KUBE_API_PORT": {get_param: kubernetes_port} "$KUBE_NODE_PUBLIC_IP": {get_attr: [kube_master_floating, floating_ip_address]} "$KUBE_NODE_IP": {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} "$KUBE_ALLOW_PRIV": {get_param: kube_allow_priv} "$ETCD_VOLUME": {get_resource: etcd_volume} "$ETCD_VOLUME_SIZE": {get_param: etcd_volume_size} "$DOCKER_VOLUME": {get_resource: docker_volume} "$DOCKER_VOLUME_SIZE": {get_param: docker_volume_size} "$DOCKER_STORAGE_DRIVER": {get_param: docker_storage_driver} "$NETWORK_DRIVER": {get_param: network_driver} "$FLANNEL_NETWORK_CIDR": {get_param: flannel_network_cidr} "$FLANNEL_NETWORK_SUBNETLEN": {get_param: flannel_network_subnetlen} "$FLANNEL_BACKEND": {get_param: flannel_backend} "$SYSTEM_PODS_INITIAL_DELAY": {get_param: system_pods_initial_delay} "$SYSTEM_PODS_TIMEOUT": {get_param: system_pods_timeout} "$PODS_NETWORK_CIDR": {get_param: pods_network_cidr} "$PORTAL_NETWORK_CIDR": {get_param: portal_network_cidr} "$ADMISSION_CONTROL_LIST": {get_param: admission_control_list} "$ETCD_DISCOVERY_URL": {get_param: discovery_url} "$AUTH_URL": 
{get_param: auth_url} "$USERNAME": {get_param: username} "$PASSWORD": {get_param: password} "$CLUSTER_SUBNET": {get_param: fixed_subnet} "$TLS_DISABLED": {get_param: tls_disabled} "$KUBE_DASHBOARD_ENABLED": {get_param: kube_dashboard_enabled} "$INFLUX_GRAFANA_DASHBOARD_ENABLED": {get_param: influx_grafana_dashboard_enabled} "$VERIFY_CA": {get_param: verify_ca} "$CLUSTER_UUID": {get_param: cluster_uuid} "$MAGNUM_URL": {get_param: magnum_url} "$VOLUME_DRIVER": {get_param: volume_driver} "$HTTP_PROXY": {get_param: http_proxy} "$HTTPS_PROXY": {get_param: https_proxy} "$NO_PROXY": {get_param: no_proxy} "$KUBE_TAG": {get_param: kube_tag} "$ETCD_TAG": {get_param: etcd_tag} "$KUBE_VERSION": {get_param: kube_version} "$KUBE_DASHBOARD_VERSION": {get_param: kube_dashboard_version} "$WAIT_CURL": {get_attr: [master_wait_handle, curl_cli]} "$TRUSTEE_USER_ID": {get_param: trustee_user_id} "$TRUSTEE_PASSWORD": {get_param: trustee_password} "$TRUST_ID": {get_param: trust_id} "$INSECURE_REGISTRY_URL": {get_param: insecure_registry_url} "$CONTAINER_INFRA_PREFIX": {get_param: container_infra_prefix} "$ETCD_LB_VIP": {get_param: etcd_lb_vip} "$DNS_SERVICE_IP": {get_param: dns_service_ip} "$DNS_CLUSTER_DOMAIN": {get_param: dns_cluster_domain} "$CERT_MANAGER_API": {get_param: cert_manager_api} "$CA_KEY": {get_param: ca_key} "$CALICO_TAG": {get_param: calico_tag} "$CALICO_CNI_TAG": {get_param: calico_cni_tag} "$CALICO_KUBE_CONTROLLERS_TAG": {get_param: calico_kube_controllers_tag} "$CALICO_IPV4POOL": {get_param: calico_ipv4pool} "$INGRESS_CONTROLLER": {get_param: ingress_controller} "$INGRESS_CONTROLLER_ROLE": {get_param: ingress_controller_role} "$KUBELET_OPTIONS": {get_param: kubelet_options} "$KUBEAPI_OPTIONS": {get_param: kubeapi_options} "$KUBECONTROLLER_OPTIONS": {get_param: kubecontroller_options} "$KUBEPROXY_OPTIONS": {get_param: kubeproxy_options} "$KUBESCHEDULER_OPTIONS": {get_param: kubescheduler_options} install_openstack_ca: type: OS::Heat::SoftwareConfig properties: group: 
ungrouped config: str_replace: params: $OPENSTACK_CA: {get_param: openstack_ca} template: {get_file: ../../common/templates/fragments/atomic-install-openstack-ca.sh} make_cert: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/make-cert.sh} configure_docker_storage: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: params: $configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_atomic.sh} template: {get_file: ../../common/templates/fragments/configure-docker-storage.sh} configure_etcd: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/configure-etcd.sh} write_kube_os_config: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/write-kube-os-config.sh} configure_kubernetes: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/configure-kubernetes-master.sh} write_network_config: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/write-network-config.sh} network_config_service: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/network-config-service.sh} enable_services: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/enable-services-master.sh} kube_apiserver_to_kubelet_role: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/kube-apiserver-to-kubelet-role.sh} core_dns_service: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: 
../../common/templates/kubernetes/fragments/core-dns-service.sh} master_wc_notify: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/wc-notify-master.sh} disable_selinux: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/disable-selinux.sh} add_proxy: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/add-proxy.sh} start_container_agent: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/start-container-agent.sh} kube_master_init: type: OS::Heat::MultipartMime properties: parts: - config: {get_resource: install_openstack_ca} - config: {get_resource: disable_selinux} - config: {get_resource: write_heat_params} - config: {get_resource: configure_etcd} - config: {get_resource: write_kube_os_config} - config: {get_resource: configure_docker_storage} - config: {get_resource: configure_kubernetes} - config: {get_resource: make_cert} - config: {get_resource: add_proxy} - config: {get_resource: start_container_agent} - config: {get_resource: enable_services} - config: {get_resource: write_network_config} - config: {get_resource: network_config_service} - config: {get_resource: kube_apiserver_to_kubelet_role} - config: {get_resource: core_dns_service} - config: {get_resource: master_wc_notify} enable_prometheus_monitoring: type: OS::Heat::SoftwareConfig properties: group: script config: str_replace: template: {get_file: ../../common/templates/kubernetes/fragments/enable-prometheus-monitoring} params: "$ADMIN_PASSWD": {get_param: grafana_admin_passwd} enable_prometheus_monitoring_deployment: type: OS::Heat::SoftwareDeployment properties: signal_transport: HEAT_SIGNAL config: {get_resource: enable_prometheus_monitoring} server: {get_resource: kube-master} actions: ['CREATE'] 
enable_cert_manager_api: type: OS::Heat::SoftwareConfig properties: group: script config: str_replace: template: {get_file: ../../common/templates/kubernetes/fragments/enable-cert-api-manager} params: "$CA_KEY": {get_param: ca_key} enable_cert_manager_api_deployment: type: OS::Heat::SoftwareDeployment properties: signal_transport: HEAT_SIGNAL config: {get_resource: enable_cert_manager_api} server: {get_resource: kube-master} actions: ['CREATE'] calico_service: type: OS::Heat::SoftwareConfig properties: group: script config: {get_file: ../../common/templates/kubernetes/fragments/calico-service.sh} calico_service_deployment: type: OS::Heat::SoftwareDeployment properties: signal_transport: HEAT_SIGNAL config: {get_resource: calico_service} server: {get_resource: kube-master} actions: ['CREATE'] enable_ingress_controller: type: OS::Heat::SoftwareConfig properties: group: script config: str_replace: params: $enable-ingress-traefik: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-traefik} template: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-controller} enable_ingress_controller_deployment: type: OS::Heat::SoftwareDeployment properties: signal_transport: HEAT_SIGNAL config: {get_resource: enable_ingress_controller} server: {get_resource: kube-master} actions: ['CREATE'] kubernetes_dashboard: type: OS::Heat::SoftwareConfig properties: group: script config: {get_file: ../../common/templates/kubernetes/fragments/kube-dashboard-service.sh} kubernetes_dashboard_deployment: type: OS::Heat::SoftwareDeployment properties: signal_transport: HEAT_SIGNAL config: {get_resource: kubernetes_dashboard} server: {get_resource: kube-master} actions: ['CREATE'] ###################################################################### # # a single kubernetes master. 
# # do NOT use "_" (underscore) in the Nova server name # it creates a mismatch between the generated Nova name and its hostname # which can lead to weird problems kube-master: type: OS::Nova::Server properties: name: {get_param: name} image: {get_param: server_image} flavor: {get_param: master_flavor} key_name: {get_param: ssh_key_name} user_data_format: SOFTWARE_CONFIG software_config_transport: POLL_SERVER_HEAT user_data: {get_resource: kube_master_init} networks: - port: {get_resource: kube_master_eth0} scheduler_hints: { group: { get_param: nodes_server_group_id }} availability_zone: {get_param: availability_zone} kube_master_eth0: type: OS::Neutron::Port properties: network: {get_param: fixed_network} security_groups: - {get_param: secgroup_kube_master_id} fixed_ips: - subnet: {get_param: fixed_subnet} allowed_address_pairs: - ip_address: {get_param: pods_network_cidr} replacement_policy: AUTO kube_master_floating: type: Magnum::Optional::KubeMaster::Neutron::FloatingIP properties: floating_network: {get_param: external_network} port_id: {get_resource: kube_master_eth0} api_pool_member: type: Magnum::Optional::Neutron::LBaaS::PoolMember properties: pool: {get_param: api_pool_id} address: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} subnet: { get_param: fixed_subnet } protocol_port: {get_param: kubernetes_port} etcd_pool_member: type: Magnum::Optional::Neutron::LBaaS::PoolMember properties: pool: {get_param: etcd_pool_id} address: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} subnet: { get_param: fixed_subnet } protocol_port: 2379 ###################################################################### # # etcd storage. This allocates a cinder volume and attaches it # to the master. 
# etcd_volume: type: Magnum::Optional::Etcd::Volume properties: size: {get_param: etcd_volume_size} etcd_volume_attach: type: Magnum::Optional::Etcd::VolumeAttachment properties: instance_uuid: {get_resource: kube-master} volume_id: {get_resource: etcd_volume} mountpoint: /dev/vdc ###################################################################### # # docker storage. This allocates a cinder volume and attaches it # to the minion. # docker_volume: type: Magnum::Optional::Cinder::Volume properties: size: {get_param: docker_volume_size} volume_type: {get_param: docker_volume_type} docker_volume_attach: type: Magnum::Optional::Cinder::VolumeAttachment properties: instance_uuid: {get_resource: kube-master} volume_id: {get_resource: docker_volume} mountpoint: /dev/vdb outputs: kube_master_ip: value: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} description: > This is the "private" IP address of the Kubernetes master node. kube_master_external_ip: value: {get_attr: [kube_master_floating, floating_ip_address]} description: > This is the "public" IP address of the Kubernetes master node. magnum-6.1.0/magnum/drivers/k8s_fedora_atomic_v1/templates/COPYING0000666000175100017510000002613613244017334025023 0ustar zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. magnum-6.1.0/magnum/drivers/k8s_coreos_v1/0000775000175100017510000000000013244017675020466 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/k8s_coreos_v1/version.py0000666000175100017510000000125313244017334022520 0ustar zuulzuul00000000000000# Copyright 2016 - Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. version = '1.0.0' driver = 'k8s_coreos_v1' container_version = '1.11.2' magnum-6.1.0/magnum/drivers/k8s_coreos_v1/driver.py0000666000175100017510000000253113244017334022326 0ustar zuulzuul00000000000000# Copyright 2016 Rackspace Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.drivers.common import k8s_monitor from magnum.drivers.heat import driver from magnum.drivers.k8s_coreos_v1 import template_def class Driver(driver.HeatDriver): @property def provides(self): return [ {'server_type': 'vm', 'os': 'coreos', 'coe': 'kubernetes'}, ] def get_template_definition(self): return template_def.CoreOSK8sTemplateDefinition() def get_monitor(self, context, cluster): return k8s_monitor.K8sMonitor(context, cluster) def get_scale_manager(self, context, osclient, cluster): # FIXME: Until the kubernetes client is fixed, remove # the scale_manager. # https://bugs.launchpad.net/magnum/+bug/1746510 return None magnum-6.1.0/magnum/drivers/k8s_coreos_v1/template_def.py0000666000175100017510000000330313244017334023462 0ustar zuulzuul00000000000000# Copyright 2016 Rackspace Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import magnum.conf from magnum.drivers.heat import k8s_template_def from magnum.drivers.heat import template_def CONF = magnum.conf.CONF class CoreOSK8sTemplateDefinition(k8s_template_def.K8sTemplateDefinition): """Kubernetes template for CoreOS VM.""" def __init__(self): super(CoreOSK8sTemplateDefinition, self).__init__() self.add_output('kube_minions', cluster_attr='node_addresses') self.add_output('kube_masters', cluster_attr='master_addresses') def get_env_files(self, cluster_template, cluster): env_files = [] template_def.add_priv_net_env_file(env_files, cluster_template) template_def.add_lb_env_file(env_files, cluster_template) template_def.add_fip_env_file(env_files, cluster_template) return env_files @property def driver_module_path(self): return __name__[:__name__.rindex('.')] @property def template_path(self): return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates/kubecluster.yaml') magnum-6.1.0/magnum/drivers/k8s_coreos_v1/__init__.py0000666000175100017510000000000013244017334022557 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/k8s_coreos_v1/templates/0000775000175100017510000000000013244017675022464 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/k8s_coreos_v1/templates/kubeminion.yaml0000666000175100017510000002605513244017334025512 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is a nested stack that defines a single Kubernetes minion, based on a CoreOS cloud image. This stack is included by a ResourceGroup resource in the parent template (kubecluster.yaml). 
parameters: name: type: string description: server name server_image: type: string description: glance image used to boot the server minion_flavor: type: string description: flavor to use when booting the server ssh_key_name: type: string description: name of ssh key to be provisioned on our server external_network: type: string description: uuid/name of a network to use for floating ip addresses kube_allow_priv: type: string description: > whether or not kubernetes should permit privileged containers. constraints: - allowed_values: ["true", "false"] network_driver: type: string description: network driver to use for instantiating container networks tls_disabled: type: boolean description: whether or not to enable TLS verify_ca: type: boolean description: whether or not to validate certificate authority kubernetes_port: type: number description: > The port which are used by kube-apiserver to provide Kubernetes service. default: 6443 cluster_uuid: type: string description: identifier for the cluster this template is generating magnum_url: type: string description: endpoint to retrieve TLS certs from kube_version: type: string description: version of kubernetes used for kubernetes cluster hyperkube_image: type: string description: > Docker registry used for hyperkube image kube_master_ip: type: string description: IP address of the Kubernetes master server. etcd_server_ip: type: string description: IP address of the Etcd server. fixed_network: type: string description: Network from which to allocate fixed addresses. fixed_subnet: type: string description: Subnet from which to allocate fixed addresses. flannel_network_cidr: type: string description: network range for flannel overlay network wait_condition_timeout: type: number description: > timeout for the Wait Conditions secgroup_kube_minion_id: type: string description: ID of the security group for kubernetes minion. 
http_proxy: type: string description: http proxy address for docker https_proxy: type: string description: https proxy address for docker no_proxy: type: string description: no proxies for docker trustee_user_id: type: string description: user id of the trustee default: "" trustee_password: type: string description: password of the trustee default: "" hidden: true trust_id: type: string description: id of the trust which is used by the trustee default: "" hidden: true auth_url: type: string description: url for keystone insecure_registry_url: type: string description: insecure registry url container_runtime: type: string description: > Container runtime to use with Kubernetes. prometheus_monitoring: type: boolean description: > whether or not to have the node-exporter running on the node dns_service_ip: type: string description: > address used by Kubernetes DNS service dns_cluster_domain: type: string description: > domain name for cluster DNS openstack_ca: type: string description: The OpenStack CA certificate to install on the node. nodes_server_group_id: type: string description: ID of the server group for kubernetes cluster nodes. resources: minion_wait_handle: type: OS::Heat::WaitConditionHandle minion_wait_condition: type: OS::Heat::WaitCondition depends_on: kube-minion properties: handle: {get_resource: minion_wait_handle} timeout: {get_param: wait_condition_timeout} ###################################################################### # # software configs. these are components that are combined into # a multipart MIME user-data archive. 
# write_heat_params: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: template: {get_file: fragments/write-heat-params.yaml} params: "$KUBE_ALLOW_PRIV": {get_param: kube_allow_priv} "$KUBE_MASTER_IP": {get_param: kube_master_ip} "$KUBE_NODE_PUBLIC_IP": {get_attr: [kube_minion_floating, floating_ip_address]} "$KUBE_NODE_IP": {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]} "$WAIT_CURL": {get_attr: [minion_wait_handle, curl_cli]} "$KUBE_API_PORT": {get_param: kubernetes_port} "$TLS_DISABLED": {get_param: tls_disabled} "$VERIFY_CA": {get_param: verify_ca} "$NETWORK_DRIVER": {get_param: network_driver} "$ETCD_SERVER_IP": {get_param: etcd_server_ip} "$KUBE_VERSION": {get_param: kube_version} "$CLUSTER_UUID": {get_param: cluster_uuid} "$MAGNUM_URL": {get_param: magnum_url} "$HTTP_PROXY": {get_param: http_proxy} "$HTTPS_PROXY": {get_param: https_proxy} "$NO_PROXY": {get_param: no_proxy} "$TRUSTEE_USER_ID": {get_param: trustee_user_id} "$TRUSTEE_PASSWORD": {get_param: trustee_password} "$TRUST_ID": {get_param: trust_id} "$AUTH_URL": {get_param: auth_url} "$KUBE_CERTS_PATH": "/etc/kubernetes/ssl" "$HOST_CERTS_PATH": "/usr/share/ca-certificates" "$HYPERKUBE_IMAGE_REPO": str_replace: template: insecure_registry_urlhyperkube_image params: insecure_registry_url: { get_param: insecure_registry_url } hyperkube_image: { get_param: hyperkube_image } "$INSECURE_REGISTRY_URL": {get_param: insecure_registry_url} "$CONTAINER_RUNTIME": {get_param: container_runtime} "$DNS_SERVICE_IP": {get_param: dns_service_ip} "$DNS_CLUSTER_DOMAIN": {get_param: dns_cluster_domain} add_ext_ca_certs: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: params: $OPENSTACK_CA: {get_param: openstack_ca} template: {get_file: fragments/add-ext-ca-certs.yaml} write_kubeconfig: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/write-kubeconfig.yaml} make_cert: type: OS::Heat::SoftwareConfig properties: 
group: ungrouped config: {get_file: fragments/make-cert-client.yaml} enable_network_service: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/enable-network-service-client.yaml} enable_kubelet: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/enable-kubelet-minion.yaml} enable_kube_proxy: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/enable-kube-proxy-minion.yaml} wc_notify: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/wc-notify.yaml} add_proxy: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/add-proxy.yaml} configure_docker: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/configure-docker.yaml} kube_minion_init: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: template: | $add_ext_ca_certs $write_heat_params $write_kubeconfig $make_cert $configure_docker $add_proxy $enable_network_service $enable_kubelet $enable_kube_proxy $wc_notify coreos: units: - name: "add-ext-ca-certs.service" command: "start" - name: "make-cert.service" command: "start" - name: "configure-docker.service" command: "start" - name: "add-proxy.service" command: "start" - name: "enable-network-service.service" command: "start" - name: "enable-kubelet.service" command: "start" - name: "enable-kube-proxy.service" command: "start" - name: "wc-notify.service" command: "start" params: "$add_ext_ca_certs": {get_attr: [add_ext_ca_certs, config]} "$write_heat_params": {get_attr: [write_heat_params, config]} "$write_kubeconfig": {get_attr: [write_kubeconfig, config]} "$make_cert": {get_attr: [make_cert, config]} "$configure_docker": {get_attr: [configure_docker, config]} "$add_proxy": {get_attr: [add_proxy, config]} "$enable_network_service": {get_attr: [enable_network_service, config]} "$enable_kubelet": {get_attr: 
[enable_kubelet, config]} "$enable_kube_proxy": {get_attr: [enable_kube_proxy, config]} "$wc_notify": {get_attr: [wc_notify, config]} # do NOT use "_" (underscore) in the Nova server name # it creates a mismatch between the generated Nova name and its hostname # which can lead to weird problems kube-minion: type: OS::Nova::Server properties: name: {get_param: name} image: {get_param: server_image} flavor: {get_param: minion_flavor} key_name: {get_param: ssh_key_name} user_data_format: RAW user_data: {get_resource: kube_minion_init} networks: - port: {get_resource: kube_minion_eth0} scheduler_hints: { group: { get_param: nodes_server_group_id }} kube_minion_eth0: type: OS::Neutron::Port properties: network: {get_param: fixed_network} security_groups: - {get_param: secgroup_kube_minion_id} fixed_ips: - subnet: {get_param: fixed_subnet} allowed_address_pairs: - ip_address: {get_param: flannel_network_cidr} replacement_policy: AUTO kube_minion_floating: type: Magnum::Optional::KubeMinion::Neutron::FloatingIP properties: floating_network: {get_param: external_network} port_id: {get_resource: kube_minion_eth0} outputs: kube_minion_ip: value: {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]} description: > This is the "private" IP address of the Kubernetes minion node. kube_minion_external_ip: value: {get_attr: [kube_minion_floating, floating_ip_address]} description: > This is the "public" IP address of the Kubernetes minion node. OS::stack_id: value: {get_param: "OS::stack_id"} description: > This is a id of the stack which creates from this template. magnum-6.1.0/magnum/drivers/k8s_coreos_v1/templates/kubecluster.yaml0000666000175100017510000004374013244017334025702 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This template will boot a coreos cluster with one or more minions (as specified by the number_of_minions parameter, which defaults to 1) and one master node. Allowing multiple masters is a work in progress. 
parameters: ssh_key_name: type: string description: name of ssh key to be provisioned on the servers external_network: type: string description: uuid/name of a network to use for floating ip addresses default: public fixed_network: type: string description: uuid/name of an existing network to use to provision machines default: "" fixed_subnet: type: string description: uuid/name of an existing subnet to use to provision machines default: "" server_image: type: string default: CoreOS description: glance image used to boot the servers master_flavor: type: string default: m1.small description: flavor to use when booting the server for master node minion_flavor: type: string default: m1.small description: flavor to use when booting the servers for minions prometheus_monitoring: type: boolean default: false description: > whether or not to have the grafana-prometheus-cadvisor monitoring setup grafana_admin_passwd: type: string default: admin hidden: true description: > admin user password for the Grafana monitoring interface discovery_url: type: string description: > Discovery URL used for bootstrapping the etcd cluster. 
dns_nameserver: type: string description: address of a dns nameserver reachable in your environment default: 8.8.8.8 number_of_masters: type: number description: how many kubernetes masters to spawn default: 1 number_of_minions: type: number description: how many kubernetes minions to spawn default: 1 fixed_network_cidr: type: string description: network range for fixed ip network default: 10.0.0.0/24 portal_network_cidr: type: string description: > address range used by kubernetes for service portals default: 10.254.0.0/16 flannel_network_cidr: type: string description: network range for flannel overlay network default: 10.100.0.0/16 flannel_network_subnetlen: type: number description: size of subnet assigned to each minion default: 24 flannel_backend: type: string description: > specify the backend for flannel, default udp backend default: "host-gw" constraints: - allowed_values: ["udp", "vxlan", "host-gw"] system_pods_initial_delay: type: number description: > health check, time to wait for system pods (podmaster, scheduler) to boot (in seconds) default: 30 system_pods_timeout: type: number description: > health check, timeout for system pods (podmaster, scheduler) to answer. (in seconds) default: 5 admission_control_list: type: string description: > List of admission control plugins to activate default: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota" kube_allow_priv: type: string description: > whether or not kubernetes should permit privileged containers. default: "true" constraints: - allowed_values: ["true", "false"] minions_to_remove: type: comma_delimited_list description: > List of minions to be removed when doing an update. Individual minion may be referenced several ways: (1) The resource name (e.g. ['1', '3']), (2) The private IP address ['10.0.0.4', '10.0.0.6']. Note: the list should be empty when doing a create. 
default: [] network_driver: type: string description: network driver to use for instantiating container networks default: flannel tls_disabled: type: boolean description: whether or not to disable TLS default: False kube_dashboard_enabled: type: boolean description: whether or not to disable kubernetes dashboard default: True influx_grafana_dashboard_enabled: type: boolean description: Enable influxdb with grafana dashboard for data from heapster default: False verify_ca: type: boolean description: whether or not to validate certificate authority loadbalancing_protocol: type: string description: > The protocol which is used for load balancing. If you want to change tls_disabled option to 'True', please change this to "HTTP". default: TCP constraints: - allowed_values: ["TCP", "HTTP"] kubernetes_port: type: number description: > The port which are used by kube-apiserver to provide Kubernetes service. default: 6443 cluster_uuid: type: string description: identifier for the cluster this template is generating magnum_url: type: string description: endpoint to retrieve TLS certs from http_proxy: type: string description: http proxy address for docker default: "" https_proxy: type: string description: https proxy address for docker default: "" no_proxy: type: string description: no proxies for docker default: "" trustee_domain_id: type: string description: domain id of the trustee default: "" trustee_user_id: type: string description: user id of the trustee default: "" trustee_username: type: string description: username of the trustee default: "" trustee_password: type: string description: password of the trustee default: "" hidden: true trust_id: type: string description: id of the trust which is used by the trustee default: "" hidden: true auth_url: type: string description: url for keystone kube_version: type: string description: version of kubernetes used for kubernetes cluster default: v1.6.2_coreos.0 kube_dashboard_version: type: string description: version of 
kubernetes dashboard used for kubernetes cluster default: v1.5.1 hyperkube_image: type: string description: > Docker registry used for hyperkube image default: quay.io/coreos/hyperkube registry_enabled: type: boolean description: > Indicates whether the docker registry is enabled. default: false registry_port: type: number description: port of registry service default: 5000 wait_condition_timeout: type: number description: > timeout for the Wait Conditions default: 6000 insecure_registry_url: type: string description: insecure registry url constraints: - allowed_pattern: "^$|.*/" default: "" container_runtime: type: string description: > Container runtime to use with Kubernetes. default: "docker" constraints: - allowed_values: ["docker"] dns_service_ip: type: string description: > address used by Kubernetes DNS service default: 10.254.0.10 dns_cluster_domain: type: string description: > domain name for cluster DNS default: "cluster.local" openstack_ca: type: string hidden: true description: The OpenStack CA certificate to install on the node. nodes_affinity_policy: type: string description: > affinity policy for nodes server group constraints: - allowed_values: ["affinity", "anti-affinity", "soft-affinity", "soft-anti-affinity"] resources: ###################################################################### # # network resources. allocate a network and router for our server. 
# Important: the Load Balancer feature in Kubernetes requires that # the name for the fixed_network must be "private" for the # address lookup in Kubernetes to work properly # network: type: ../../common/templates/network.yaml properties: existing_network: {get_param: fixed_network} existing_subnet: {get_param: fixed_subnet} private_network_cidr: {get_param: fixed_network_cidr} dns_nameserver: {get_param: dns_nameserver} external_network: {get_param: external_network} private_network_name: private api_lb: type: ../../common/templates/lb.yaml properties: fixed_subnet: {get_attr: [network, fixed_subnet]} external_network: {get_param: external_network} protocol: {get_param: loadbalancing_protocol} port: {get_param: kubernetes_port} etcd_lb: type: ../../common/templates/lb.yaml properties: fixed_subnet: {get_attr: [network, fixed_subnet]} external_network: {get_param: external_network} protocol: {get_param: loadbalancing_protocol} port: 2379 ###################################################################### # # security groups. we need to permit network traffic of various # sorts. # secgroup_master: type: OS::Neutron::SecurityGroup properties: rules: - protocol: icmp - protocol: tcp port_range_min: 22 port_range_max: 22 - protocol: tcp port_range_min: 7080 port_range_max: 7080 - protocol: tcp port_range_min: 8080 port_range_max: 8080 - protocol: tcp port_range_min: 2379 port_range_max: 2379 - protocol: tcp port_range_min: 2380 port_range_max: 2380 - protocol: tcp port_range_min: 6443 port_range_max: 6443 secgroup_minion_all_open: type: OS::Neutron::SecurityGroup properties: rules: - protocol: icmp - protocol: tcp - protocol: udp ###################################################################### # # resources that expose the IPs of either the kube master or a given # LBaaS pool depending on whether LBaaS is enabled for the cluster. 
# api_address_lb_switch: type: Magnum::ApiGatewaySwitcher properties: pool_public_ip: {get_attr: [api_lb, floating_address]} pool_private_ip: {get_attr: [api_lb, address]} master_public_ip: {get_attr: [kube_masters, resource.0.kube_master_external_ip]} master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]} etcd_address_lb_switch: type: Magnum::ApiGatewaySwitcher properties: pool_private_ip: {get_attr: [etcd_lb, address]} master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]} ###################################################################### # # resources that expose the IPs of either floating ip or a given # fixed ip depending on whether FloatingIP is enabled for the cluster. # api_address_floating_switch: type: Magnum::FloatingIPAddressSwitcher properties: public_ip: {get_attr: [api_address_lb_switch, public_ip]} private_ip: {get_attr: [api_address_lb_switch, private_ip]} ###################################################################### # # resources that expose the server group for all nodes include master # and minions. # nodes_server_group: type: OS::Nova::ServerGroup properties: policies: [{get_param: nodes_affinity_policy}] ###################################################################### # # kubernetes masters. This is a resource group that will create # master. 
# kube_masters: type: OS::Heat::ResourceGroup depends_on: - network properties: count: {get_param: number_of_masters} resource_def: type: kubemaster.yaml properties: name: list_join: - '-' - [{ get_param: 'OS::stack_name' }, 'master', '%index%'] api_public_address: {get_attr: [api_lb, floating_address]} api_private_address: {get_attr: [api_lb, address]} ssh_key_name: {get_param: ssh_key_name} server_image: {get_param: server_image} master_flavor: {get_param: master_flavor} external_network: {get_param: external_network} kube_allow_priv: {get_param: kube_allow_priv} flannel_network_cidr: {get_param: flannel_network_cidr} flannel_network_subnetlen: {get_param: flannel_network_subnetlen} flannel_backend: {get_param: flannel_backend} system_pods_initial_delay: {get_param: system_pods_initial_delay} system_pods_timeout: {get_param: system_pods_timeout} portal_network_cidr: {get_param: portal_network_cidr} admission_control_list: {get_param: admission_control_list} fixed_network: {get_attr: [network, fixed_network]} fixed_subnet: {get_attr: [network, fixed_subnet]} discovery_url: {get_param: discovery_url} network_driver: {get_param: network_driver} kubernetes_port: {get_param: kubernetes_port} tls_disabled: {get_param: tls_disabled} kube_dashboard_enabled: {get_param: kube_dashboard_enabled} influx_grafana_dashboard_enabled: {get_param: influx_grafana_dashboard_enabled} verify_ca: {get_param: verify_ca} secgroup_kube_master_id: {get_resource: secgroup_master} http_proxy: {get_param: http_proxy} https_proxy: {get_param: https_proxy} no_proxy: {get_param: no_proxy} kube_version: {get_param: kube_version} kube_dashboard_version: {get_param: kube_dashboard_version} wait_condition_timeout: {get_param: wait_condition_timeout} cluster_uuid: {get_param: cluster_uuid} api_pool_id: {get_attr: [api_lb, pool_id]} etcd_pool_id: {get_attr: [etcd_lb, pool_id]} magnum_url: {get_param: magnum_url} trustee_user_id: {get_param: trustee_user_id} trustee_password: {get_param: 
trustee_password} trust_id: {get_param: trust_id} auth_url: {get_param: auth_url} hyperkube_image: {get_param: hyperkube_image} insecure_registry_url: {get_param: insecure_registry_url} container_runtime: {get_param: container_runtime} prometheus_monitoring: {get_param: prometheus_monitoring} grafana_admin_passwd: {get_param: grafana_admin_passwd} etcd_lb_vip: {get_attr: [etcd_lb, address]} dns_service_ip: {get_param: dns_service_ip} dns_cluster_domain: {get_param: dns_cluster_domain} openstack_ca: {get_param: openstack_ca} nodes_server_group_id: {get_resource: nodes_server_group} ###################################################################### # # kubernetes minions. This is a resource group that will initially # create minions, and needs to be manually scaled. # kube_minions: type: OS::Heat::ResourceGroup depends_on: - network - kube_masters properties: count: {get_param: number_of_minions} removal_policies: [{resource_list: {get_param: minions_to_remove}}] resource_def: type: kubeminion.yaml properties: name: list_join: - '-' - [{ get_param: 'OS::stack_name' }, 'minion', '%index%'] ssh_key_name: {get_param: ssh_key_name} server_image: {get_param: server_image} minion_flavor: {get_param: minion_flavor} fixed_network: {get_attr: [network, fixed_network]} fixed_subnet: {get_attr: [network, fixed_subnet]} flannel_network_cidr: {get_param: flannel_network_cidr} kube_master_ip: {get_attr: [api_address_lb_switch, private_ip]} etcd_server_ip: {get_attr: [etcd_address_lb_switch, private_ip]} external_network: {get_param: external_network} kube_allow_priv: {get_param: kube_allow_priv} network_driver: {get_param: network_driver} kubernetes_port: {get_param: kubernetes_port} tls_disabled: {get_param: tls_disabled} verify_ca: {get_param: verify_ca} secgroup_kube_minion_id: {get_resource: secgroup_minion_all_open} http_proxy: {get_param: http_proxy} https_proxy: {get_param: https_proxy} no_proxy: {get_param: no_proxy} kube_version: {get_param: kube_version} 
wait_condition_timeout: {get_param: wait_condition_timeout} cluster_uuid: {get_param: cluster_uuid} magnum_url: {get_param: magnum_url} trustee_user_id: {get_param: trustee_user_id} trustee_password: {get_param: trustee_password} trust_id: {get_param: trust_id} auth_url: {get_param: auth_url} hyperkube_image: {get_param: hyperkube_image} insecure_registry_url: {get_param: insecure_registry_url} container_runtime: {get_param: container_runtime} prometheus_monitoring: {get_param: prometheus_monitoring} dns_service_ip: {get_param: dns_service_ip} dns_cluster_domain: {get_param: dns_cluster_domain} openstack_ca: {get_param: openstack_ca} nodes_server_group_id: {get_resource: nodes_server_group} outputs: api_address: value: str_replace: template: api_ip_address params: api_ip_address: {get_attr: [api_address_floating_switch, ip_address]} description: > This is the API endpoint of the Kubernetes cluster. Use this to access the Kubernetes API. kube_masters_private: value: {get_attr: [kube_masters, kube_master_ip]} description: > This is a list of the "private" IP addresses of all the Kubernetes masters. kube_masters: value: {get_attr: [kube_masters, kube_master_external_ip]} description: > This is a list of the "public" IP addresses of all the Kubernetes masters. Use these IP addresses to log in to the Kubernetes masters via ssh or to access the Kubernetes API. kube_minions_private: value: {get_attr: [kube_minions, kube_minion_ip]} description: > This is a list of the "private" IP addresses of all the Kubernetes minions. kube_minions: value: {get_attr: [kube_minions, kube_minion_external_ip]} description: > This is a list of the "public" IP addresses of all the Kubernetes minions. Use these IP addresses to log in to the Kubernetes minions via ssh. 
magnum-6.1.0/magnum/drivers/k8s_coreos_v1/templates/kubemaster.yaml0000666000175100017510000004274513244017334025520 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is a nested stack that defines a Kubernetes master. This stack is included by an ResourceGroup resource in the parent template (kubeclusters.yaml). parameters: name: type: string description: server name server_image: type: string description: glance image used to boot the server master_flavor: type: string description: flavor to use when booting the server ssh_key_name: type: string description: name of ssh key to be provisioned on our server external_network: type: string description: uuid/name of a network to use for floating ip addresses discovery_url: type: string description: > Discovery URL used for bootstrapping the etcd cluster. api_pool_id: type: string description: ID of the load balancer pool of k8s API server. etcd_pool_id: type: string description: ID of the load balancer pool of etcd server. portal_network_cidr: type: string description: > address range used by kubernetes for service portals kube_allow_priv: type: string description: > whether or not kubernetes should permit privileged containers. constraints: - allowed_values: ["true", "false"] flannel_network_cidr: type: string description: network range for flannel overlay network flannel_network_subnetlen: type: number description: size of subnet assigned to each master flannel_backend: type: string description: > specify the backend for flannel, default udp backend constraints: - allowed_values: ["udp", "vxlan", "host-gw"] system_pods_initial_delay: type: number description: > health check, time to wait for system pods (podmaster, scheduler) to boot (in seconds) default: 30 system_pods_timeout: type: number description: > health check, timeout for system pods (podmaster, scheduler) to answer. 
(in seconds) default: 5 admission_control_list: type: string description: > List of admission control plugins to activate fixed_network: type: string description: Network from which to allocate fixed addresses. fixed_subnet: type: string description: Subnet from which to allocate fixed addresses. wait_condition_timeout: type: number description : > timeout for the Wait Conditions secgroup_kube_master_id: type: string description: ID of the security group for kubernetes master. network_driver: type: string description: network driver to use for instantiating container networks tls_disabled: type: boolean description: whether or not to enable TLS kube_dashboard_enabled: type: boolean description: whether or not to disable kubernetes dashboard influx_grafana_dashboard_enabled: type: boolean description: whether or not to disable kubernetes dashboard verify_ca: type: boolean description: whether or not to validate certificate authority kubernetes_port: type: number description: > The port which are used by kube-apiserver to provide Kubernetes service. default: 6443 kube_version: type: string description: version of kubernetes used for kubernetes cluster kube_dashboard_version: type: string description: version of kubernetes dashboard used for kubernetes cluster hyperkube_image: type: string description: > Docker registry used for hyperkube image cluster_uuid: type: string description: identifier for the cluster this template is generating prometheus_monitoring: type: boolean description: > whether or not to have prometheus and grafana deployed grafana_admin_passwd: type: string hidden: true description: > admin user password for the Grafana monitoring interface magnum_url: type: string description: endpoint to retrieve TLS certs from api_public_address: type: string description: Public IP address of the Kubernetes master server. default: "" api_private_address: type: string description: Private IP address of the Kubernetes master server. 
default: "" http_proxy: type: string description: http proxy address for docker https_proxy: type: string description: https proxy address for docker no_proxy: type: string description: no proxies for docker trustee_user_id: type: string description: user id of the trustee default: "" trustee_password: type: string description: password of the trustee default: "" hidden: true trust_id: type: string description: id of the trust which is used by the trustee default: "" hidden: true auth_url: type: string description: url for keystone insecure_registry_url: type: string description: insecure registry url container_runtime: type: string description: > Container runtime to use with Kubernetes. etcd_lb_vip: type: string description: > etcd lb vip private used to generate certs on master. default: "" dns_service_ip: type: string description: > address used by Kubernetes DNS service dns_cluster_domain: type: string description: > domain name for cluster DNS openstack_ca: type: string description: The OpenStack CA certificate to install on the node. nodes_server_group_id: type: string description: ID of the server group for kubernetes cluster nodes. resources: master_wait_handle: type: OS::Heat::WaitConditionHandle master_wait_condition: type: OS::Heat::WaitCondition depends_on: kube-master properties: handle: {get_resource: master_wait_handle} timeout: {get_param: wait_condition_timeout} ###################################################################### # # resource that exposes the IPs of either the kube master or the API # LBaaS pool depending on whether LBaaS is enabled for the cluster. 
# api_address_switch: type: Magnum::ApiGatewaySwitcher properties: pool_public_ip: {get_param: api_public_address} pool_private_ip: {get_param: api_private_address} master_public_ip: {get_attr: [kube_master_floating, floating_ip_address]} master_private_ip: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} ###################################################################### # # software configs. these are components that are combined into # a multipart MIME user-data archive. # write_heat_params: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: template: {get_file: fragments/write-heat-params-master.yaml} params: "$KUBE_API_PUBLIC_ADDRESS": {get_attr: [api_address_switch, public_ip]} "$KUBE_API_PRIVATE_ADDRESS": {get_attr: [api_address_switch, private_ip]} "$KUBE_NODE_PUBLIC_IP": {get_attr: [kube_master_floating, floating_ip_address]} "$KUBE_NODE_IP": {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} "$KUBE_ALLOW_PRIV": {get_param: kube_allow_priv} "$FLANNEL_NETWORK_CIDR": {get_param: flannel_network_cidr} "$FLANNEL_NETWORK_SUBNETLEN": {get_param: flannel_network_subnetlen} "$FLANNEL_BACKEND": {get_param: flannel_backend} "$SYSTEM_PODS_INITIAL_DELAY": {get_param: system_pods_initial_delay} "$SYSTEM_PODS_TIMEOUT": {get_param: system_pods_timeout} "$PORTAL_NETWORK_CIDR": {get_param: portal_network_cidr} "$ADMISSION_CONTROL_LIST": {get_param: admission_control_list} "$CLUSTER_SUBNET": {get_param: fixed_subnet} "$ETCD_DISCOVERY_URL": {get_param: discovery_url} "$WAIT_CURL": {get_attr: [master_wait_handle, curl_cli]} "$NETWORK_DRIVER": {get_param: network_driver} "$KUBE_API_PORT": {get_param: kubernetes_port} "$TLS_DISABLED": {get_param: tls_disabled} "$VERIFY_CA": {get_param: verify_ca} "$KUBE_DASHBOARD_ENABLED": {get_param: kube_dashboard_enabled} "$INFLUX_GRAFANA_DASHBOARD_ENABLED": {get_param: enable_influx_grafana_dashboard} "$KUBE_VERSION": {get_param: kube_version} "$KUBE_DASHBOARD_VERSION": {get_param: 
kube_dashboard_version} "$CLUSTER_UUID": {get_param: cluster_uuid} "$MAGNUM_URL": {get_param: magnum_url} "$HTTP_PROXY": {get_param: http_proxy} "$HTTPS_PROXY": {get_param: https_proxy} "$NO_PROXY": {get_param: no_proxy} "$TRUSTEE_USER_ID": {get_param: trustee_user_id} "$TRUSTEE_PASSWORD": {get_param: trustee_password} "$TRUST_ID": {get_param: trust_id} "$AUTH_URL": {get_param: auth_url} "$KUBE_CERTS_PATH": "/etc/kubernetes/ssl" "$HOST_CERTS_PATH": "/usr/share/ca-certificates" "$HYPERKUBE_IMAGE_REPO": str_replace: template: insecure_registry_urlhyperkube_image params: insecure_registry_url: { get_param: insecure_registry_url } hyperkube_image: { get_param: hyperkube_image } "$INSECURE_REGISTRY_URL": {get_param: insecure_registry_url} "$CONTAINER_RUNTIME": {get_param: container_runtime} "$ETCD_LB_VIP": {get_param: etcd_lb_vip} "$DNS_SERVICE_IP": {get_param: dns_service_ip} "$DNS_CLUSTER_DOMAIN": {get_param: dns_cluster_domain} add_ext_ca_certs: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: params: $OPENSTACK_CA: {get_param: openstack_ca} template: {get_file: fragments/add-ext-ca-certs.yaml} configure_etcd: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/configure-etcd.yaml} make_cert: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/make-cert.yaml} write_network_config: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/write-network-config.yaml} enable_network_service: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/enable-network-service.yaml} enable_kubelet: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/enable-kubelet-master.yaml} enable_kube_apiserver: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/enable-kube-apiserver.yaml} create_kube_namespace: type: OS::Heat::SoftwareConfig 
properties: group: ungrouped config: {get_file: fragments/create-kube-namespace.yaml} enable_kube_proxy: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/enable-kube-proxy-master.yaml} enable_kube_controller_manager: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/enable-kube-controller-manager.yaml} enable_kube_scheduler: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/enable-kube-scheduler.yaml} enable_kube_dashboard: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/enable-kube-dashboard.yaml} wc_notify: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/wc-notify.yaml} add_proxy: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/add-proxy.yaml} configure_docker: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/configure-docker.yaml} enable_coredns: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/enable-coredns.yaml} kube_master_init: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: template: | $add_ext_ca_certs $write_heat_params $make_cert $configure_docker $add_proxy $configure_etcd $write_network_config $enable_network_service $enable_kubelet $enable_kube_apiserver $create_kube_namespace $enable_kube_proxy $enable_kube_controller_manager $enable_kube_scheduler $enable_kube_dashboard $enable_coredns $wc_notify coreos: units: - name: "add-ext-ca-certs.service" command: "start" - name: "make-cert.service" command: "start" - name: "configure-docker.service" command: "start" - name: "add-proxy.service" command: "start" - name: "configure-etcd.service" command: "start" - name: "write-network-config.service" command: "start" - name: "enable-network-service.service" command: "start" - name: "enable-kubelet.service" 
command: "start" - name: "enable-kube-apiserver.service" command: "start" - name: "create-kube-namespace.service" command: "start" - name: "enable-kube-proxy.service" command: "start" - name: "enable-kube-controller-manager.service" command: "start" - name: "enable-kube-scheduler.service" command: "start" - name: "enable-kube-dashboard.service" command: "start" - name: "enable-coredns.service" command: "start" - name: "wc-notify.service" command: "start" params: "$add_ext_ca_certs": {get_attr: [add_ext_ca_certs, config]} "$write_heat_params": {get_attr: [write_heat_params, config]} "$make_cert": {get_attr: [make_cert, config]} "$configure_docker": {get_attr: [configure_docker, config]} "$add_proxy": {get_attr: [add_proxy, config]} "$configure_etcd": {get_attr: [configure_etcd, config]} "$write_network_config": {get_attr: [write_network_config, config]} "$enable_network_service": {get_attr: [enable_network_service, config]} "$enable_kubelet": {get_attr: [enable_kubelet, config]} "$enable_kube_apiserver": {get_attr: [enable_kube_apiserver, config]} "$create_kube_namespace": {get_attr: [create_kube_namespace, config]} "$enable_kube_proxy": {get_attr: [enable_kube_proxy, config]} "$enable_kube_controller_manager": {get_attr: [enable_kube_controller_manager, config]} "$enable_kube_scheduler": {get_attr: [enable_kube_scheduler, config]} "$enable_kube_dashboard": {get_attr: [enable_kube_dashboard, config]} "$enable_coredns": {get_attr: [enable_coredns, config]} "$wc_notify": {get_attr: [wc_notify, config]} ###################################################################### # # a single kubernetes master. 
# # do NOT use "_" (underscore) in the Nova server name # it creates a mismatch between the generated Nova name and its hostname # which can lead to weird problems kube-master: type: OS::Nova::Server properties: name: {get_param: name} image: {get_param: server_image} flavor: {get_param: master_flavor} key_name: {get_param: ssh_key_name} user_data_format: RAW user_data: {get_resource: kube_master_init} networks: - port: {get_resource: kube_master_eth0} scheduler_hints: { group: { get_param: nodes_server_group_id }} kube_master_eth0: type: OS::Neutron::Port properties: network: {get_param: fixed_network} security_groups: - {get_param: secgroup_kube_master_id} fixed_ips: - subnet: {get_param: fixed_subnet} allowed_address_pairs: - ip_address: {get_param: flannel_network_cidr} replacement_policy: AUTO kube_master_floating: type: Magnum::Optional::KubeMaster::Neutron::FloatingIP properties: floating_network: {get_param: external_network} port_id: {get_resource: kube_master_eth0} api_pool_member: type: Magnum::Optional::Neutron::LBaaS::PoolMember properties: pool: {get_param: api_pool_id} address: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} subnet: { get_param: fixed_subnet } protocol_port: {get_param: kubernetes_port} etcd_pool_member: type: Magnum::Optional::Neutron::LBaaS::PoolMember properties: pool: {get_param: etcd_pool_id} address: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} subnet: { get_param: fixed_subnet } protocol_port: 2379 outputs: kube_master_ip: value: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} description: > This is the "private" IP address of the Kubernetes master node. kube_master_external_ip: value: {get_attr: [kube_master_floating, floating_ip_address]} description: > This is the "public" IP address of the Kubernetes master node. 
magnum-6.1.0/magnum/drivers/k8s_coreos_v1/templates/fragments/0000775000175100017510000000000013244017675024452 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/k8s_coreos_v1/templates/fragments/write-heat-params.yaml0000666000175100017510000000336213244017334030666 0ustar zuulzuul00000000000000#cloud-config merge_how: dict(recurse_array)+list(append) write_files: - path: /etc/sysconfig/heat-params owner: "root:root" permissions: "0600" content: | KUBE_ALLOW_PRIV="$KUBE_ALLOW_PRIV" KUBE_MASTER_IP="$KUBE_MASTER_IP" KUBE_API_PORT="$KUBE_API_PORT" KUBE_NODE_PUBLIC_IP="$KUBE_NODE_PUBLIC_IP" KUBE_NODE_IP="$KUBE_NODE_IP" ETCD_SERVER_IP="$ETCD_SERVER_IP" DOCKER_VOLUME="$DOCKER_VOLUME" DOCKER_STORAGE_DRIVER="$DOCKER_STORAGE_DRIVER" NETWORK_DRIVER="$NETWORK_DRIVER" REGISTRY_ENABLED="$REGISTRY_ENABLED" REGISTRY_PORT="$REGISTRY_PORT" SWIFT_REGION="$SWIFT_REGION" REGISTRY_CONTAINER="$REGISTRY_CONTAINER" REGISTRY_INSECURE="$REGISTRY_INSECURE" REGISTRY_CHUNKSIZE="$REGISTRY_CHUNKSIZE" TLS_DISABLED="$TLS_DISABLED" VERIFY_CA="$VERIFY_CA" CLUSTER_UUID="$CLUSTER_UUID" MAGNUM_URL="$MAGNUM_URL" AUTH_URL="$AUTH_URL" USERNAME="$USERNAME" PASSWORD="$PASSWORD" VOLUME_DRIVER="$VOLUME_DRIVER" REGION_NAME="$REGION_NAME" TENANT_NAME="$TENANT_NAME" HTTP_PROXY="$HTTP_PROXY" HTTPS_PROXY="$HTTPS_PROXY" NO_PROXY="$NO_PROXY" WAIT_CURL="$WAIT_CURL" KUBE_VERSION="$KUBE_VERSION" TRUSTEE_USER_ID="$TRUSTEE_USER_ID" TRUSTEE_USERNAME="$TRUSTEE_USERNAME" TRUSTEE_PASSWORD="$TRUSTEE_PASSWORD" TRUSTEE_DOMAIN_ID="$TRUSTEE_DOMAIN_ID" TRUST_ID="$TRUST_ID" INSECURE_REGISTRY_URL="$INSECURE_REGISTRY_URL" KUBE_CERTS_PATH="$KUBE_CERTS_PATH" HOST_CERTS_PATH="$HOST_CERTS_PATH" HYPERKUBE_IMAGE_REPO="$HYPERKUBE_IMAGE_REPO" CONTAINER_RUNTIME="$CONTAINER_RUNTIME" DNS_SERVICE_IP="$DNS_SERVICE_IP" DNS_CLUSTER_DOMAIN="$DNS_CLUSTER_DOMAIN" magnum-6.1.0/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kube-dashboard.yaml0000666000175100017510000000610513244017334031431 0ustar zuulzuul00000000000000#cloud-config 
write_files: - path: /etc/systemd/system/enable-kube-dashboard.service owner: "root:root" permissions: "0644" content: | [Unit] Description=Configure Kubernetes Dashboard [Service] Type=oneshot EnvironmentFile=/etc/sysconfig/heat-params ExecStart=/etc/sysconfig/enable-kube-dashboard.sh [Install] WantedBy=multi-user.target - path: /etc/sysconfig/enable-kube-dashboard.sh owner: "root:root" permissions: "0755" content: | #!/bin/sh TEMPLATE=/etc/kubernetes/addons/kubedash-svc.yaml mkdir -p $(dirname ${TEMPLATE}) cat > $TEMPLATE < $TEMPLATE < /dev/null curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /etc/kubernetes/addons/kubedash-rc.yaml)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/replicationcontrollers" > /dev/null fi magnum-6.1.0/magnum/drivers/k8s_coreos_v1/templates/fragments/add-ext-ca-certs.yaml0000666000175100017510000000141013244017334030351 0ustar zuulzuul00000000000000#cloud-config merge_how: dict(recurse_array)+list(append) write_files: - path: /etc/systemd/system/add-ext-ca-certs.service owner: "root:root" permissions: "0644" content: | [Unit] Description=Install custom CA certificates [Service] Type=oneshot ExecStart=/etc/sysconfig/add-ext-ca-certs.sh [Install] WantedBy=multi-user.target - path: /etc/sysconfig/add-ext-ca-certs.sh owner: "root:root" permissions: "0755" content: | #!/bin/sh CERT_FILE=/etc/ssl/certs/openstack-ca.pem if [ -n "$OPENSTACK_CA" ] then echo -ne "$OPENSTACK_CA" | tee -a ${CERT_FILE} chmod 0644 ${CERT_FILE} chown root:root ${CERT_FILE} update-ca-certificates fi magnum-6.1.0/magnum/drivers/k8s_coreos_v1/templates/fragments/wc-notify.yaml0000666000175100017510000000111713244017334027247 0ustar zuulzuul00000000000000#cloud-config write_files: - path: /etc/systemd/system/wc-notify.service owner: "root:root" permissions: "0644" content: | [Unit] Description=Notify Heat [Service] Type=oneshot EnvironmentFile=/etc/sysconfig/heat-params ExecStart=/etc/sysconfig/wc-notify.sh [Install] WantedBy=multi-user.target - 
path: /etc/sysconfig/wc-notify.sh owner: "root:root" permissions: "0755" content: | #!/bin/bash -v command="$WAIT_CURL $VERIFY_CA --data-binary '{\"status\": \"SUCCESS\"}'" eval $(echo "$command") magnum-6.1.0/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-coredns.yaml0000666000175100017510000001177113244017334030220 0ustar zuulzuul00000000000000#cloud-config write_files: - path: /etc/systemd/system/enable-coredns.service owner: "root:root" permissions: "0644" content: | [Unit] Description=Configure Kubernetes CoreDNS Addon [Service] Type=oneshot EnvironmentFile=/etc/sysconfig/heat-params ExecStart=/etc/sysconfig/enable-coredns.sh [Install] WantedBy=multi-user.target - path: /etc/sysconfig/enable-coredns.sh owner: "root:root" permissions: "0755" content: | #!/bin/sh TEMPLATE=/etc/kubernetes/addons/coredns-sa.yaml mkdir -p $(dirname ${TEMPLATE}) cat > $TEMPLATE < $TEMPLATE < $TEMPLATE < $TEMPLATE < /dev/null curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /etc/kubernetes/addons/coredns-cm.yaml)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/configmaps" > /dev/null curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /etc/kubernetes/addons/coredns-de.yaml)" "http://127.0.0.1:8080/apis/extensions/v1beta1/namespaces/kube-system/deployments" > /dev/null curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /etc/kubernetes/addons/coredns-svc.yaml)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/services" > /dev/null magnum-6.1.0/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kubelet-master.yaml0000666000175100017510000000713713244017334031510 0ustar zuulzuul00000000000000#cloud-config write_files: - path: /etc/systemd/system/enable-kubelet.service owner: "root:root" permissions: "0644" content: | [Unit] Description=Enable Kubelet [Service] Type=oneshot EnvironmentFile=/etc/sysconfig/heat-params ExecStart=/etc/sysconfig/enable-kubelet-master.sh [Install] WantedBy=multi-user.target - path: 
/etc/sysconfig/enable-kubelet-master.sh owner: "root:root" permissions: "0755" content: | #!/bin/sh if [ -z "${KUBE_NODE_IP}" ]; then KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4) fi if [ -n "${INSECURE_REGISTRY_URL}" ]; then INSECURE_REGISTRY_ARGS="--pod-infra-container-image=${INSECURE_REGISTRY_URL}/google_containers/pause\:3.0" else INSECURE_REGISTRY_ARGS="" fi HOSTNAME_OVERRIDE=$(hostname --short | sed 's/\.novalocal//') uuid_file="/var/run/kubelet-pod.uuid" CONF_FILE=/etc/systemd/system/kubelet.service cat > $CONF_FILE < $TEMPLATE #!/bin/sh # This is bind mounted into the kubelet rootfs and all rkt shell-outs go # through this rkt wrapper. It essentially enters the host mount namespace # (which it is already in) only for the purpose of breaking out of the chroot # before calling rkt. It makes things like rkt gc work and avoids bind mounting # in certain rkt filesystem dependancies into the kubelet rootfs. This can # eventually be obviated when the write-api stuff gets upstream and rkt gc is # through the api-server. 
Related issue: # https://github.com/coreos/rkt/issues/2878 exec nsenter -m -u -i -n -p -t 1 -- /usr/bin/rkt "\$@" EOF systemctl enable kubelet systemctl --no-block start kubelet magnum-6.1.0/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kube-scheduler.yaml0000666000175100017510000000243713244017334031464 0ustar zuulzuul00000000000000#cloud-config write_files: - path: /etc/systemd/system/enable-kube-scheduler.service owner: "root:root" permissions: "0644" content: | [Unit] Description=Enable Kubernetes Scheduler [Service] Type=oneshot EnvironmentFile=/etc/sysconfig/heat-params ExecStart=/etc/sysconfig/enable-kube-scheduler.sh [Install] WantedBy=multi-user.target - path: /etc/sysconfig/enable-kube-scheduler.sh owner: "root:root" permissions: "0755" content: | #!/bin/sh TEMPLATE=/etc/kubernetes/manifests/kube-scheduler.yaml mkdir -p $(dirname ${TEMPLATE}) cat > ${TEMPLATE} < ${KUBE_SYSTEM_JSON} < ${TEMPLATE} < $DOCKER_HTTP_PROXY_CONF [Service] Environment=HTTP_PROXY=$HTTP_PROXY EOF DOCKER_RESTART=1 echo "http_proxy=$HTTP_PROXY" >> $ENVIRONMENT fi if [ -n "$HTTPS_PROXY" ]; then cat < $DOCKER_HTTPS_PROXY_CONF [Service] Environment=HTTPS_PROXY=$HTTPS_PROXY EOF DOCKER_RESTART=1 echo "https_proxy=$HTTPS_PROXY" >> $ENVIRONMENT fi if [ -n "$NO_PROXY" ]; then cat < $DOCKER_NO_PROXY_CONF [Service] Environment=NO_PROXY=$NO_PROXY EOF DOCKER_RESTART=1 echo "no_proxy=$NO_PROXY" >> $ENVIRONMENT fi if [ "$DOCKER_RESTART" -eq 1 ]; then systemctl daemon-reload systemctl --no-block restart docker.service fi magnum-6.1.0/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-network-service-client.yaml0000666000175100017510000000535213244017334033164 0ustar zuulzuul00000000000000#cloud-config write_files: - path: /etc/systemd/system/enable-network-service.service owner: "root:root" permissions: "0644" content: | [Unit] Description=Enable Network Service [Service] Type=oneshot EnvironmentFile=/etc/sysconfig/heat-params ExecStart=/etc/sysconfig/enable-network-service.sh 
[Install] WantedBy=multi-user.target - path: /etc/sysconfig/enable-network-service.sh owner: "root:root" permissions: "0755" content: | #!/bin/sh if [ "$NETWORK_DRIVER" != "flannel" ]; then exit 0 fi if [ -z "${KUBE_NODE_IP}" ]; then KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4) fi ETCD_SERVER_IP=${ETCD_SERVER_IP:-127.0.0.1} PROTOCOL=https if [ "$TLS_DISABLED" = "True" ]; then PROTOCOL=http fi ENV_FILE=/etc/flannel/options.env mkdir -p $(dirname $ENV_FILE) cat > $ENV_FILE <> $ENV_FILE < $DROP_IN_FILE < $DROP_IN_FILE < $DOCKER_FLANNEL_CONF < $CNI { "name": "podnet", "type": "flannel", "delegate": { "isDefaultGateway": true } } EOF DOCKER_FLANNEL_CONF=/etc/kubernetes/cni/docker_opts_cni.env mkdir -p $(dirname $DOCKER_FLANNEL_CONF) cat > $DOCKER_FLANNEL_CONF < $FLANNEL_JSON < $ENV_FILE < $DROP_IN_FILE < $DROP_IN_FILE < $DOCKER_FLANNEL_CONF < $CNI { "name": "podnet", "type": "flannel", "delegate": { "isDefaultGateway": true } } EOF DOCKER_FLANNEL_CONF=/etc/kubernetes/cni/docker_opts_cni.env mkdir -p $(dirname $DOCKER_FLANNEL_CONF) cat > $DOCKER_FLANNEL_CONF < $CONF_FILE < $TEMPLATE #!/bin/sh # This is bind mounted into the kubelet rootfs and all rkt shell-outs go # through this rkt wrapper. It essentially enters the host mount namespace # (which it is already in) only for the purpose of breaking out of the chroot # before calling rkt. It makes things like rkt gc work and avoids bind mounting # in certain rkt filesystem dependancies into the kubelet rootfs. This can # eventually be obviated when the write-api stuff gets upstream and rkt gc is # through the api-server. 
Related issue: # https://github.com/coreos/rkt/issues/2878 exec nsenter -m -u -i -n -p -t 1 -- /usr/bin/rkt "\$@" EOF systemctl enable kubelet systemctl --no-block start kubelet magnum-6.1.0/magnum/drivers/k8s_coreos_v1/templates/fragments/make-cert.yaml0000666000175100017510000001167713244017334027214 0ustar zuulzuul00000000000000#cloud-config write_files: - path: /etc/systemd/system/make-cert.service owner: "root:root" permissions: "0644" content: | [Unit] Description=Make TLS certificates [Service] Type=oneshot EnvironmentFile=/etc/sysconfig/heat-params ExecStart=/etc/sysconfig/make-cert.sh [Install] WantedBy=multi-user.target - path: /etc/sysconfig/make-cert.sh owner: "root:root" permissions: "0755" content: | #!/bin/bash # Parse the JSON response that contains the TLS certificate, and print # out the certificate content. function parse_json_response { json_response=$1 # {..,"pem": "ABCD",..} -> ABCD key=$(echo "$json_response" | sed 's/^.*"pem": "\([^"]*\)".*$/\1/') # decode newline characters key=$(echo "$key" | sed 's/\\n/\n/g') echo "$key" } set -o errexit set -o nounset set -o pipefail if [ "$TLS_DISABLED" == "True" ]; then exit 0 fi if [ "$VERIFY_CA" == "True" ]; then VERIFY_CA="" else VERIFY_CA="-k" fi if [[ -z "${KUBE_NODE_PUBLIC_IP}" ]]; then KUBE_NODE_PUBLIC_IP=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4) fi if [[ -z "${KUBE_NODE_IP}" ]]; then KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4) fi sans="IP:${KUBE_NODE_PUBLIC_IP},IP:${KUBE_NODE_IP}" if [ "${KUBE_NODE_PUBLIC_IP}" != "${KUBE_API_PUBLIC_ADDRESS}" ] \ && [ -n "${KUBE_API_PUBLIC_ADDRESS}" ]; then sans="${sans},IP:${KUBE_API_PUBLIC_ADDRESS}" fi if [ "${KUBE_NODE_IP}" != "${KUBE_API_PRIVATE_ADDRESS}" ] \ && [ -n "${KUBE_API_PRIVATE_ADDRESS}" ]; then sans="${sans},IP:${KUBE_API_PRIVATE_ADDRESS}" fi MASTER_HOSTNAME=${MASTER_HOSTNAME:-} if [[ -n "${MASTER_HOSTNAME}" ]]; then sans="${sans},DNS:${MASTER_HOSTNAME}" fi sans="${sans},IP:127.0.0.1" 
KUBE_SERVICE_IP=$(echo $PORTAL_NETWORK_CIDR | awk 'BEGIN{FS="[./]"; OFS="."}{print $1,$2,$3,$4 + 1}') sans="${sans},IP:${KUBE_SERVICE_IP}" if [[ -n "${ETCD_LB_VIP}" ]]; then sans="${sans},IP:${ETCD_LB_VIP}" fi cert_conf_dir=${KUBE_CERTS_PATH}/conf mkdir -p ${cert_conf_dir} CA_CERT=${KUBE_CERTS_PATH}/ca.pem SERVER_CERT=${KUBE_CERTS_PATH}/apiserver.pem SERVER_CSR=${KUBE_CERTS_PATH}/apiserver.pem SERVER_KEY=${KUBE_CERTS_PATH}/apiserver-key.pem if [ -f ${SERVER_CERT} ] || [ -f ${SERVER_KEY} ] || [ -f ${SERVER_CSR} ]; then exit 0 fi #Get a token by user credentials and trust cat > auth.json << EOF { "auth": { "identity": { "methods": [ "password" ], "password": { "user": { "id": "$TRUSTEE_USER_ID", "password": "$TRUSTEE_PASSWORD" } } } } } EOF USER_TOKEN=`curl $VERIFY_CA -s -i -X POST -H "Content-Type: application/json" -d @auth.json \ $AUTH_URL/auth/tokens | grep X-Subject-Token | awk '{print $2}' | tr -d '\r'` rm -rf auth.json # Get CA certificate for this cluster ca_cert_json=$(curl $VERIFY_CA -X GET \ -H "X-Auth-Token: $USER_TOKEN" \ -H "OpenStack-API-Version: container-infra latest" \ $MAGNUM_URL/certificates/$CLUSTER_UUID) parse_json_response "${ca_cert_json}" > ${CA_CERT} # Create config for server's csr cat > ${cert_conf_dir}/openssl.cnf < ${SERVER_CERT} chmod 600 ${KUBE_CERTS_PATH}/*-key.pem # Certs will also be used by etcd service chown -R etcd:etcd ${KUBE_CERTS_PATH} magnum-6.1.0/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kube-proxy-master.yaml0000666000175100017510000000303313244017334032151 0ustar zuulzuul00000000000000#cloud-config write_files: - path: /etc/systemd/system/enable-kube-proxy.service owner: "root:root" permissions: "0644" content: | [Unit] Description=Configure Kubernetes Proxy [Service] Type=oneshot EnvironmentFile=/etc/sysconfig/heat-params ExecStart=/etc/sysconfig/enable-kube-proxy-master.sh [Install] WantedBy=multi-user.target - path: /etc/sysconfig/enable-kube-proxy-master.sh owner: "root:root" permissions: "0755" content: 
| #!/bin/sh TEMPLATE=/etc/kubernetes/manifests/kube-proxy.yaml mkdir -p $(dirname ${TEMPLATE}) cat > ${TEMPLATE} < ABCD key=$(echo "$json_response" | sed 's/^.*"pem": "\([^"]*\)".*$/\1/') # decode newline characters key=$(echo "$key" | sed 's/\\n/\n/g') echo "$key" } set -o errexit set -o nounset set -o pipefail if [ "$TLS_DISABLED" == "True" ]; then exit 0 fi if [ "$VERIFY_CA" == "True" ]; then VERIFY_CA="" else VERIFY_CA="-k" fi cert_conf_dir=${KUBE_CERTS_PATH}/conf mkdir -p ${cert_conf_dir} CA_CERT=${KUBE_CERTS_PATH}/ca.pem CLIENT_CERT=${KUBE_CERTS_PATH}/worker.pem CLIENT_CSR=${KUBE_CERTS_PATH}/worker.csr CLIENT_KEY=${KUBE_CERTS_PATH}/worker-key.pem if [ -f ${CLIENT_CERT} ] || [ -f ${CLIENT_KEY} ] || [ -f ${CLIENT_CSR} ]; then exit 0 fi #Get a token by user credentials and trust cat > auth.json << EOF { "auth": { "identity": { "methods": [ "password" ], "password": { "user": { "id": "$TRUSTEE_USER_ID", "password": "$TRUSTEE_PASSWORD" } } } } } EOF USER_TOKEN=`curl $VERIFY_CA -s -i -X POST -H "Content-Type: application/json" -d @auth.json \ $AUTH_URL/auth/tokens | grep X-Subject-Token | awk '{print $2}' | tr -d '\r'` rm -rf auth.json ca_cert_json=$(curl $VERIFY_CA -X GET \ -H "X-Auth-Token: $USER_TOKEN" \ -H "OpenStack-API-Version: container-infra latest" \ $MAGNUM_URL/certificates/$CLUSTER_UUID) parse_json_response "${ca_cert_json}" > ${CA_CERT} # Create config for client's csr cat > ${cert_conf_dir}/worker-openssl.conf < ${CLIENT_CERT} chmod 600 ${KUBE_CERTS_PATH}/*-key.pem chown root:root ${KUBE_CERTS_PATH}/*-key.pem magnum-6.1.0/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kube-controller-manager.yaml0000666000175100017510000000453113244017334033276 0ustar zuulzuul00000000000000#cloud-config write_files: - path: /etc/systemd/system/enable-kube-controller-manager.service owner: "root:root" permissions: "0644" content: | [Unit] Description=Enable Kubernetes Controller Manager [Service] Type=oneshot EnvironmentFile=/etc/sysconfig/heat-params 
ExecStart=/etc/sysconfig/enable-kube-controller-manager.sh [Install] WantedBy=multi-user.target - path: /etc/sysconfig/enable-kube-controller-manager.sh owner: "root:root" permissions: "0755" content: | #!/bin/sh SYSCONFIG_PATH=/etc/sysconfig SERVICE_ACCOUNT_PRIVATE_KEY_FILE=${KUBE_CERTS_PATH}/apiserver-key.pem ROOT_CA_FILE=${KUBE_CERTS_PATH}/ca.pem if [ "${TLS_DISABLED}" == "True" ]; then SERVICE_ACCOUNT_PRIVATE_KEY_FILE= ROOT_CA_FILE= fi TEMPLATE=/etc/kubernetes/manifests/kube-controller-manager.yaml mkdir -p $(dirname ${TEMPLATE}) cat > ${TEMPLATE} < $DROP_IN_FILE <> $DROP_IN_FILE <> $DROP_IN_FILE fi systemctl enable etcd-member systemctl --no-block start etcd-member magnum-6.1.0/magnum/drivers/k8s_coreos_v1/templates/fragments/write-kubeconfig.yaml0000666000175100017510000000122413244017334030573 0ustar zuulzuul00000000000000#cloud-config merge_how: dict(recurse_array)+list(append) write_files: - path: /etc/kubernetes/config/worker-kubeconfig.yaml owner: "root:root" permissions: "0644" content: | apiVersion: v1 kind: Config clusters: - name: local cluster: certificate-authority: /etc/kubernetes/ssl/ca.pem users: - name: kubelet user: client-certificate: /etc/kubernetes/ssl/worker.pem client-key: /etc/kubernetes/ssl/worker-key.pem contexts: - context: cluster: local user: kubelet name: kubelet-context current-context: kubelet-context magnum-6.1.0/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kube-apiserver.yaml0000666000175100017510000000546713244017334031514 0ustar zuulzuul00000000000000#cloud-config write_files: - path: /etc/systemd/system/enable-kube-apiserver.service owner: "root:root" permissions: "0644" content: | [Unit] Description=Configure Kubernetes API Server [Service] Type=oneshot EnvironmentFile=/etc/sysconfig/heat-params ExecStart=/etc/sysconfig/enable-kube-apiserver.sh [Install] WantedBy=multi-user.target - path: /etc/sysconfig/enable-kube-apiserver.sh owner: "root:root" permissions: "0755" content: | #!/bin/sh KUBE_ADMISSION_CONTROL="" 
if [ -n "${ADMISSION_CONTROL_LIST}" ] && [ "${TLS_DISABLED}" == "False" ]; then KUBE_ADMISSION_CONTROL="- --admission-control=${ADMISSION_CONTROL_LIST}" fi TLS_CERT_FILE=${KUBE_CERTS_PATH}/apiserver.pem TLS_PRIVATE_KEY_FILE=${KUBE_CERTS_PATH}/apiserver-key.pem CLIENT_CA_FILE=${KUBE_CERTS_PATH}/ca.pem INSECURE_PORT=8080 SECURE_PORT=${KUBE_API_PORT} BIND_ADDRESS_CMD="--bind-address=0.0.0.0" if [ "${TLS_DISABLED}" == "True" ]; then TLS_CERT_FILE= TLS_PRIVATE_KEY_FILE= CLIENT_CA_FILE= INSECURE_PORT=${KUBE_API_PORT} SECURE_PORT=0 BIND_ADDRESS_CMD="--insecure-bind-address=0.0.0.0" fi TEMPLATE=/etc/kubernetes/manifests/kube-apiserver.yaml mkdir -p $(dirname ${TEMPLATE}) cat > $TEMPLATE < $TEMPLATE [Service] Environment=DOCKER_OPTS=$DOCKER_OPTS EOF systemctl daemon-reload systemctl --no-block restart docker.service magnum-6.1.0/magnum/drivers/k8s_coreos_v1/templates/COPYING0000666000175100017510000002613613244017334023521 0ustar zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v2/0000775000175100017510000000000013244017675022415 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v2/version.py0000666000175100017510000000126413244017334024451 0ustar zuulzuul00000000000000# Copyright 2016 - Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. version = '2.0.0' driver = 'swarm_fedora_atomic_v2' container_version = '1.12.6' magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v2/driver.py0000666000175100017510000000221713244017334024256 0ustar zuulzuul00000000000000# Copyright 2016 Rackspace Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.drivers.heat import driver from magnum.drivers.swarm_fedora_atomic_v2 import monitor from magnum.drivers.swarm_fedora_atomic_v2 import template_def class Driver(driver.HeatDriver): @property def provides(self): return [ {'server_type': 'vm', 'os': 'fedora-atomic', 'coe': 'swarm-mode'}, ] def get_template_definition(self): return template_def.AtomicSwarmTemplateDefinition() def get_monitor(self, context, cluster): return monitor.SwarmMonitor(context, cluster) magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v2/monitor.py0000666000175100017510000000765213244017334024462 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_log import log from magnum.common import docker_utils from magnum.conductor import monitors LOG = log.getLogger(__name__) class SwarmMonitor(monitors.MonitorBase): def __init__(self, context, cluster): super(SwarmMonitor, self).__init__(context, cluster) self.data = {} self.data['nodes'] = [] self.data['containers'] = [] @property def metrics_spec(self): return { 'memory_util': { 'unit': '%', 'func': 'compute_memory_util', }, } def pull_data(self): with docker_utils.docker_for_cluster(self.context, self.cluster) as docker: system_info = docker.info() self.data['nodes'] = self._parse_node_info(system_info) # pull data from each container containers = [] for container in docker.containers(all=True): try: container = docker.inspect_container(container['Id']) except Exception as e: LOG.warning("Ignore error [%(e)s] when inspecting " "container %(container_id)s.", {'e': e, 'container_id': container['Id']}, exc_info=True) containers.append(container) self.data['containers'] = containers def compute_memory_util(self): mem_total = 0 for node in self.data['nodes']: mem_total += node['MemTotal'] mem_reserved = 0 for container in self.data['containers']: mem_reserved += container['HostConfig']['Memory'] if mem_total == 0: return 0 else: return mem_reserved * 100 / mem_total def _parse_node_info(self, system_info): """Parse system_info to retrieve memory size of each node. :param system_info: The output returned by docker.info(). Example: { u'Debug': False, u'NEventsListener': 0, u'DriverStatus': [ [u'\x08Strategy', u'spread'], [u'\x08Filters', u'...'], [u'\x08Nodes', u'2'], [u'node1', u'10.0.0.4:2375'], [u' \u2514 Containers', u'1'], [u' \u2514 Reserved CPUs', u'0 / 1'], [u' \u2514 Reserved Memory', u'0 B / 2.052 GiB'], [u'node2', u'10.0.0.3:2375'], [u' \u2514 Containers', u'2'], [u' \u2514 Reserved CPUs', u'0 / 1'], [u' \u2514 Reserved Memory', u'0 B / 2.052 GiB'] ], u'Containers': 3 } :return: Memory size of each node. 
Excample: [{'MemTotal': 2203318222.848}, {'MemTotal': 2203318222.848}] """ nodes = [] for info in system_info['DriverStatus']: key = info[0] value = info[1] if key == u' \u2514 Reserved Memory': memory = value # Example: '0 B / 2.052 GiB' memory = memory.split('/')[1].strip() # Example: '2.052 GiB' memory = memory.split(' ')[0] # Example: '2.052' memory = float(memory) * 1024 * 1024 * 1024 nodes.append({'MemTotal': memory}) return nodes magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v2/template_def.py0000666000175100017510000000272513244017334025420 0ustar zuulzuul00000000000000# Copyright 2016 Rackspace Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os from magnum.drivers.heat import swarm_mode_template_def as sftd class AtomicSwarmTemplateDefinition(sftd.SwarmModeTemplateDefinition): """Docker swarm template for a Fedora Atomic VM.""" @property def driver_module_path(self): return __name__[:__name__.rindex('.')] @property def template_path(self): return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates/swarmcluster.yaml') def get_params(self, context, cluster_template, cluster, **kwargs): ep = kwargs.pop('extra_params', {}) ep['number_of_secondary_masters'] = cluster.master_count - 1 return super(AtomicSwarmTemplateDefinition, self).get_params(context, cluster_template, cluster, extra_params=ep, **kwargs) magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v2/__init__.py0000666000175100017510000000000013244017334024506 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v2/templates/0000775000175100017510000000000013244017675024413 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v2/templates/fragments/0000775000175100017510000000000013244017675026401 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v2/templates/fragments/write-swarm-master-service.sh0000666000175100017510000000413513244017334034142 0ustar zuulzuul00000000000000#!/bin/bash . /etc/sysconfig/heat-params set -x if [ "$VERIFY_CA" == "True" ]; then VERIFY_CA="" else VERIFY_CA="-k" fi if [ "${IS_PRIMARY_MASTER}" = "True" ]; then cat > /usr/local/bin/magnum-start-swarm-manager << START_SWARM_BIN #!/bin/bash -xe docker swarm init --advertise-addr "${SWARM_NODE_IP}" if [[ \$? -eq 0 ]]; then status="SUCCESS" msg="Swarm init was successful." else status="FAILURE" msg="Failed to init swarm." 
fi sh -c "${WAIT_CURL} ${VERIFY_CA} --data-binary '{\"status\": \"\$status\", \"reason\": \"\$msg\"}'" START_SWARM_BIN else if [ "${TLS_DISABLED}" = 'False' ]; then tls="--tlsverify" tls=$tls" --tlscacert=/etc/docker/ca.crt" tls=$tls" --tlskey=/etc/docker/server.key" tls=$tls" --tlscert=/etc/docker/server.crt" fi cat > /usr/local/bin/magnum-start-swarm-manager << START_SWARM_BIN #!/bin/bash -xe i=0 until token=\$(docker $tls -H $PRIMARY_MASTER_IP swarm join-token --quiet manager) do ((i++)) [ \$i -lt 5 ] || break; sleep 5 done if [[ -z \$token ]] ; then sh -c "${WAIT_CURL} ${VERIFY_CA} --data-binary '{\"status\": \"FAILURE\", \"reason\": \"Failed to retrieve swarm join token.\"}'" fi i=0 until docker swarm join --token \$token $PRIMARY_MASTER_IP:2377 do ((i++)) [ \$i -lt 5 ] || break; sleep 5 done if [[ \$i -ge 5 ]] ; then sh -c "${WAIT_CURL} ${VERIFY_CA} --data-binary '{\"status\": \"FAILURE\", \"reason\": \"Manager failed to join swarm.\"}'" else sh -c "${WAIT_CURL} ${VERIFY_CA} --data-binary '{\"status\": \"SUCCESS\", \"reason\": \"Manager joined swarm.\"}'" fi START_SWARM_BIN fi chmod +x /usr/local/bin/magnum-start-swarm-manager cat > /etc/systemd/system/swarm-manager.service << END_SERVICE [Unit] Description=Swarm Manager After=docker.service Requires=docker.service [Service] Type=oneshot ExecStart=/usr/local/bin/magnum-start-swarm-manager [Install] WantedBy=multi-user.target END_SERVICE chown root:root /etc/systemd/system/swarm-manager.service chmod 644 /etc/systemd/system/swarm-manager.service systemctl daemon-reload systemctl start --no-block swarm-manager magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v2/templates/fragments/write-swarm-worker-service.sh0000666000175100017510000000315213244017334034156 0ustar zuulzuul00000000000000#!/bin/bash . 
/etc/sysconfig/heat-params set -x if [ "$VERIFY_CA" == "True" ]; then VERIFY_CA="" else VERIFY_CA="-k" fi if [ "${TLS_DISABLED}" = 'False' ]; then tls="--tlsverify" tls=$tls" --tlscacert=/etc/docker/ca.crt" tls=$tls" --tlskey=/etc/docker/server.key" tls=$tls" --tlscert=/etc/docker/server.crt" fi cat > /usr/local/bin/magnum-start-swarm-worker << START_SWARM_BIN #!/bin/bash -ex i=0 until token=\$(/usr/bin/docker $tls -H $SWARM_API_IP swarm join-token --quiet worker) do ((i++)) [ \$i -lt 5 ] || break; sleep 5 done if [[ -z \$token ]] ; then sh -c "${WAIT_CURL} ${VERIFY_CA} --data-binary '{\"status\": \"FAILURE\", \"reason\": \"Failed to retrieve swarm join token.\"}'" fi i=0 until docker swarm join --token \$token $SWARM_API_IP:2377 do ((i++)) [ \$i -lt 5 ] || break; sleep 5 done if [[ \$i -ge 5 ]] ; then sh -c "${WAIT_CURL} ${VERIFY_CA} --data-binary '{\"status\": \"FAILURE\", \"reason\": \"Node failed to join swarm.\"}'" else sh -c "${WAIT_CURL} ${VERIFY_CA} --data-binary '{\"status\": \"SUCCESS\", \"reason\": \"Node joined swarm.\"}'" fi START_SWARM_BIN chmod +x /usr/local/bin/magnum-start-swarm-worker cat > /etc/systemd/system/swarm-worker.service << END_SERVICE [Unit] Description=Swarm Worker After=docker.service Requires=docker.service [Service] Type=oneshot ExecStart=/usr/local/bin/magnum-start-swarm-worker [Install] WantedBy=multi-user.target END_SERVICE chown root:root /etc/systemd/system/swarm-worker.service chmod 644 /etc/systemd/system/swarm-worker.service systemctl daemon-reload systemctl start --no-block swarm-worker magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v2/templates/fragments/write-heat-params-master.yaml0000666000175100017510000000171413244017334034105 0ustar zuulzuul00000000000000#cloud-config merge_how: dict(recurse_array)+list(append) write_files: - path: /etc/sysconfig/heat-params owner: "root:root" permissions: "0600" content: | IS_PRIMARY_MASTER="$IS_PRIMARY_MASTER" WAIT_CURL="$WAIT_CURL" DOCKER_VOLUME="$DOCKER_VOLUME" 
DOCKER_VOLUME_SIZE="$DOCKER_VOLUME_SIZE" DOCKER_STORAGE_DRIVER="$DOCKER_STORAGE_DRIVER" HTTP_PROXY="$HTTP_PROXY" HTTPS_PROXY="$HTTPS_PROXY" NO_PROXY="$NO_PROXY" PRIMARY_MASTER_IP="$PRIMARY_MASTER_IP" SWARM_API_IP="$SWARM_API_IP" SWARM_NODE_IP="$SWARM_NODE_IP" CLUSTER_UUID="$CLUSTER_UUID" MAGNUM_URL="$MAGNUM_URL" TLS_DISABLED="$TLS_DISABLED" API_IP_ADDRESS="$API_IP_ADDRESS" TRUSTEE_USER_ID="$TRUSTEE_USER_ID" TRUSTEE_PASSWORD="$TRUSTEE_PASSWORD" TRUST_ID="$TRUST_ID" AUTH_URL="$AUTH_URL" VOLUME_DRIVER="$VOLUME_DRIVER" REXRAY_PREEMPT="$REXRAY_PREEMPT" VERIFY_CA="$VERIFY_CA" magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v2/templates/swarmnode.yaml0000666000175100017510000002432213244017334027273 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is a nested stack that defines a single swarm worker node, based on a vanilla Fedora Atomic image. This stack is included by a ResourceGroup resource in the parent template (swarmcluster.yaml). parameters: name: type: string description: server name server_image: type: string description: glance image used to boot the server server_flavor: type: string description: flavor to use when booting the server ssh_key_name: type: string description: name of ssh key to be provisioned on our server docker_volume_size: type: number description: > size of a cinder volume to allocate to docker for container/image storage docker_volume_type: type: string description: > type of a cinder volume to allocate to docker for container/image storage docker_storage_driver: type: string description: docker storage driver name external_network: type: string description: uuid/name of a network to use for floating ip addresses fixed_network_id: type: string description: Network from which to allocate fixed addresses. fixed_subnet_id: type: string description: Subnet from which to allocate fixed addresses. 
http_proxy: type: string description: http proxy address for docker https_proxy: type: string description: https proxy address for docker no_proxy: type: string description: no proxies for docker swarm_api_ip: type: string description: swarm master's api server ip address api_ip_address: type: string description: swarm master's api server public ip address cluster_uuid: type: string description: identifier for the cluster this template is generating magnum_url: type: string description: endpoint to retrieve TLS certs from tls_disabled: type: boolean description: whether or not to disable TLS secgroup_swarm_node_id: type: string description: ID of the security group for swarm node. trustee_domain_id: type: string description: domain id of the trustee trustee_user_id: type: string description: user id of the trustee trustee_username: type: string description: username of the trustee trustee_password: type: string description: password of the trustee hidden: true trust_id: type: string description: id of the trust which is used by the trustee hidden: true auth_url: type: string description: url for keystone volume_driver: type: string description: volume driver to use for container storage default: "" rexray_preempt: type: string description: > enables any host to take control of a volume irrespective of whether other hosts are using the volume default: "false" verify_ca: type: boolean description: whether or not to validate certificate authority openstack_ca: type: string description: The OpenStack CA certificate to install on the node. nodes_server_group_id: type: string description: ID of the server group for kubernetes cluster nodes. 
availability_zone: type: string description: > availability zone for master and nodes default: "" resources: node_wait_handle: type: "OS::Heat::WaitConditionHandle" node_wait_condition: type: "OS::Heat::WaitCondition" depends_on: swarm-node properties: handle: {get_resource: node_wait_handle} timeout: 6000 ###################################################################### # # software configs. these are components that are combined into # a multipart MIME user-data archive. write_heat_params: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: str_replace: template: {get_file: ../../common/templates/swarm/fragments/write-heat-params-node.yaml} params: "$WAIT_CURL": {get_attr: [node_wait_handle, curl_cli]} "$DOCKER_VOLUME": {get_resource: docker_volume} "$DOCKER_VOLUME_SIZE": {get_param: docker_volume_size} "$DOCKER_STORAGE_DRIVER": {get_param: docker_storage_driver} "$HTTP_PROXY": {get_param: http_proxy} "$HTTPS_PROXY": {get_param: https_proxy} "$NO_PROXY": {get_param: no_proxy} "$SWARM_API_IP": {get_param: swarm_api_ip} "$SWARM_NODE_IP": {get_attr: [swarm_node_eth0, fixed_ips, 0, ip_address]} "$CLUSTER_UUID": {get_param: cluster_uuid} "$MAGNUM_URL": {get_param: magnum_url} "$TLS_DISABLED": {get_param: tls_disabled} "$API_IP_ADDRESS": {get_param: api_ip_address} "$TRUSTEE_DOMAIN_ID": {get_param: trustee_domain_id} "$TRUSTEE_USER_ID": {get_param: trustee_user_id} "$TRUSTEE_USERNAME": {get_param: trustee_username} "$TRUSTEE_PASSWORD": {get_param: trustee_password} "$TRUST_ID": {get_param: trust_id} "$AUTH_URL": {get_param: auth_url} "$VOLUME_DRIVER": {get_param: volume_driver} "$REXRAY_PREEMPT": {get_param: rexray_preempt} "$VERIFY_CA": {get_param: verify_ca} install_openstack_ca: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: params: $OPENSTACK_CA: {get_param: openstack_ca} template: {get_file: ../../common/templates/fragments/atomic-install-openstack-ca.sh} remove_docker_key: type: "OS::Heat::SoftwareConfig" 
properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/remove-docker-key.sh} make_cert: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/make-cert.py} configure_docker_storage: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: params: $configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_atomic.sh} template: {get_file: ../../common/templates/fragments/configure-docker-storage.sh} add_docker_daemon_options: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/add-docker-daemon-options.sh} write_docker_socket: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/write-docker-socket.yaml} write_swarm_worker_service: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: fragments/write-swarm-worker-service.sh} enable_services: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: str_replace: template: {get_file: ../../common/templates/swarm/fragments/enable-services.sh} params: "$NODE_SERVICES": "docker.socket docker" configure_selinux: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/configure-selinux.sh} add_proxy: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/add-proxy.sh} volume_service: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/volume-service.sh} swarm_node_init: type: "OS::Heat::MultipartMime" properties: parts: - config: {get_resource: install_openstack_ca} - config: {get_resource: configure_selinux} - config: {get_resource: remove_docker_key} - config: {get_resource: 
write_heat_params} - config: {get_resource: make_cert} - config: {get_resource: configure_docker_storage} - config: {get_resource: add_docker_daemon_options} - config: {get_resource: write_docker_socket} - config: {get_resource: add_proxy} - config: {get_resource: enable_services} - config: {get_resource: write_swarm_worker_service} - config: {get_resource: volume_service} # do NOT use "_" (underscore) in the Nova server name # it creates a mismatch between the generated Nova name and its hostname # which can lead to weird problems swarm-node: type: "OS::Nova::Server" properties: name: {get_param: name} image: get_param: server_image flavor: get_param: server_flavor key_name: get_param: ssh_key_name user_data_format: RAW user_data: {get_resource: swarm_node_init} networks: - port: get_resource: swarm_node_eth0 scheduler_hints: { group: { get_param: nodes_server_group_id }} availability_zone: {get_param: availability_zone} swarm_node_eth0: type: "OS::Neutron::Port" properties: network_id: get_param: fixed_network_id security_groups: - {get_param: secgroup_swarm_node_id} fixed_ips: - subnet_id: get_param: fixed_subnet_id swarm_node_floating: type: "OS::Neutron::FloatingIP" properties: floating_network: get_param: external_network port_id: get_resource: swarm_node_eth0 ###################################################################### # # docker storage. This allocates a cinder volume and attaches it # to the node. # docker_volume: type: Magnum::Optional::Cinder::Volume properties: size: {get_param: docker_volume_size} volume_type: {get_param: docker_volume_type} docker_volume_attach: type: Magnum::Optional::Cinder::VolumeAttachment properties: instance_uuid: {get_resource: swarm-node} volume_id: {get_resource: docker_volume} mountpoint: /dev/vdb outputs: swarm_node_ip: value: {get_attr: [swarm_node_eth0, fixed_ips, 0, ip_address]} description: > This is the "private" address of the Swarm node. 
swarm_node_external_ip: value: {get_attr: [swarm_node_floating, floating_ip_address]} description: > This is the "public" address of the Swarm node. magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v2/templates/swarmcluster.yaml0000666000175100017510000003567613244017334030045 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This template will boot a Docker Swarm-Mode cluster. A swarm cluster is made up of several master nodes, and N worker nodes. Every node in the cluster, including the master, is running a Docker daemon and joins the swarm as a manager or as a worker. The managers are listening on port 2375. By default, the cluster is made up of one master node and one worker node. parameters: # # REQUIRED PARAMETERS # ssh_key_name: type: string description: name of ssh key to be provisioned on our server external_network: type: string description: uuid/name of a network to use for floating ip addresses fixed_network: type: string description: uuid/name of an existing network to use to provision machines default: "" fixed_subnet: type: string description: uuid/name of an existing subnet to use to provision machines default: "" cluster_uuid: type: string description: identifier for the cluster this template is generating magnum_url: type: string description: endpoint to retrieve TLS certs from server_image: type: string description: glance image used to boot the server # # OPTIONAL PARAMETERS # master_flavor: type: string description: flavor to use when booting the swarm master default: m1.small node_flavor: type: string description: flavor to use when booting the swarm node dns_nameserver: type: string description: address of a dns nameserver reachable in your environment default: 8.8.8.8 http_proxy: type: string description: http proxy address for docker default: "" https_proxy: type: string description: https proxy address for docker default: "" no_proxy: type: string description: no proxies for docker default: "" number_of_masters: 
type: number description: how many swarm masters to spawn default: 1 number_of_nodes: type: number description: how many swarm nodes to spawn default: 1 number_of_secondary_masters: type: number description: how many secondary masters to spawn fixed_network_cidr: type: string description: network range for fixed ip network default: "10.0.0.0/24" tls_disabled: type: boolean description: whether or not to enable TLS default: False docker_volume_size: type: number description: > size of a cinder volume to allocate to docker for container/image storage default: 0 docker_volume_type: type: string description: > type of a cinder volume to allocate to docker for container/image storage docker_storage_driver: type: string description: docker storage driver name default: "devicemapper" loadbalancing_protocol: type: string description: > The protocol which is used for load balancing. If you want to change tls_disabled option to 'True', please change this to "HTTP". default: TCP constraints: - allowed_values: ["TCP", "HTTP"] swarm_port: type: number description: > The port which are used by swarm manager to provide swarm service. 
default: 2375 trustee_domain_id: type: string description: domain id of the trustee default: "" trustee_user_id: type: string description: user id of the trustee default: "" trustee_username: type: string description: username of the trustee default: "" trustee_password: type: string description: password of the trustee default: "" hidden: true trust_id: type: string description: id of the trust which is used by the trustee default: "" hidden: true auth_url: type: string description: url for keystone volume_driver: type: string description: volume driver to use for container storage default: "" constraints: - allowed_values: ["","rexray"] rexray_preempt: type: string description: > enables any host to take control of a volume irrespective of whether other hosts are using the volume default: "false" verify_ca: type: boolean description: whether or not to validate certificate authority openstack_ca: type: string hidden: true description: The OpenStack CA certificate to install on the node. nodes_affinity_policy: type: string description: > affinity policy for nodes server group constraints: - allowed_values: ["affinity", "anti-affinity", "soft-affinity", "soft-anti-affinity"] availability_zone: type: string description: > availability zone for master and nodes default: "" resources: ###################################################################### # # network resources. allocate a network and router for our server. # it would also be possible to take advantage of existing network # resources (and have the deployer provide network and subnet ids, # etc, as parameters), but I wanted to minmize the amount of # configuration necessary to make this go. 
network: type: ../../common/templates/network.yaml properties: existing_network: {get_param: fixed_network} existing_subnet: {get_param: fixed_subnet} private_network_cidr: {get_param: fixed_network_cidr} dns_nameserver: {get_param: dns_nameserver} external_network: {get_param: external_network} api_lb: type: ../../common/templates/lb.yaml properties: fixed_subnet: {get_attr: [network, fixed_subnet]} external_network: {get_param: external_network} protocol: {get_param: loadbalancing_protocol} port: {get_param: swarm_port} ###################################################################### # # security groups. we need to permit network traffic of various # sorts. # secgroup_swarm_manager: type: "OS::Neutron::SecurityGroup" properties: rules: - protocol: icmp - protocol: tcp port_range_min: 22 port_range_max: 22 - protocol: tcp port_range_min: 2375 port_range_max: 2375 - protocol: tcp remote_ip_prefix: {get_param: fixed_network_cidr} port_range_min: 1 port_range_max: 65535 - protocol: udp port_range_min: 53 port_range_max: 53 secgroup_swarm_node: type: "OS::Neutron::SecurityGroup" properties: rules: - protocol: icmp - protocol: tcp - protocol: udp ###################################################################### # # resources that expose the IPs of either the swarm master or a given # LBaaS pool depending on whether LBaaS is enabled for the cluster. # api_address_lb_switch: type: Magnum::ApiGatewaySwitcher properties: pool_public_ip: {get_attr: [api_lb, floating_address]} pool_private_ip: {get_attr: [api_lb, address]} master_public_ip: {get_attr: [swarm_primary_master, resource.0.swarm_master_external_ip]} master_private_ip: {get_attr: [swarm_primary_master, resource.0.swarm_master_ip]} ###################################################################### # # resources that expose the server group for all nodes include master # and minions. 
# nodes_server_group: type: OS::Nova::ServerGroup properties: policies: [{get_param: nodes_affinity_policy}] ###################################################################### # # Swarm manager is responsible for the entire cluster and manages the # resources of multiple Docker hosts at scale. # It supports high availability by create a primary manager and multiple # replica instances. swarm_primary_master: type: "OS::Heat::ResourceGroup" depends_on: - network properties: count: 1 resource_def: type: swarmmaster.yaml properties: name: list_join: - '-' - [{ get_param: 'OS::stack_name' }, 'primary-master', '%index%'] is_primary_master: True ssh_key_name: {get_param: ssh_key_name} server_image: {get_param: server_image} server_flavor: {get_param: master_flavor} docker_volume_size: {get_param: docker_volume_size} docker_volume_type: {get_param: docker_volume_type} docker_storage_driver: {get_param: docker_storage_driver} fixed_network_id: {get_attr: [network, fixed_network]} fixed_subnet_id: {get_attr: [network, fixed_subnet]} external_network: {get_param: external_network} http_proxy: {get_param: http_proxy} https_proxy: {get_param: https_proxy} no_proxy: {get_param: no_proxy} swarm_api_ip: {get_attr: [api_lb, address]} cluster_uuid: {get_param: cluster_uuid} magnum_url: {get_param: magnum_url} tls_disabled: {get_param: tls_disabled} secgroup_swarm_master_id: {get_resource: secgroup_swarm_manager} swarm_port: {get_param: swarm_port} api_pool_id: {get_attr: [api_lb, pool_id]} api_ip_address: {get_attr: [api_lb, floating_address]} trustee_user_id: {get_param: trustee_user_id} trustee_password: {get_param: trustee_password} trust_id: {get_param: trust_id} auth_url: {get_param: auth_url} volume_driver: {get_param: volume_driver} rexray_preempt: {get_param: rexray_preempt} verify_ca: {get_param: verify_ca} openstack_ca: {get_param: openstack_ca} nodes_server_group_id: {get_resource: nodes_server_group} availability_zone: {get_param: availability_zone} 
swarm_secondary_masters: type: "OS::Heat::ResourceGroup" depends_on: - network - swarm_primary_master properties: count: {get_param: number_of_secondary_masters} resource_def: type: swarmmaster.yaml properties: name: list_join: - '-' - [{ get_param: 'OS::stack_name' }, 'secondary-master', '%index%'] ssh_key_name: {get_param: ssh_key_name} server_image: {get_param: server_image} server_flavor: {get_param: master_flavor} docker_volume_size: {get_param: docker_volume_size} docker_volume_type: {get_param: docker_volume_type} docker_storage_driver: {get_param: docker_storage_driver} fixed_network_id: {get_attr: [network, fixed_network]} fixed_subnet_id: {get_attr: [network, fixed_subnet]} external_network: {get_param: external_network} http_proxy: {get_param: http_proxy} https_proxy: {get_param: https_proxy} no_proxy: {get_param: no_proxy} swarm_api_ip: {get_attr: [api_address_lb_switch, private_ip]} cluster_uuid: {get_param: cluster_uuid} magnum_url: {get_param: magnum_url} tls_disabled: {get_param: tls_disabled} secgroup_swarm_master_id: {get_resource: secgroup_swarm_manager} swarm_port: {get_param: swarm_port} api_pool_id: {get_attr: [api_lb, pool_id]} api_ip_address: {get_attr: [api_lb, floating_address]} trustee_user_id: {get_param: trustee_user_id} trustee_password: {get_param: trustee_password} trust_id: {get_param: trust_id} auth_url: {get_param: auth_url} volume_driver: {get_param: volume_driver} rexray_preempt: {get_param: rexray_preempt} verify_ca: {get_param: verify_ca} openstack_ca: {get_param: openstack_ca} nodes_server_group_id: {get_resource: nodes_server_group} availability_zone: {get_param: availability_zone} swarm_nodes: type: "OS::Heat::ResourceGroup" depends_on: - network - swarm_primary_master properties: count: {get_param: number_of_nodes} resource_def: type: swarmnode.yaml properties: name: list_join: - '-' - [{ get_param: 'OS::stack_name' }, 'node', '%index%'] ssh_key_name: {get_param: ssh_key_name} server_image: {get_param: server_image} 
server_flavor: {get_param: node_flavor} docker_volume_size: {get_param: docker_volume_size} docker_volume_type: {get_param: docker_volume_type} docker_storage_driver: {get_param: docker_storage_driver} fixed_network_id: {get_attr: [network, fixed_network]} fixed_subnet_id: {get_attr: [network, fixed_subnet]} external_network: {get_param: external_network} http_proxy: {get_param: http_proxy} https_proxy: {get_param: https_proxy} no_proxy: {get_param: no_proxy} swarm_api_ip: {get_attr: [api_address_lb_switch, private_ip]} cluster_uuid: {get_param: cluster_uuid} magnum_url: {get_param: magnum_url} tls_disabled: {get_param: tls_disabled} secgroup_swarm_node_id: {get_resource: secgroup_swarm_node} api_ip_address: {get_attr: [api_address_lb_switch, public_ip]} trustee_domain_id: {get_param: trustee_domain_id} trustee_user_id: {get_param: trustee_user_id} trustee_username: {get_param: trustee_username} trustee_password: {get_param: trustee_password} trust_id: {get_param: trust_id} auth_url: {get_param: auth_url} volume_driver: {get_param: volume_driver} rexray_preempt: {get_param: rexray_preempt} verify_ca: {get_param: verify_ca} openstack_ca: {get_param: openstack_ca} nodes_server_group_id: {get_resource: nodes_server_group} availability_zone: {get_param: availability_zone} outputs: api_address: value: str_replace: template: api_ip_address params: api_ip_address: {get_attr: [api_address_lb_switch, public_ip]} description: > This is the API endpoint of the Swarm masters. Use this to access the Swarm API server from outside the cluster. swarm_primary_master_private: value: {get_attr: [swarm_primary_master, swarm_master_ip]} description: > This is a list of the "private" addresses of all the Swarm masters. swarm_primary_master: value: {get_attr: [swarm_primary_master, swarm_master_external_ip]} description: > This is a list of "public" ip addresses of all Swarm masters. Use these addresses to log into the Swarm masters via ssh. 
swarm_secondary_masters: value: {get_attr: [swarm_secondary_masters, swarm_master_external_ip]} description: > This is a list of "public" ip addresses of all Swarm masters. Use these addresses to log into the Swarm masters via ssh. swarm_nodes_private: value: {get_attr: [swarm_nodes, swarm_node_ip]} description: > This is a list of the "private" addresses of all the Swarm nodes. swarm_nodes: value: {get_attr: [swarm_nodes, swarm_node_external_ip]} description: > This is a list of the "public" addresses of all the Swarm nodes. Use these addresses to, e.g., log into the nodes. magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v2/templates/swarmmaster.yaml0000666000175100017510000002672213244017334027647 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is a nested stack that defines swarm master node. A swarm mater node is running a Docker daemon and joins swarm as a manager. The Docker daemon listens on port 2375. parameters: name: type: string description: server name ssh_key_name: type: string description: name of ssh key to be provisioned on our server docker_volume_size: type: number description: > size of a cinder volume to allocate to docker for container/image storage docker_volume_type: type: string description: > type of a cinder volume to allocate to docker for container/image storage docker_storage_driver: type: string description: docker storage driver name external_network: type: string description: uuid/name of a network to use for floating ip addresses cluster_uuid: type: string description: identifier for the cluster this template is generating magnum_url: type: string description: endpoint to retrieve TLS certs from fixed_network_id: type: string description: Network from which to allocate fixed addresses. fixed_subnet_id: type: string description: Subnet from which to allocate fixed addresses. 
swarm_api_ip: type: string description: swarm master's api server ip address default: "" api_ip_address: type: string description: swarm master's api server public ip address default: "" server_image: type: string description: glance image used to boot the server server_flavor: type: string description: flavor to use when booting the server http_proxy: type: string description: http proxy address for docker https_proxy: type: string description: https proxy address for docker no_proxy: type: string description: no proxies for docker tls_disabled: type: boolean description: whether or not to enable TLS secgroup_swarm_master_id: type: string description: ID of the security group for swarm master. swarm_port: type: number description: > The port which are used by swarm manager to provide swarm service. api_pool_id: type: string description: ID of the load balancer pool of swarm master server. trustee_user_id: type: string description: user id of the trustee trustee_password: type: string description: password of the trustee hidden: true trust_id: type: string description: id of the trust which is used by the trustee hidden: true auth_url: type: string description: url for keystone volume_driver: type: string description: volume driver to use for container storage default: "" rexray_preempt: type: string description: > enables any host to take control of a volume irrespective of whether other hosts are using the volume default: "false" is_primary_master: type: boolean description: whether this master is primary or not default: False verify_ca: type: boolean description: whether or not to validate certificate authority openstack_ca: type: string description: The OpenStack CA certificate to install on the node. nodes_server_group_id: type: string description: ID of the server group for kubernetes cluster nodes. 
availability_zone: type: string description: > availability zone for master and nodes default: "" resources: master_wait_handle: type: "OS::Heat::WaitConditionHandle" master_wait_condition: type: "OS::Heat::WaitCondition" depends_on: swarm-master properties: handle: {get_resource: master_wait_handle} timeout: 6000 ###################################################################### # # resource that exposes the IPs of either the Swarm master or the API # LBaaS pool depending on whether LBaaS is enabled for the cluster. # api_address_switch: type: Magnum::ApiGatewaySwitcher properties: pool_public_ip: {get_param: api_ip_address} pool_private_ip: {get_param: swarm_api_ip} master_public_ip: {get_attr: [swarm_master_floating, floating_ip_address]} master_private_ip: {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]} ###################################################################### # # software configs. these are components that are combined into # a multipart MIME user-data archive. 
# write_heat_params: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: str_replace: template: {get_file: fragments/write-heat-params-master.yaml} params: "$IS_PRIMARY_MASTER": {get_param: is_primary_master} "$WAIT_CURL": {get_attr: [master_wait_handle, curl_cli]} "$DOCKER_VOLUME": {get_resource: docker_volume} "$DOCKER_VOLUME_SIZE": {get_param: docker_volume_size} "$DOCKER_STORAGE_DRIVER": {get_param: docker_storage_driver} "$HTTP_PROXY": {get_param: http_proxy} "$HTTPS_PROXY": {get_param: https_proxy} "$NO_PROXY": {get_param: no_proxy} "$PRIMARY_MASTER_IP": {get_param: swarm_api_ip} "$SWARM_API_IP": {get_attr: [api_address_switch, private_ip]} "$SWARM_NODE_IP": {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]} "$CLUSTER_UUID": {get_param: cluster_uuid} "$MAGNUM_URL": {get_param: magnum_url} "$TLS_DISABLED": {get_param: tls_disabled} "$API_IP_ADDRESS": {get_attr: [api_address_switch, public_ip]} "$TRUSTEE_USER_ID": {get_param: trustee_user_id} "$TRUSTEE_PASSWORD": {get_param: trustee_password} "$TRUST_ID": {get_param: trust_id} "$AUTH_URL": {get_param: auth_url} "$VOLUME_DRIVER": {get_param: volume_driver} "$REXRAY_PREEMPT": {get_param: rexray_preempt} "$VERIFY_CA": {get_param: verify_ca} install_openstack_ca: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: params: $OPENSTACK_CA: {get_param: openstack_ca} template: {get_file: ../../common/templates/fragments/atomic-install-openstack-ca.sh} remove_docker_key: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/remove-docker-key.sh} configure_docker_storage: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: params: $configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_atomic.sh} template: {get_file: ../../common/templates/fragments/configure-docker-storage.sh} make_cert: type: "OS::Heat::SoftwareConfig" 
properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/make-cert.py} add_docker_daemon_options: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/add-docker-daemon-options.sh} write_docker_socket: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/write-docker-socket.yaml} write_swarm_master_service: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: fragments/write-swarm-master-service.sh} enable_services: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: str_replace: template: {get_file: ../../common/templates/swarm/fragments/enable-services.sh} params: "$NODE_SERVICES": "docker.socket docker" configure_selinux: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/configure-selinux.sh} add_proxy: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/add-proxy.sh} volume_service: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/volume-service.sh} swarm_master_init: type: "OS::Heat::MultipartMime" properties: parts: - config: {get_resource: install_openstack_ca} - config: {get_resource: configure_selinux} - config: {get_resource: remove_docker_key} - config: {get_resource: write_heat_params} - config: {get_resource: make_cert} - config: {get_resource: configure_docker_storage} - config: {get_resource: add_docker_daemon_options} - config: {get_resource: write_docker_socket} - config: {get_resource: add_proxy} - config: {get_resource: enable_services} - config: {get_resource: write_swarm_master_service} - config: {get_resource: volume_service} ###################################################################### # # Swarm_manager is a special node 
running the swarm manage daemon along # side the swarm worker. # # do NOT use "_" (underscore) in the Nova server name # it creates a mismatch between the generated Nova name and its hostname # which can lead to weird problems swarm-master: type: "OS::Nova::Server" properties: name: {get_param: name} image: get_param: server_image flavor: get_param: server_flavor key_name: get_param: ssh_key_name user_data_format: RAW user_data: {get_resource: swarm_master_init} networks: - port: get_resource: swarm_master_eth0 scheduler_hints: { group: { get_param: nodes_server_group_id }} availability_zone: {get_param: availability_zone} swarm_master_eth0: type: "OS::Neutron::Port" properties: network_id: get_param: fixed_network_id security_groups: - {get_param: secgroup_swarm_master_id} fixed_ips: - subnet_id: get_param: fixed_subnet_id swarm_master_floating: type: "OS::Neutron::FloatingIP" properties: floating_network: get_param: external_network port_id: get_resource: swarm_master_eth0 api_pool_member: type: Magnum::Optional::Neutron::LBaaS::PoolMember properties: pool: {get_param: api_pool_id} address: {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]} subnet: { get_param: fixed_subnet_id } protocol_port: {get_param: swarm_port} ###################################################################### # # docker storage. This allocates a cinder volume and attaches it # to the node. # docker_volume: type: Magnum::Optional::Cinder::Volume properties: size: {get_param: docker_volume_size} volume_type: {get_param: docker_volume_type} docker_volume_attach: type: Magnum::Optional::Cinder::VolumeAttachment properties: instance_uuid: {get_resource: swarm-master} volume_id: {get_resource: docker_volume} mountpoint: /dev/vdb outputs: swarm_master_ip: value: {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]} description: > This is the "private" addresses of all the Swarm master. 
swarm_master_external_ip: value: {get_attr: [swarm_master_floating, floating_ip_address]} description: > This is the "public" ip addresses of Swarm master. magnum-6.1.0/magnum/drivers/common/0000775000175100017510000000000013244017675017271 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/common/driver.py0000666000175100017510000001543013244017334021133 0ustar zuulzuul00000000000000# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six from oslo_config import cfg from pkg_resources import iter_entry_points from stevedore import driver from magnum.common import exception from magnum.objects import cluster_template CONF = cfg.CONF @six.add_metaclass(abc.ABCMeta) class Driver(object): definitions = None @classmethod def load_entry_points(cls): for entry_point in iter_entry_points('magnum.drivers'): if entry_point.name not in CONF.drivers.disabled_drivers: yield entry_point, entry_point.load(require=False) @classmethod def get_drivers(cls): '''Retrieves cluster drivers from python entry_points. 
Example: With the following classes: class Driver1(Driver): provides = [ ('server_type1', 'os1', 'coe1') ] class Driver2(Driver): provides = [ ('server_type2', 'os2', 'coe2') ] And the following entry_points: magnum.drivers = driver_name_1 = some.python.path:Driver1 driver_name_2 = some.python.path:Driver2 get_drivers will return: { (server_type1, os1, coe1): {'driver_name_1': Driver1}, (server_type2, os2, coe2): {'driver_name_2': Driver2} } :return: dict ''' if not cls.definitions: cls.definitions = dict() for entry_point, def_class in cls.load_entry_points(): for cluster_type in def_class().provides: cluster_type_tuple = (cluster_type['server_type'], cluster_type['os'], cluster_type['coe']) providers = cls.definitions.setdefault(cluster_type_tuple, dict()) providers['entry_point_name'] = entry_point.name providers['class'] = def_class return cls.definitions @classmethod def get_driver(cls, server_type, os, coe): '''Get Driver. Returns the Driver class for the provided cluster_type. With the following classes: class Driver1(Driver): provides = [ ('server_type1', 'os1', 'coe1') ] class Driver2(Driver): provides = [ ('server_type2', 'os2', 'coe2') ] And the following entry_points: magnum.drivers = driver_name_1 = some.python.path:Driver1 driver_name_2 = some.python.path:Driver2 get_driver('server_type2', 'os2', 'coe2') will return: Driver2 :param server_type: The server_type the cluster definition will build on :param os: The operating system the cluster definition will build on :param coe: The Container Orchestration Environment the cluster will produce :return: class ''' definition_map = cls.get_drivers() cluster_type = (server_type, os, coe) if cluster_type not in definition_map: raise exception.ClusterTypeNotSupported( server_type=server_type, os=os, coe=coe) driver_info = definition_map[cluster_type] # TODO(muralia): once --drivername is supported as an input during # cluster create, change the following line to use driver name for # loading. 
return driver.DriverManager("magnum.drivers", driver_info['entry_point_name']).driver() @classmethod def get_driver_for_cluster(cls, context, cluster): ct = cluster_template.ClusterTemplate.get_by_uuid( context, cluster.cluster_template_id) return cls.get_driver(ct.server_type, ct.cluster_distro, ct.coe) def update_cluster_status(self, context, cluster): '''Update the cluster status based on underlying orchestration This is an optional method if your implementation does not need to poll the orchestration for status updates (for example, your driver uses some notification-based mechanism instead). ''' return @abc.abstractproperty def provides(self): '''return a list of (server_type, os, coe) tuples Returns a list of cluster configurations supported by this driver ''' raise NotImplementedError("Subclasses must implement 'provides'.") @abc.abstractmethod def create_cluster(self, context, cluster, cluster_create_timeout): raise NotImplementedError("Subclasses must implement " "'create_cluster'.") @abc.abstractmethod def update_cluster(self, context, cluster, scale_manager=None, rollback=False): raise NotImplementedError("Subclasses must implement " "'update_cluster'.") @abc.abstractmethod def delete_cluster(self, context, cluster): raise NotImplementedError("Subclasses must implement " "'delete_cluster'.") @abc.abstractmethod def create_federation(self, context, federation): raise NotImplementedError("Subclasses must implement " "'create_federation'.") @abc.abstractmethod def update_federation(self, context, federation): raise NotImplementedError("Subclasses must implement " "'update_federation'.") @abc.abstractmethod def delete_federation(self, context, federation): raise NotImplementedError("Subclasses must implement " "'delete_federation'.") def get_monitor(self, context, cluster): """return the monitor with container data for this driver.""" return None def get_scale_manager(self, context, osclient, cluster): """return the scale manager for this driver.""" return 
None def rotate_ca_certificate(self, context, cluster): raise exception.NotSupported( "'rotate_ca_certificate' is not supported by this driver.") magnum-6.1.0/magnum/drivers/common/k8s_scale_manager.py0000666000175100017510000000210513244017334023201 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.conductor import k8s_api as k8s from magnum.conductor.scale_manager import ScaleManager class K8sScaleManager(ScaleManager): def __init__(self, context, osclient, cluster): super(K8sScaleManager, self).__init__(context, osclient, cluster) def _get_hosts_with_container(self, context, cluster): k8s_api = k8s.create_k8s_api(self.context, cluster) hosts = set() for pod in k8s_api.list_namespaced_pod(namespace='default').items: hosts.add(pod.spec.node_name) return hosts magnum-6.1.0/magnum/drivers/common/image/0000775000175100017510000000000013244017675020353 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/common/image/fedora-atomic/0000775000175100017510000000000013244017675023065 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/common/image/fedora-atomic/README.rst0000666000175100017510000000603513244017334024552 0ustar zuulzuul00000000000000============= fedora-atomic ============= Generates a Fedora Atomic image based on a public deployed tree. This element has been tested under Debian, Ubuntu, CentOS and Fedora operating systems. 
Pre-requisites to run diskimage-builder --------------------------------------- For diskimage-builder to work, following packages need to be present: * python-dev * build-essential * python-pip * kpartx * python-lzma * qemu-utils * yum * yum-utils * python-yaml * curl For Debian/Ubuntu systems, use:: apt-get install python-dev build-essential python-pip kpartx python-lzma \ qemu-utils yum yum-utils python-yaml git curl For CentOS and Fedora < 22, use:: yum install python-dev build-essential python-pip kpartx python-lzma qemu-utils yum yum-utils python-yaml curl For Fedora >= 22, use:: dnf install python-devel @development-tools python-pip kpartx python-backports-lzma @virtualization yum yum-utils python-yaml curl How to generate Fedora Atomic image ----------------------------------- To generate an atomic image for Fedora 25 these commands can be executed:: # Install diskimage-builder in virtual environment virtualenv . . bin/activate pip install diskimage-builder git clone https://git.openstack.org/openstack/magnum git clone https://git.openstack.org/openstack/dib-utils.git export PATH="${PWD}/dib-utils/bin:$PATH" export ELEMENTS_PATH=$(python -c 'import os, diskimage_builder, pkg_resources;print(os.path.abspath(pkg_resources.resource_filename(diskimage_builder.__name__, "elements")))') export ELEMENTS_PATH="${ELEMENTS_PATH}:${PWD}/magnum/magnum/drivers/common/image" export DIB_RELEASE=25 # this can be switched to the desired version export DIB_IMAGE_SIZE=2.5 # we need to give a bit more space to loopback device disk-image-create fedora-atomic -o fedora-atomic This element can consume already published trees, but you can use it to consume your own generated trees. 
Documentation about creating own trees can be found at `http://developers.redhat.com/blog/2015/01/08/creating-custom-atomic-trees-images-and-installers-part-1/ `_ Environment Variables --------------------- To properly reference the tree, the following env vars can be set: FEDORA_ATOMIC_TREE_URL :Required: Yes :Description: Url for the public fedora-atomic tree to use. It can reference to own published trees. :Default: ``https://kojipkgs.fedoraproject.org/atomic/${DIB_RELEASE}/`` FEDORA_ATOMIC_TREE_REF :Required: Yes :Description: Reference of the tree to install. :Default: ``$(curl ${FEDORA_ATOMIC_TREE_URL}/refs/heads/fedora-atomic/${DIB_RELEASE}/x86_64/docker-host)`` You can use the defaults or export your url and reference, like following:: export FEDORA_ATOMIC_TREE_URL="https://kojipkgs.fedoraproject.org/atomic/25/" export FEDORA_ATOMIC_TREE_REF="$(curl https://kojipkgs.fedoraproject.org/atomic/25/refs/heads/fedora-atomic/25/x86_64/docker-host)" magnum-6.1.0/magnum/drivers/common/image/fedora-atomic/environment.d/0000775000175100017510000000000013244017675025653 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/common/image/fedora-atomic/environment.d/50-fedora-atomic0000666000175100017510000000041113244017334030520 0ustar zuulzuul00000000000000export FEDORA_ATOMIC_TREE_URL=${FEDORA_ATOMIC_TREE_URL:-https://kojipkgs.fedoraproject.org/atomic/${DIB_RELEASE}/} export FEDORA_ATOMIC_TREE_REF=${FEDORA_ATOMIC_TREE_REF:-$(curl ${FEDORA_ATOMIC_TREE_URL}/refs/heads/fedora-atomic/${DIB_RELEASE}/x86_64/docker-host)} magnum-6.1.0/magnum/drivers/common/image/fedora-atomic/element-deps0000666000175100017510000000005413244017334025363 0ustar zuulzuul00000000000000fedora-minimal growroot package-installs vm magnum-6.1.0/magnum/drivers/common/image/fedora-atomic/package-installs.yaml0000666000175100017510000000005513244017334027165 0ustar zuulzuul00000000000000# Install packages needed for atomic ostree: 
magnum-6.1.0/magnum/drivers/common/image/fedora-atomic/validate_atomic_image.sh0000777000175100017510000000155513244017334027713 0ustar zuulzuul00000000000000#!/bin/bash # # Copyright (c) 2016 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -e # check that image is valid qemu-img check -q $1 # validate estimated size FILESIZE=$(stat -c%s "$1") MIN_SIZE=629145600 # 600MB if [ $FILESIZE -lt $MIN_SIZE ] ; then echo "Error: generated image size is lower than expected." exit 1 fi magnum-6.1.0/magnum/drivers/common/image/fedora-atomic/install_imagebuild_deps.sh0000777000175100017510000000064013244017334030261 0ustar zuulzuul00000000000000#!/bin/bash # This script installs all needed dependencies to generate # images using diskimage-builder. 
Please not it only has been # tested on Ubuntu Trusty set -eux set -o pipefail sudo apt-get update || true sudo apt-get install -y \ debootstrap \ kpartx \ qemu-utils \ python-dev \ build-essential \ python-pip \ python-lzma \ yum \ yum-utils \ python-yaml \ curl magnum-6.1.0/magnum/drivers/common/image/fedora-atomic/finalise.d/0000775000175100017510000000000013244017675025101 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/common/image/fedora-atomic/finalise.d/80-fedora-atomic0000777000175100017510000000512713244017334027765 0ustar zuulzuul00000000000000#!/bin/bash if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then set -x fi set -eu set -o pipefail # generate ostree in root cd / ostree admin os-init fedora-atomic ostree remote add --set=gpg-verify=false fedora-atomic ${FEDORA_ATOMIC_TREE_URL} ostree pull fedora-atomic ${FEDORA_ATOMIC_TREE_REF} ostree remote delete fedora-atomic ostree admin deploy --os=fedora-atomic ${FEDORA_ATOMIC_TREE_REF} --karg-proc-cmdline --karg=selinux=0 # copy /etc/fstab to the deployed directory SYSROOT=/ostree/deploy/fedora-atomic/deploy/${FEDORA_ATOMIC_TREE_REF}.0 cp /etc/fstab $SYSROOT/etc/ # need to find the generated images DEPLOYED_DIRECTORY=$(find /boot/ostree -name fedora-atomic-* -type d) DEPLOYED_ID=${DEPLOYED_DIRECTORY##*-} INIT_IMAGE=$(find ${DEPLOYED_DIRECTORY} -name initramfs*.img) VMLINUZ_IMAGE=$(find ${DEPLOYED_DIRECTORY} -name vmlinuz*) # generate ostree boot cat > /etc/grub.d/15_ostree <$oac_templates/etc/os-collect-config.conf [DEFAULT] {{^os-collect-config.command}} command = os-refresh-config {{/os-collect-config.command}} {{#os-collect-config}} {{#command}} command = {{command}} {{/command}} {{#polling_interval}} polling_interval = {{polling_interval}} {{/polling_interval}} {{#cachedir}} cachedir = {{cachedir}} {{/cachedir}} {{#collectors}} collectors = {{.}} {{/collectors}} {{#cfn}} [cfn] {{#metadata_url}} metadata_url = {{metadata_url}} {{/metadata_url}} stack_name = {{stack_name}} secret_access_key = 
{{secret_access_key}} access_key_id = {{access_key_id}} path = {{path}} {{/cfn}} {{#heat}} [heat] auth_url = {{auth_url}} user_id = {{user_id}} password = {{password}} project_id = {{project_id}} stack_id = {{stack_id}} resource_name = {{resource_name}} {{/heat}} {{#zaqar}} [zaqar] auth_url = {{auth_url}} user_id = {{user_id}} password = {{password}} project_id = {{project_id}} queue_id = {{queue_id}} {{/zaqar}} {{#request}} [request] {{#metadata_url}} metadata_url = {{metadata_url}} {{/metadata_url}} {{/request}} {{/os-collect-config}} EOF mkdir -p $oac_templates/var/run/heat-config # template for writing heat deployments data to a file echo "{{deployments}}" > $oac_templates/var/run/heat-config/heat-config magnum-6.1.0/magnum/drivers/common/image/heat-container-agent/scripts/configure_container_agent.sh0000666000175100017510000000237213244017334033572 0ustar zuulzuul00000000000000#!/bin/bash set -eux # initial /etc/os-collect-config.conf cat </etc/os-collect-config.conf [DEFAULT] command = os-refresh-config EOF # os-refresh-config scripts directory # This moves to /usr/libexec/os-refresh-config in later releases # Be sure to have this dir mounted and created by config.json and tmpfiles orc_scripts=/opt/stack/os-config-refresh for d in pre-configure.d configure.d migration.d post-configure.d; do install -m 0755 -o root -g root -d $orc_scripts/$d done # os-refresh-config script for running os-apply-config cat <$orc_scripts/configure.d/20-os-apply-config #!/bin/bash set -ue exec os-apply-config EOF chmod 700 $orc_scripts/configure.d/20-os-apply-config cp /opt/heat-container-agent/scripts/55-heat-config $orc_scripts/configure.d/55-heat-config chmod 700 $orc_scripts/configure.d/55-heat-config cp /opt/heat-container-agent/scripts/50-heat-config-docker-compose $orc_scripts/configure.d/50-heat-config-docker-compose chmod 700 $orc_scripts/configure.d/50-heat-config-docker-compose mkdir -p /var/lib/heat-config/hooks cp /opt/heat-container-agent/hooks/* 
/var/lib/heat-config/hooks/ chmod 755 /var/lib/heat-config/hooks/atomic chmod 755 /var/lib/heat-config/hooks/docker-compose chmod 755 /var/lib/heat-config/hooks/script magnum-6.1.0/magnum/drivers/common/image/heat-container-agent/scripts/55-heat-config0000777000175100017510000001465713244017334030407 0ustar zuulzuul00000000000000#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import logging import os import shutil import stat import subprocess import sys import requests HOOKS_DIR_PATHS = ( os.environ.get('HEAT_CONFIG_HOOKS'), '/usr/libexec/heat-config/hooks', '/var/lib/heat-config/hooks', ) CONF_FILE = os.environ.get('HEAT_SHELL_CONFIG', '/var/run/heat-config/heat-config') DEPLOYED_DIR = os.environ.get('HEAT_CONFIG_DEPLOYED', '/var/lib/heat-config/deployed') OLD_DEPLOYED_DIR = os.environ.get('HEAT_CONFIG_DEPLOYED_OLD', '/var/run/heat-config/deployed') HEAT_CONFIG_NOTIFY = os.environ.get('HEAT_CONFIG_NOTIFY', 'heat-config-notify') def main(argv=sys.argv): log = logging.getLogger('heat-config') handler = logging.StreamHandler(sys.stderr) handler.setFormatter( logging.Formatter( '[%(asctime)s] (%(name)s) [%(levelname)s] %(message)s')) log.addHandler(handler) log.setLevel('DEBUG') if not os.path.exists(CONF_FILE): log.error('No config file %s' % CONF_FILE) return 1 conf_mode = stat.S_IMODE(os.lstat(CONF_FILE).st_mode) if conf_mode != 0o600: os.chmod(CONF_FILE, 0o600) if not os.path.isdir(DEPLOYED_DIR): if DEPLOYED_DIR != OLD_DEPLOYED_DIR and 
os.path.isdir(OLD_DEPLOYED_DIR): log.debug('Migrating deployed state from %s to %s' % (OLD_DEPLOYED_DIR, DEPLOYED_DIR)) shutil.move(OLD_DEPLOYED_DIR, DEPLOYED_DIR) else: os.makedirs(DEPLOYED_DIR, 0o700) try: configs = json.load(open(CONF_FILE)) except ValueError: pass else: for c in configs: try: invoke_hook(c, log) except Exception as e: log.exception(e) def find_hook_path(group): # sanitise the group to get an alphanumeric hook file name hook = "".join( x for x in group if x == '-' or x == '_' or x.isalnum()) for h in HOOKS_DIR_PATHS: if not h or not os.path.exists(h): continue hook_path = os.path.join(h, hook) if os.path.exists(hook_path): return hook_path def invoke_hook(c, log): # Sanitize input values (bug 1333992). Convert all String # inputs to strings if they're not already hot_inputs = c.get('inputs', []) for hot_input in hot_inputs: if hot_input.get('type', None) == 'String' and \ not isinstance(hot_input['value'], basestring): hot_input['value'] = str(hot_input['value']) iv = dict((i['name'], i['value']) for i in c['inputs']) # The group property indicates whether it is softwarecomponent or # plain softwareconfig # If it is softwarecomponent, pick up a property config to invoke # according to deploy_action group = c.get('group') if group == 'component': found = False action = iv.get('deploy_action') config = c.get('config') configs = config.get('configs') if configs: for cfg in configs: if action in cfg['actions']: c['config'] = cfg['config'] c['group'] = cfg['tool'] found = True break if not found: log.warn('Skipping group %s, no valid script is defined' ' for deploy action %s' % (group, action)) return # check to see if this config is already deployed deployed_path = os.path.join(DEPLOYED_DIR, '%s.json' % c['id']) if os.path.exists(deployed_path): log.warn('Skipping config %s, already deployed' % c['id']) log.warn('To force-deploy, rm %s' % deployed_path) return signal_data = {} hook_path = find_hook_path(c['group']) if not hook_path: 
log.warn('Skipping group %s with no hook script %s' % ( c['group'], hook_path)) return # write out config, which indicates it is deployed regardless of # subsequent hook success with os.fdopen(os.open( deployed_path, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f: json.dump(c, f, indent=2) log.debug('Running %s < %s' % (hook_path, deployed_path)) subproc = subprocess.Popen([hook_path], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = subproc.communicate(input=json.dumps(c)) log.info(stdout) log.debug(stderr) if subproc.returncode: log.error("Error running %s. [%s]\n" % ( hook_path, subproc.returncode)) else: log.info('Completed %s' % hook_path) try: if stdout: signal_data = json.loads(stdout) except ValueError: signal_data = { 'deploy_stdout': stdout, 'deploy_stderr': stderr, 'deploy_status_code': subproc.returncode, } signal_data_path = os.path.join(DEPLOYED_DIR, '%s.notify.json' % c['id']) # write out notify data for debugging with os.fdopen(os.open( signal_data_path, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f: json.dump(signal_data, f, indent=2) log.debug('Running %s %s < %s' % ( HEAT_CONFIG_NOTIFY, deployed_path, signal_data_path)) subproc = subprocess.Popen([HEAT_CONFIG_NOTIFY, deployed_path], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = subproc.communicate(input=json.dumps(signal_data)) log.info(stdout) if subproc.returncode: log.error( "Error running heat-config-notify. [%s]\n" % subproc.returncode) log.error(stderr) else: log.debug(stderr) if __name__ == '__main__': sys.exit(main(sys.argv)) magnum-6.1.0/magnum/drivers/common/image/heat-container-agent/scripts/heat-config-notify0000777000175100017510000001220513244017334031451 0ustar zuulzuul00000000000000#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import logging import os import sys import requests try: from heatclient import client as heatclient except ImportError: heatclient = None try: from keystoneclient.v3 import client as ksclient except ImportError: ksclient = None try: from zaqarclient.queues.v1 import client as zaqarclient except ImportError: zaqarclient = None MAX_RESPONSE_SIZE = 950000 def init_logging(): log = logging.getLogger('heat-config-notify') handler = logging.StreamHandler(sys.stderr) handler.setFormatter( logging.Formatter( '[%(asctime)s] (%(name)s) [%(levelname)s] %(message)s')) log.addHandler(handler) log.setLevel('DEBUG') return log def trim_response(response, trimmed_values=None): """Trim selected values from response. Makes given response smaller or the same size as MAX_RESPONSE_SIZE by trimming given trimmed_values from response dict from the left side (beginning). Returns trimmed and serialized JSON response itself. 
""" trimmed_values = trimmed_values or ('deploy_stdout', 'deploy_stderr') str_response = json.dumps(response, ensure_ascii=True, encoding='utf-8') len_total = len(str_response) offset = MAX_RESPONSE_SIZE - len_total if offset >= 0: return str_response offset = abs(offset) for key in trimmed_values: len_value = len(response[key]) cut = int(round(float(len_value) / len_total * offset)) response[key] = response[key][cut:] str_response = json.dumps(response, ensure_ascii=True, encoding='utf-8') return str_response def main(argv=sys.argv, stdin=sys.stdin): log = init_logging() usage = ('Usage:\n heat-config-notify /path/to/config.json ' '< /path/to/signal_data.json') if len(argv) < 2: log.error(usage) return 1 try: signal_data = json.load(stdin) except ValueError: log.warn('No valid json found on stdin') signal_data = {} conf_file = argv[1] if not os.path.exists(conf_file): log.error('No config file %s' % conf_file) log.error(usage) return 1 c = json.load(open(conf_file)) iv = dict((i['name'], i['value']) for i in c['inputs']) if 'deploy_signal_id' in iv: sigurl = iv.get('deploy_signal_id') sigverb = iv.get('deploy_signal_verb', 'POST') log.debug('Signaling to %s via %s' % (sigurl, sigverb)) # we need to trim log content because Heat response size is limited # by max_json_body_size = 1048576 str_signal_data = trim_response(signal_data) if sigverb == 'PUT': r = requests.put(sigurl, data=str_signal_data, headers={'content-type': 'application/json'}) else: r = requests.post(sigurl, data=str_signal_data, headers={'content-type': 'application/json'}) log.debug('Response %s ' % r) if 'deploy_queue_id' in iv: queue_id = iv.get('deploy_queue_id') log.debug('Signaling to queue %s' % (queue_id,)) ks = ksclient.Client( auth_url=iv['deploy_auth_url'], user_id=iv['deploy_user_id'], password=iv['deploy_password'], project_id=iv['deploy_project_id']) endpoint = ks.service_catalog.url_for( service_type='messaging', endpoint_type='publicURL') conf = { 'auth_opts': { 'backend': 
'keystone', 'options': { 'os_auth_token': ks.auth_token, 'os_project_id': iv['deploy_project_id'], } } } cli = zaqarclient.Client(endpoint, conf=conf, version=1.1) queue = cli.queue(queue_id) r = queue.post({'body': signal_data, 'ttl': 600}) log.debug('Response %s ' % r) elif 'deploy_auth_url' in iv: ks = ksclient.Client( auth_url=iv['deploy_auth_url'], user_id=iv['deploy_user_id'], password=iv['deploy_password'], project_id=iv['deploy_project_id']) endpoint = ks.service_catalog.url_for( service_type='orchestration', endpoint_type='publicURL') log.debug('Signalling to %s' % endpoint) heat = heatclient.Client( '1', endpoint, token=ks.auth_token) r = heat.resources.signal( iv.get('deploy_stack_id'), iv.get('deploy_resource_name'), data=signal_data) log.debug('Response %s ' % r) return 0 if __name__ == '__main__': sys.exit(main(sys.argv, sys.stdin)) magnum-6.1.0/magnum/drivers/common/image/heat-container-agent/scripts/50-heat-config-docker-compose0000777000175100017510000000665413244017334033310 0ustar zuulzuul00000000000000#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json import logging import os import subprocess import sys import yaml CONF_FILE = os.environ.get('HEAT_SHELL_CONFIG', '/var/run/heat-config/heat-config') DOCKER_COMPOSE_DIR = os.environ.get( 'HEAT_DOCKER_COMPOSE_WORKING', '/var/lib/heat-config/heat-config-docker-compose') DOCKER_COMPOSE_CMD = os.environ.get('HEAT_DOCKER_COMPOSE_CMD', 'docker-compose') def main(argv=sys.argv): log = logging.getLogger('heat-config') handler = logging.StreamHandler(sys.stderr) handler.setFormatter( logging.Formatter( '[%(asctime)s] (%(name)s) [%(levelname)s] %(message)s')) log.addHandler(handler) log.setLevel('DEBUG') if not os.path.exists(CONF_FILE): log.error('No config file %s' % CONF_FILE) return 1 if not os.path.isdir(DOCKER_COMPOSE_DIR): os.makedirs(DOCKER_COMPOSE_DIR, 0o700) try: configs = json.load(open(CONF_FILE)) except ValueError: pass try: cleanup_stale_projects(configs) for c in configs: write_compose_config(c) except Exception as e: log.exception(e) def cleanup_stale_projects(configs): def deployments(configs): for c in configs: yield c['name'] def compose_projects(compose_dir): for proj in os.listdir(compose_dir): if os.path.isfile( os.path.join(DOCKER_COMPOSE_DIR, '%s/docker-compose.yml' % proj)): yield proj def cleanup_containers(project): cmd = [ DOCKER_COMPOSE_CMD, 'kill' ] subproc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = subproc.communicate() for proj in compose_projects(DOCKER_COMPOSE_DIR): if proj not in deployments(configs): proj_dir = os.path.join(DOCKER_COMPOSE_DIR, proj) os.chdir(proj_dir) cleanup_containers(proj) os.remove('%s/docker-compose.yml' % proj_dir) def write_compose_config(c): group = c.get('group') if group != 'docker-compose': return def prepare_dir(path): if not os.path.isdir(path): os.makedirs(path, 0o700) compose_conf = c.get('config', '') if isinstance(compose_conf, dict): yaml_config = yaml.safe_dump(compose_conf, default_flow_style=False) else: yaml_config = compose_conf proj_dir = 
os.path.join(DOCKER_COMPOSE_DIR, c['name']) prepare_dir(proj_dir) fn = os.path.join(proj_dir, 'docker-compose.yml') with os.fdopen(os.open(fn, os.O_CREAT | os.O_WRONLY | os.O_TRUNC, 0o600), 'w') as f: f.write(yaml_config.encode('utf-8')) if __name__ == '__main__': sys.exit(main(sys.argv)) magnum-6.1.0/magnum/drivers/common/image/heat-container-agent/manifest.json0000666000175100017510000000007113244017334027041 0ustar zuulzuul00000000000000{ "defaultValues": {}, "version": "1.0" }magnum-6.1.0/magnum/drivers/common/image/heat-container-agent/Dockerfile0000666000175100017510000000307113244017334026335 0ustar zuulzuul00000000000000FROM registry.fedoraproject.org/fedora:rawhide # Fill out the labels LABEL name="heat-container-agent" \ maintainer="Spyros Trigazis " \ license="UNKNOWN" \ summary="Heat Container Agent system image" \ version="1.0" \ help="No help" \ architecture="x86_64" \ atomic.type="system" \ distribution-scope="public" RUN dnf -y --setopt=tsflags=nodocs install \ findutils os-collect-config os-apply-config \ os-refresh-config dib-utils python-pip python-docker-py \ python-yaml python-zaqarclient python2-oslo-log \ python-psutil kubernetes-client && dnf clean all # pip installing dpath as python-dpath is an older version of dpath # install docker-compose RUN pip install --no-cache dpath docker-compose ADD ./scripts/55-heat-config \ /opt/heat-container-agent/scripts/ ADD ./scripts/50-heat-config-docker-compose \ /opt/heat-container-agent/scripts/ ADD ./scripts/hooks/* \ /opt/heat-container-agent/hooks/ ADD ./scripts/heat-config-notify \ /usr/bin/heat-config-notify RUN chmod 755 /usr/bin/heat-config-notify ADD ./scripts/configure_container_agent.sh /opt/heat-container-agent/ RUN chmod 700 /opt/heat-container-agent/configure_container_agent.sh ADD ./scripts/write-os-apply-config-templates.sh /tmp RUN chmod 700 /tmp/write-os-apply-config-templates.sh RUN /tmp/write-os-apply-config-templates.sh COPY manifest.json service.template config.json.template 
tmpfiles.template /exports/ COPY launch /usr/bin/start-heat-container-agent # Execution CMD ["/usr/bin/start-heat-container-agent"] magnum-6.1.0/magnum/drivers/common/image/heat-container-agent/service.template0000666000175100017510000000027413244017334027542 0ustar zuulzuul00000000000000[Unit] Description=Heat Container Agent system image [Service] ExecStart=$EXEC_START ExecStop=$EXEC_STOP Restart=on-failure WorkingDirectory=$DESTDIR [Install] WantedBy=multi-user.targetmagnum-6.1.0/magnum/drivers/common/image/heat-container-agent/config.json.template0000666000175100017510000003553213244017334030324 0ustar zuulzuul00000000000000{ "hooks": {}, "hostname": "acme", "linux": { "namespaces": [ { "type": "mount" }, { "type": "ipc" }, { "type": "uts" } ], "resources": { "devices": [ { "access": "rwm", "allow": false } ] } }, "mounts": [ { "type": "bind", "source": "/srv/magnum", "destination": "/srv/magnum", "options": [ "rbind", "rw", "rprivate" ] }, { "type": "bind", "source": "/opt/stack/os-config-refresh", "destination": "/opt/stack/os-config-refresh", "options": [ "rbind", "rw", "rprivate" ] }, { "type": "bind", "source": "/run/systemd", "destination": "/run/systemd", "options": [ "rbind", "ro", "rprivate" ] }, { "type": "bind", "source": "/etc/", "destination": "/etc/", "options": [ "rbind", "rw", "rprivate" ] }, { "type": "bind", "source": "/var/lib", "destination": "/var/lib", "options": [ "rbind", "rw", "rprivate" ] }, { "type": "bind", "source": "/var/run", "destination": "/var/run", "options": [ "rbind", "rw", "rprivate" ] }, { "type": "bind", "source": "/var/log", "destination": "/var/log", "options": [ "rbind", "rw", "rprivate" ] }, { "type": "bind", "source": "/tmp", "destination": "/tmp", "options": [ "rbind", "rw", "rprivate" ] }, { "destination": "/proc", "source": "proc", "type": "proc" }, { "destination": "/dev", "options": [ "nosuid", "strictatime", "mode=755", "size=65536k" ], "source": "tmpfs", "type": "tmpfs" }, { "destination": "/dev/pts", 
"options": [ "nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5" ], "source": "devpts", "type": "devpts" }, { "destination": "/dev/shm", "options": [ "nosuid", "noexec", "nodev", "mode=1777", "size=65536k" ], "source": "shm", "type": "tmpfs" }, { "destination": "/dev/mqueue", "options": [ "nosuid", "noexec", "nodev" ], "source": "mqueue", "type": "mqueue" }, { "destination": "/sys", "options": [ "nosuid", "noexec", "nodev", "ro" ], "source": "sysfs", "type": "sysfs" }, { "destination": "/sys/fs/cgroup", "options": [ "nosuid", "noexec", "nodev", "relatime", "ro" ], "source": "cgroup", "type": "cgroup" } ], "ociVersion": "0.6.0-dev", "platform": { "arch": "amd64", "os": "linux" }, "process": { "args": [ "/usr/bin/start-heat-container-agent" ], "capabilities": { "bounding": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_DAC_READ_SEARCH", "CAP_FOWNER", "CAP_FSETID", "CAP_KILL", "CAP_SETGID", "CAP_SETUID", "CAP_SETPCAP", "CAP_LINUX_IMMUTABLE", "CAP_NET_BIND_SERVICE", "CAP_NET_BROADCAST", "CAP_NET_ADMIN", "CAP_NET_RAW", "CAP_IPC_LOCK", "CAP_IPC_OWNER", "CAP_SYS_MODULE", "CAP_SYS_RAWIO", "CAP_SYS_CHROOT", "CAP_SYS_PTRACE", "CAP_SYS_PACCT", "CAP_SYS_ADMIN", "CAP_SYS_BOOT", "CAP_SYS_NICE", "CAP_SYS_RESOURCE", "CAP_SYS_TIME", "CAP_SYS_TTY_CONFIG", "CAP_MKNOD", "CAP_LEASE", "CAP_AUDIT_WRITE", "CAP_AUDIT_CONTROL", "CAP_SETFCAP", "CAP_MAC_OVERRIDE", "CAP_MAC_ADMIN", "CAP_SYSLOG", "CAP_WAKE_ALARM", "CAP_BLOCK_SUSPEND", "CAP_AUDIT_READ" ], "permitted": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_DAC_READ_SEARCH", "CAP_FOWNER", "CAP_FSETID", "CAP_KILL", "CAP_SETGID", "CAP_SETUID", "CAP_SETPCAP", "CAP_LINUX_IMMUTABLE", "CAP_NET_BIND_SERVICE", "CAP_NET_BROADCAST", "CAP_NET_ADMIN", "CAP_NET_RAW", "CAP_IPC_LOCK", "CAP_IPC_OWNER", "CAP_SYS_MODULE", "CAP_SYS_RAWIO", "CAP_SYS_CHROOT", "CAP_SYS_PTRACE", "CAP_SYS_PACCT", "CAP_SYS_ADMIN", "CAP_SYS_BOOT", "CAP_SYS_NICE", "CAP_SYS_RESOURCE", "CAP_SYS_TIME", "CAP_SYS_TTY_CONFIG", "CAP_MKNOD", "CAP_LEASE", "CAP_AUDIT_WRITE", 
"CAP_AUDIT_CONTROL", "CAP_SETFCAP", "CAP_MAC_OVERRIDE", "CAP_MAC_ADMIN", "CAP_SYSLOG", "CAP_WAKE_ALARM", "CAP_BLOCK_SUSPEND", "CAP_AUDIT_READ" ], "inheritable": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_DAC_READ_SEARCH", "CAP_FOWNER", "CAP_FSETID", "CAP_KILL", "CAP_SETGID", "CAP_SETUID", "CAP_SETPCAP", "CAP_LINUX_IMMUTABLE", "CAP_NET_BIND_SERVICE", "CAP_NET_BROADCAST", "CAP_NET_ADMIN", "CAP_NET_RAW", "CAP_IPC_LOCK", "CAP_IPC_OWNER", "CAP_SYS_MODULE", "CAP_SYS_RAWIO", "CAP_SYS_CHROOT", "CAP_SYS_PTRACE", "CAP_SYS_PACCT", "CAP_SYS_ADMIN", "CAP_SYS_BOOT", "CAP_SYS_NICE", "CAP_SYS_RESOURCE", "CAP_SYS_TIME", "CAP_SYS_TTY_CONFIG", "CAP_MKNOD", "CAP_LEASE", "CAP_AUDIT_WRITE", "CAP_AUDIT_CONTROL", "CAP_SETFCAP", "CAP_MAC_OVERRIDE", "CAP_MAC_ADMIN", "CAP_SYSLOG", "CAP_WAKE_ALARM", "CAP_BLOCK_SUSPEND", "CAP_AUDIT_READ" ], "effective": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_DAC_READ_SEARCH", "CAP_FOWNER", "CAP_FSETID", "CAP_KILL", "CAP_SETGID", "CAP_SETUID", "CAP_SETPCAP", "CAP_LINUX_IMMUTABLE", "CAP_NET_BIND_SERVICE", "CAP_NET_BROADCAST", "CAP_NET_ADMIN", "CAP_NET_RAW", "CAP_IPC_LOCK", "CAP_IPC_OWNER", "CAP_SYS_MODULE", "CAP_SYS_RAWIO", "CAP_SYS_CHROOT", "CAP_SYS_PTRACE", "CAP_SYS_PACCT", "CAP_SYS_ADMIN", "CAP_SYS_BOOT", "CAP_SYS_NICE", "CAP_SYS_RESOURCE", "CAP_SYS_TIME", "CAP_SYS_TTY_CONFIG", "CAP_MKNOD", "CAP_LEASE", "CAP_AUDIT_WRITE", "CAP_AUDIT_CONTROL", "CAP_SETFCAP", "CAP_MAC_OVERRIDE", "CAP_MAC_ADMIN", "CAP_SYSLOG", "CAP_WAKE_ALARM", "CAP_BLOCK_SUSPEND", "CAP_AUDIT_READ" ] }, "cwd": "/", "env": [ "REQUESTS_CA_BUNDLE=$REQUESTS_CA_BUNDLE", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "SYSTEMD_IGNORE_CHROOT=1", "TERM=xterm" ], "rlimits": [ { "hard": 1024, "soft": 1024, "type": "RLIMIT_NOFILE" } ], "terminal": false, "user": {} }, "root": { "path": "rootfs", "readonly": true } } magnum-6.1.0/magnum/drivers/common/__init__.py0000666000175100017510000000000013244017334021362 0ustar 
zuulzuul00000000000000magnum-6.1.0/magnum/drivers/common/templates/0000775000175100017510000000000013244017675021267 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/common/templates/kubernetes/0000775000175100017510000000000013244017675023436 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/0000775000175100017510000000000013244017675025424 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh0000666000175100017510000000762013244017343033376 0ustar zuulzuul00000000000000#!/bin/sh . /etc/sysconfig/heat-params echo "configuring kubernetes (master)" _prefix=${CONTAINER_INFRA_PREFIX:-docker.io/openstackmagnum/} atomic install --storage ostree --system --system-package=no --name=kube-apiserver ${_prefix}kubernetes-apiserver:${KUBE_TAG} atomic install --storage ostree --system --system-package=no --name=kube-controller-manager ${_prefix}kubernetes-controller-manager:${KUBE_TAG} atomic install --storage ostree --system --system-package=no --name=kube-scheduler ${_prefix}kubernetes-scheduler:${KUBE_TAG} sed -i ' /^KUBE_ALLOW_PRIV=/ s/=.*/="--allow-privileged='"$KUBE_ALLOW_PRIV"'"/ /^KUBE_MASTER=/ s|=.*|="--master=http://127.0.0.1:8080"| ' /etc/kubernetes/config CERT_DIR=/etc/kubernetes/certs KUBE_API_ARGS="--runtime-config=api/all=true" KUBE_API_ARGS="$KUBE_API_ARGS --kubelet-preferred-address-types=InternalIP,Hostname,ExternalIP" KUBE_API_ARGS="$KUBE_API_ARGS $KUBEAPI_OPTIONS" if [ "$TLS_DISABLED" == "True" ]; then KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0 --insecure-port=$KUBE_API_PORT" else KUBE_API_ADDRESS="--bind-address=0.0.0.0 --secure-port=$KUBE_API_PORT" # insecure port is used internaly KUBE_API_ADDRESS="$KUBE_API_ADDRESS --insecure-bind-address=127.0.0.1 --insecure-port=8080" KUBE_API_ARGS="$KUBE_API_ARGS --authorization-mode=Node,RBAC --tls-cert-file=$CERT_DIR/server.crt" KUBE_API_ARGS="$KUBE_API_ARGS 
--tls-private-key-file=$CERT_DIR/server.key" KUBE_API_ARGS="$KUBE_API_ARGS --client-ca-file=$CERT_DIR/ca.crt" fi KUBE_ADMISSION_CONTROL="" if [ -n "${ADMISSION_CONTROL_LIST}" ] && [ "${TLS_DISABLED}" == "False" ]; then KUBE_ADMISSION_CONTROL="--admission-control=NodeRestriction,${ADMISSION_CONTROL_LIST}" fi if [ -n "$TRUST_ID" ]; then KUBE_API_ARGS="$KUBE_API_ARGS --cloud-config=/etc/kubernetes/kube_openstack_config --cloud-provider=openstack" fi sed -i ' /^KUBE_API_ADDRESS=/ s/=.*/="'"${KUBE_API_ADDRESS}"'"/ /^KUBE_SERVICE_ADDRESSES=/ s|=.*|="--service-cluster-ip-range='"$PORTAL_NETWORK_CIDR"'"| /^KUBE_API_ARGS=/ s|=.*|="'"${KUBE_API_ARGS}"'"| /^KUBE_ETCD_SERVERS=/ s/=.*/="--etcd-servers=http:\/\/127.0.0.1:2379"/ /^KUBE_ADMISSION_CONTROL=/ s/=.*/="'"${KUBE_ADMISSION_CONTROL}"'"/ ' /etc/kubernetes/apiserver # Add controller manager args KUBE_CONTROLLER_MANAGER_ARGS="--leader-elect=true" KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS $KUBECONTROLLER_OPTIONS" if [ -n "${ADMISSION_CONTROL_LIST}" ] && [ "${TLS_DISABLED}" == "False" ]; then KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS --service-account-private-key-file=$CERT_DIR/server.key --root-ca-file=$CERT_DIR/ca.crt" fi if [ -n "$TRUST_ID" ]; then KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS --cloud-config=/etc/kubernetes/kube_openstack_config --cloud-provider=openstack" fi if [ -n "$CERT_MANAGER_API" ]; then KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS --cluster-signing-cert-file=$CERT_DIR/ca.crt --cluster-signing-key-file=$CERT_DIR/ca.key" fi sed -i ' /^KUBELET_ADDRESSES=/ s/=.*/="--machines='""'"/ /^KUBE_CONTROLLER_MANAGER_ARGS=/ s#\(KUBE_CONTROLLER_MANAGER_ARGS\).*#\1="'"${KUBE_CONTROLLER_MANAGER_ARGS}"'"# ' /etc/kubernetes/controller-manager sed -i '/^KUBE_SCHEDULER_ARGS=/ s/=.*/="--leader-elect=true"/' /etc/kubernetes/scheduler HOSTNAME_OVERRIDE=$(hostname --short | sed 's/\.novalocal//') KUBELET_ARGS="--register-node=true --register-schedulable=false 
--pod-manifest-path=/etc/kubernetes/manifests --hostname-override=${HOSTNAME_OVERRIDE}" KUBELET_ARGS="${KUBELET_ARGS} --cluster_dns=${DNS_SERVICE_IP} --cluster_domain=${DNS_CLUSTER_DOMAIN}" KUBELET_ARGS="${KUBELET_ARGS} ${KUBELET_OPTIONS}" # For using default log-driver, other options should be ignored sed -i 's/\-\-log\-driver\=journald//g' /etc/sysconfig/docker if [ -n "${INSECURE_REGISTRY_URL}" ]; then echo "INSECURE_REGISTRY='--insecure-registry ${INSECURE_REGISTRY_URL}'" >> /etc/sysconfig/docker fi magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/write-heat-params.yaml0000666000175100017510000000343113244017334031635 0ustar zuulzuul00000000000000#cloud-config merge_how: dict(recurse_array)+list(append) write_files: - path: /etc/sysconfig/heat-params owner: "root:root" permissions: "0600" content: | PROMETHEUS_MONITORING="$PROMETHEUS_MONITORING" KUBE_ALLOW_PRIV="$KUBE_ALLOW_PRIV" KUBE_MASTER_IP="$KUBE_MASTER_IP" KUBE_API_PORT="$KUBE_API_PORT" KUBE_NODE_PUBLIC_IP="$KUBE_NODE_PUBLIC_IP" KUBE_NODE_IP="$KUBE_NODE_IP" ETCD_SERVER_IP="$ETCD_SERVER_IP" ENABLE_CINDER="$ENABLE_CINDER" DOCKER_VOLUME="$DOCKER_VOLUME" DOCKER_VOLUME_SIZE="$DOCKER_VOLUME_SIZE" DOCKER_STORAGE_DRIVER="$DOCKER_STORAGE_DRIVER" NETWORK_DRIVER="$NETWORK_DRIVER" REGISTRY_ENABLED="$REGISTRY_ENABLED" REGISTRY_PORT="$REGISTRY_PORT" SWIFT_REGION="$SWIFT_REGION" REGISTRY_CONTAINER="$REGISTRY_CONTAINER" REGISTRY_INSECURE="$REGISTRY_INSECURE" REGISTRY_CHUNKSIZE="$REGISTRY_CHUNKSIZE" TLS_DISABLED="$TLS_DISABLED" VERIFY_CA="$VERIFY_CA" CLUSTER_UUID="$CLUSTER_UUID" MAGNUM_URL="$MAGNUM_URL" AUTH_URL="$AUTH_URL" USERNAME="$USERNAME" PASSWORD="$PASSWORD" VOLUME_DRIVER="$VOLUME_DRIVER" REGION_NAME="$REGION_NAME" HTTP_PROXY="$HTTP_PROXY" HTTPS_PROXY="$HTTPS_PROXY" NO_PROXY="$NO_PROXY" WAIT_CURL="$WAIT_CURL" KUBE_TAG="$KUBE_TAG" FLANNEL_TAG="$FLANNEL_TAG" KUBE_VERSION="$KUBE_VERSION" TRUSTEE_USER_ID="$TRUSTEE_USER_ID" TRUSTEE_PASSWORD="$TRUSTEE_PASSWORD" TRUST_ID="$TRUST_ID" 
INSECURE_REGISTRY_URL="$INSECURE_REGISTRY_URL" CONTAINER_INFRA_PREFIX="$CONTAINER_INFRA_PREFIX" DNS_SERVICE_IP="$DNS_SERVICE_IP" DNS_CLUSTER_DOMAIN="$DNS_CLUSTER_DOMAIN" KUBELET_OPTIONS="$KUBELET_OPTIONS" KUBEPROXY_OPTIONS="$KUBEPROXY_OPTIONS" magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/enable-ingress-controller0000666000175100017510000000067713244017334032432 0ustar zuulzuul00000000000000#!/bin/bash # Enables the specified ingress controller. # # Currently there is only support for traefik. . /etc/sysconfig/heat-params function writeFile { # $1 is filename # $2 is file content [ -f ${1} ] || { echo "Writing File: $1" mkdir -p $(dirname ${1}) cat << EOF > ${1} $2 EOF } } if [ "$(echo $INGRESS_CONTROLLER | tr '[:upper:]' '[:lower:]')" = "traefik" ]; then $enable-ingress-traefik fi magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/enable-prometheus-monitoring0000666000175100017510000003222113244017343033143 0ustar zuulzuul00000000000000#!/bin/bash . /etc/sysconfig/heat-params function writeFile { # $1 is filename # $2 is file content [ -f ${1} ] || { echo "Writing File: $1" mkdir -p $(dirname ${1}) cat << EOF > ${1} $2 EOF } } prometheusConfigMap_file=/srv/magnum/kubernetes/monitoring/prometheusConfigMap.yaml [ -f ${prometheusConfigMap_file} ] || { echo "Writing File: $prometheusConfigMap_file" mkdir -p $(dirname ${prometheusConfigMap_file}) # NOTE: EOF needs to be in quotes in order to not escape the $ characters cat << 'EOF' > ${prometheusConfigMap_file} apiVersion: v1 kind: ConfigMap metadata: name: prometheus namespace: kube-system data: prometheus.yml: | global: scrape_interval: 10s scrape_timeout: 10s evaluation_interval: 10s scrape_configs: - job_name: 'kubernetes-apiservers' kubernetes_sd_configs: - role: endpoints scheme: https tls_config: ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token relabel_configs: - source_labels: 
[__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] action: keep regex: default;kubernetes;https - job_name: 'kubernetes-nodes' scheme: https tls_config: ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token kubernetes_sd_configs: - role: node relabel_configs: - action: labelmap regex: __meta_kubernetes_node_label_(.+) - target_label: __address__ replacement: kubernetes.default.svc:443 - source_labels: [__meta_kubernetes_node_name] regex: (.+) target_label: __metrics_path__ replacement: /api/v1/nodes/${1}/proxy/metrics - job_name: 'kubernetes-cadvisor' scheme: https tls_config: ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token kubernetes_sd_configs: - role: node relabel_configs: - action: labelmap regex: __meta_kubernetes_node_label_(.+) - target_label: __address__ replacement: kubernetes.default.svc:443 - source_labels: [__meta_kubernetes_node_name] regex: (.+) target_label: __metrics_path__ replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor - job_name: 'kubernetes-service-endpoints' kubernetes_sd_configs: - role: endpoints relabel_configs: - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] action: keep regex: true - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] action: replace target_label: __scheme__ regex: (https?) 
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] action: replace target_label: __metrics_path__ regex: (.+) - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] action: replace target_label: __address__ regex: ([^:]+)(?::\d+)?;(\d+) replacement: $1:$2 - action: labelmap regex: __meta_kubernetes_service_label_(.+) - source_labels: [__meta_kubernetes_namespace] action: replace target_label: kubernetes_namespace - source_labels: [__meta_kubernetes_service_name] action: replace target_label: kubernetes_name - job_name: 'kubernetes-services' metrics_path: /probe params: module: [http_2xx] kubernetes_sd_configs: - role: service relabel_configs: - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] action: keep regex: true - source_labels: [__address__] target_label: __param_target - target_label: __address__ replacement: blackbox - source_labels: [__param_target] target_label: instance - action: labelmap regex: __meta_kubernetes_service_label_(.+) - source_labels: [__meta_kubernetes_namespace] target_label: kubernetes_namespace - source_labels: [__meta_kubernetes_service_name] target_label: kubernetes_name - job_name: 'kubernetes-pods' kubernetes_sd_configs: - role: pod relabel_configs: - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] action: keep regex: true - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] action: replace target_label: __metrics_path__ regex: (.+) - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] action: replace regex: ([^:]+)(?::\d+)?;(\d+) replacement: $1:$2 target_label: __address__ - action: labelmap regex: __meta_kubernetes_pod_label_(.+) - source_labels: [__meta_kubernetes_namespace] action: replace target_label: kubernetes_namespace - source_labels: [__meta_kubernetes_pod_name] action: replace target_label: kubernetes_pod_name - job_name: 'kubernetes-node-exporter' tls_config: ca_file: 
/var/run/secrets/kubernetes.io/serviceaccount/ca.crt bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token kubernetes_sd_configs: - role: node relabel_configs: - action: labelmap regex: __meta_kubernetes_node_label_(.+) - source_labels: [__meta_kubernetes_role] action: replace target_label: kubernetes_role - source_labels: [__address__] regex: '(.*):10250' replacement: '${1}:9100' target_label: __address__ EOF } prometheusService_file=/srv/magnum/kubernetes/monitoring/prometheusService.yaml prometheusService_content=$(cat < /etc/sysconfig/flanneld < $FLANNEL_JSON < ${cert_dir}/ca.key chown kube.kube ${cert_dir}/ca.key chmod 400 ${cert_dir}/ca.key magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/write-kube-os-config.sh0000666000175100017510000000114513244017334031713 0ustar zuulzuul00000000000000#!/bin/sh . /etc/sysconfig/heat-params mkdir -p /etc/kubernetes/ KUBE_OS_CLOUD_CONFIG=/etc/kubernetes/kube_openstack_config cp /etc/pki/tls/certs/ca-bundle.crt /etc/kubernetes/ca-bundle.crt # Generate a the configuration for Kubernetes services # to talk to OpenStack Neutron and Cinder cat > $KUBE_OS_CLOUD_CONFIG < $WC_NOTIFY_BIN < $WC_NOTIFY_SERVICE <> $FLANNEL_DOCKER_BRIDGE_BIN <&2 exit 1 fi # NOTE(mnaser): Since Docker 1.13, it does not set the default forwarding # policy to ACCEPT which will cause CNI networking to fail. 
iptables -P FORWARD ACCEPT mkdir -p /run/flannel/ cat > /run/flannel/docker <> $FLANNEL_DOCKER_BRIDGE_SERVICE <> $DOCKER_FLANNEL_CONF <> $FLANNEL_DOCKER_BRIDGE_CONF < ${CORE_DNS} apiVersion: v1 kind: ServiceAccount metadata: name: coredns namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole metadata: labels: kubernetes.io/bootstrapping: rbac-defaults name: system:coredns rules: - apiGroups: - "" resources: - endpoints - services - pods - namespaces verbs: - list - watch --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding metadata: annotations: rbac.authorization.kubernetes.io/autoupdate: "true" labels: kubernetes.io/bootstrapping: rbac-defaults name: system:coredns roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: system:coredns subjects: - kind: ServiceAccount name: coredns namespace: kube-system --- apiVersion: v1 kind: ConfigMap metadata: name: coredns namespace: kube-system data: Corefile: | .:53 { errors log stdout health kubernetes ${DNS_CLUSTER_DOMAIN} ${PORTAL_NETWORK_CIDR} ${PODS_NETWORK_CIDR} { pods verified } proxy . 
/etc/resolv.conf cache 30 } --- apiVersion: extensions/v1beta1 kind: Deployment metadata: name: coredns namespace: kube-system labels: k8s-app: coredns kubernetes.io/name: "CoreDNS" spec: replicas: 1 selector: matchLabels: k8s-app: coredns template: metadata: labels: k8s-app: coredns spec: serviceAccountName: coredns tolerations: - key: node-role.kubernetes.io/master effect: NoSchedule - key: "CriticalAddonsOnly" operator: "Exists" containers: - name: coredns image: ${_prefix}coredns:1.0.1 imagePullPolicy: Always args: [ "-conf", "/etc/coredns/Corefile" ] volumeMounts: - name: config-volume mountPath: /etc/coredns ports: - containerPort: 53 name: dns protocol: UDP - containerPort: 53 name: dns-tcp protocol: TCP - containerPort: 9153 name: metrics protocol: TCP livenessProbe: httpGet: path: /health port: 8080 scheme: HTTP initialDelaySeconds: 60 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 dnsPolicy: Default volumes: - name: config-volume configMap: name: coredns items: - key: Corefile path: Corefile --- apiVersion: v1 kind: Service metadata: name: kube-dns namespace: kube-system labels: k8s-app: coredns kubernetes.io/cluster-service: "true" kubernetes.io/name: "CoreDNS" spec: selector: k8s-app: coredns clusterIP: ${DNS_SERVICE_IP} ports: - name: dns port: 53 protocol: UDP - name: dns-tcp port: 53 protocol: TCP - name: metrics port: 9153 protocol: TCP EOF } echo "Waiting for Kubernetes API..." 
until curl --silent "http://127.0.0.1:8080/version" do sleep 5 done kubectl create --validate=false -f $CORE_DNS magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/write-heat-params-master.yaml0000666000175100017510000000544113244017334033131 0ustar zuulzuul00000000000000#cloud-config merge_how: dict(recurse_array)+list(append) write_files: - path: /etc/sysconfig/heat-params owner: "root:root" permissions: "0600" content: | PROMETHEUS_MONITORING="$PROMETHEUS_MONITORING" KUBE_API_PUBLIC_ADDRESS="$KUBE_API_PUBLIC_ADDRESS" KUBE_API_PRIVATE_ADDRESS="$KUBE_API_PRIVATE_ADDRESS" KUBE_API_PORT="$KUBE_API_PORT" KUBE_NODE_PUBLIC_IP="$KUBE_NODE_PUBLIC_IP" KUBE_NODE_IP="$KUBE_NODE_IP" KUBE_ALLOW_PRIV="$KUBE_ALLOW_PRIV" ENABLE_CINDER="$ENABLE_CINDER" ETCD_VOLUME="$ETCD_VOLUME" ETCD_VOLUME_SIZE="$ETCD_VOLUME_SIZE" DOCKER_VOLUME="$DOCKER_VOLUME" DOCKER_VOLUME_SIZE="$DOCKER_VOLUME_SIZE" DOCKER_STORAGE_DRIVER="$DOCKER_STORAGE_DRIVER" NETWORK_DRIVER="$NETWORK_DRIVER" FLANNEL_NETWORK_CIDR="$FLANNEL_NETWORK_CIDR" FLANNEL_NETWORK_SUBNETLEN="$FLANNEL_NETWORK_SUBNETLEN" FLANNEL_BACKEND="$FLANNEL_BACKEND" PODS_NETWORK_CIDR="$PODS_NETWORK_CIDR" PORTAL_NETWORK_CIDR="$PORTAL_NETWORK_CIDR" ADMISSION_CONTROL_LIST="$ADMISSION_CONTROL_LIST" ETCD_DISCOVERY_URL="$ETCD_DISCOVERY_URL" USERNAME="$USERNAME" PASSWORD="$PASSWORD" CLUSTER_SUBNET="$CLUSTER_SUBNET" TLS_DISABLED="$TLS_DISABLED" KUBE_DASHBOARD_ENABLED="$KUBE_DASHBOARD_ENABLED" INFLUX_GRAFANA_DASHBOARD_ENABLED="$INFLUX_GRAFANA_DASHBOARD_ENABLED" VERIFY_CA="$VERIFY_CA" CLUSTER_UUID="$CLUSTER_UUID" MAGNUM_URL="$MAGNUM_URL" VOLUME_DRIVER="$VOLUME_DRIVER" HTTP_PROXY="$HTTP_PROXY" HTTPS_PROXY="$HTTPS_PROXY" NO_PROXY="$NO_PROXY" WAIT_CURL="$WAIT_CURL" KUBE_TAG="$KUBE_TAG" ETCD_TAG="$ETCD_TAG" KUBE_VERSION="$KUBE_VERSION" KUBE_DASHBOARD_VERSION="$KUBE_DASHBOARD_VERSION" TRUSTEE_USER_ID="$TRUSTEE_USER_ID" TRUSTEE_PASSWORD="$TRUSTEE_PASSWORD" TRUST_ID="$TRUST_ID" AUTH_URL="$AUTH_URL" INSECURE_REGISTRY_URL="$INSECURE_REGISTRY_URL" 
CONTAINER_INFRA_PREFIX="$CONTAINER_INFRA_PREFIX" SYSTEM_PODS_INITIAL_DELAY="$SYSTEM_PODS_INITIAL_DELAY" SYSTEM_PODS_TIMEOUT="$SYSTEM_PODS_TIMEOUT" ETCD_LB_VIP="$ETCD_LB_VIP" DNS_SERVICE_IP="$DNS_SERVICE_IP" DNS_CLUSTER_DOMAIN="$DNS_CLUSTER_DOMAIN" CERT_MANAGER_API="$CERT_MANAGER_API" CA_KEY="$CA_KEY" CALICO_TAG="$CALICO_TAG" CALICO_CNI_TAG="$CALICO_CNI_TAG" CALICO_KUBE_CONTROLLERS_TAG="$CALICO_KUBE_CONTROLLERS_TAG" CALICO_IPV4POOL="$CALICO_IPV4POOL" INGRESS_CONTROLLER="$INGRESS_CONTROLLER" INGRESS_CONTROLLER_ROLE="$INGRESS_CONTROLLER_ROLE" KUBELET_OPTIONS="$KUBELET_OPTIONS" KUBECONTROLLER_OPTIONS="$KUBECONTROLLER_OPTIONS" KUBEAPI_OPTIONS="$KUBEAPI_OPTIONS" KUBEPROXY_OPTIONS="$KUBEPROXY_OPTIONS" KUBESCHEDULER_OPTIONS="$KUBESCHEDULER_OPTIONS" magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/start-container-agent.sh0000666000175100017510000000053613244017334032167 0ustar zuulzuul00000000000000#!/bin/bash . /etc/sysconfig/heat-params set -ux _prefix=${CONTAINER_INFRA_PREFIX:-docker.io/openstackmagnum/} atomic install \ --storage ostree \ --system \ --system-package no \ --set REQUESTS_CA_BUNDLE=/etc/pki/tls/certs/ca-bundle.crt \ --name heat-container-agent \ ${_prefix}heat-container-agent:rawhide systemctl start heat-container-agent magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/make-cert-client.sh0000666000175100017510000000766713244017334031116 0ustar zuulzuul00000000000000#!/bin/sh # Copyright 2014 The Kubernetes Authors All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. . /etc/sysconfig/heat-params set -o errexit set -o nounset set -o pipefail if [ "$TLS_DISABLED" == "True" ]; then exit 0 fi if [ "$VERIFY_CA" == "True" ]; then VERIFY_CA="" else VERIFY_CA="-k" fi cert_dir=/etc/kubernetes/certs mkdir -p "$cert_dir" CA_CERT=$cert_dir/ca.crt function generate_certificates { _CERT=$cert_dir/${1}.crt _CSR=$cert_dir/${1}.csr _KEY=$cert_dir/${1}.key _CONF=$2 #Get a token by user credentials and trust auth_json=$(cat << EOF { "auth": { "identity": { "methods": [ "password" ], "password": { "user": { "id": "$TRUSTEE_USER_ID", "password": "$TRUSTEE_PASSWORD" } } } } } EOF ) content_type='Content-Type: application/json' url="$AUTH_URL/auth/tokens" USER_TOKEN=`curl $VERIFY_CA -s -i -X POST -H "$content_type" -d "$auth_json" $url \ | grep X-Subject-Token | awk '{print $2}' | tr -d '[[:space:]]'` # Get CA certificate for this cluster curl $VERIFY_CA -X GET \ -H "X-Auth-Token: $USER_TOKEN" \ -H "OpenStack-API-Version: container-infra latest" \ $MAGNUM_URL/certificates/$CLUSTER_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > $CA_CERT # Generate client's private key and csr openssl genrsa -out "${_KEY}" 4096 chmod 400 "${_KEY}" openssl req -new -days 1000 \ -key "${_KEY}" \ -out "${_CSR}" \ -reqexts req_ext \ -config "${_CONF}" # Send csr to Magnum to have it signed csr_req=$(python -c "import json; fp = open('${_CSR}'); print json.dumps({'cluster_uuid': '$CLUSTER_UUID', 'csr': fp.read()}); fp.close()") curl $VERIFY_CA -X POST \ -H "X-Auth-Token: $USER_TOKEN" \ -H "OpenStack-API-Version: container-infra latest" \ -H "Content-Type: application/json" \ -d "$csr_req" \ $MAGNUM_URL/certificates | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > ${_CERT} } #Kubelet Certs INSTANCE_NAME=$(hostname --short | sed 's/\.novalocal//') cat > ${cert_dir}/kubelet.conf < ${cert_dir}/proxy.conf <&2 exit 1 fi 
device_path=/dev/disk/by-id/${device_name} fstype=$(blkid -s TYPE -o value ${device_path}) if [ "${fstype}" != "xfs" ]; then mkfs.xfs -f ${device_path} fi mkdir -p /var/lib/etcd echo "${device_path} /var/lib/etcd xfs defaults 0 0" >> /etc/fstab mount -a chown -R etcd.etcd /var/lib/etcd chmod 755 /var/lib/etcd fi _prefix=${CONTAINER_INFRA_PREFIX:-docker.io/openstackmagnum/} atomic install \ --system-package no \ --system \ --storage ostree \ --name=etcd ${_prefix}etcd:${ETCD_TAG} if [ -z "$KUBE_NODE_IP" ]; then # FIXME(yuanying): Set KUBE_NODE_IP correctly KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4) fi myip="${KUBE_NODE_IP}" cert_dir="/etc/etcd/certs" protocol="https" if [ "$TLS_DISABLED" = "True" ]; then protocol="http" fi cat > /etc/etcd/etcd.conf <> /etc/etcd/etcd.conf <> /etc/etcd/etcd.conf fi magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/network-config-service.sh0000666000175100017510000000305213244017334032344 0ustar zuulzuul00000000000000#!/bin/sh . /etc/sysconfig/heat-params if [ "$NETWORK_DRIVER" != "flannel" ]; then exit 0 fi CERT_DIR=/etc/kubernetes/certs PROTOCOL=https ETCD_CURL_OPTIONS="--cacert $CERT_DIR/ca.crt \ --cert $CERT_DIR/server.crt --key $CERT_DIR/server.key" FLANNELD_CONFIG=/etc/sysconfig/flanneld if [ "$TLS_DISABLED" = "True" ]; then PROTOCOL=http ETCD_CURL_OPTIONS="" fi . $FLANNELD_CONFIG FLANNEL_CONFIG_BIN=/usr/local/bin/flannel-config FLANNEL_CONFIG_SERVICE=/etc/systemd/system/flannel-config.service FLANNEL_JSON=/etc/sysconfig/flannel-network.json echo "creating $FLANNEL_CONFIG_BIN" cat > $FLANNEL_CONFIG_BIN <&2 exit 1 fi if [ -z "$FLANNEL_ETCD_ENDPOINTS" ] || [ -z "$FLANNEL_ETCD_PREFIX" ]; then echo "ERROR: missing required configuration" >&2 exit 1 fi echo "creating flanneld config in etcd" while ! 
curl -sf -L $ETCD_CURL_OPTIONS \ $FLANNEL_ETCD_ENDPOINTS/v2/keys${FLANNEL_ETCD_PREFIX}/config \ -X PUT --data-urlencode value@${FLANNEL_JSON}; do echo "waiting for etcd" sleep 1 done EOF cat > $FLANNEL_CONFIG_SERVICE < ${CA_CERT} # Create config for server's csr cat > ${cert_dir}/server.conf < ${SERVER_CERT} # Common certs and key are created for both etcd and kubernetes services. # Both etcd and kube user should have permission to access the certs and key. groupadd kube_etcd usermod -a -G kube_etcd etcd usermod -a -G kube_etcd kube chmod 550 "${cert_dir}" chown -R kube:kube_etcd "${cert_dir}" chmod 440 $SERVER_KEY mkdir -p /etc/etcd/certs cp ${cert_dir}/* /etc/etcd/certs magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/enable-node-exporter.sh0000666000175100017510000000136313244017334031774 0ustar zuulzuul00000000000000#!/bin/sh . /etc/sysconfig/heat-params if [ "$(echo $PROMETHEUS_MONITORING | tr '[:upper:]' '[:lower:]')" = "false" ]; then exit 0 fi # Write node-exporter manifest as a regular pod node_exporter_file=/etc/kubernetes/manifests/node-exporter.yaml [ -f ${node_exporter_file} ] || { echo "Writing File: $node_exporter_file" mkdir -p $(dirname ${node_exporter_file}) cat << EOF > ${node_exporter_file} apiVersion: v1 kind: Pod metadata: name: node-exporter namespace: kube-system annotations: prometheus.io/scrape: "true" labels: app: node-exporter spec: containers: - name: node-exporter image: ${CONTAINER_INFRA_PREFIX:-docker.io/prom/}node-exporter ports: - containerPort: 9100 hostPort: 9100 EOF } magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/add-proxy.sh0000666000175100017510000000273713244017334027672 0ustar zuulzuul00000000000000#!/bin/sh . 
/etc/sysconfig/heat-params DOCKER_HTTP_PROXY_CONF=/etc/systemd/system/docker.service.d/http_proxy.conf DOCKER_HTTPS_PROXY_CONF=/etc/systemd/system/docker.service.d/https_proxy.conf DOCKER_NO_PROXY_CONF=/etc/systemd/system/docker.service.d/no_proxy.conf DOCKER_RESTART=0 BASH_RC=/etc/bashrc mkdir -p /etc/systemd/system/docker.service.d if [ -n "$HTTP_PROXY" ]; then cat < $DOCKER_HTTP_PROXY_CONF [Service] Environment=HTTP_PROXY=$HTTP_PROXY EOF DOCKER_RESTART=1 if [ -f "$BASH_RC" ]; then echo "declare -x http_proxy=$HTTP_PROXY" >> $BASH_RC else echo "File $BASH_RC does not exist, not setting http_proxy" fi fi if [ -n "$HTTPS_PROXY" ]; then cat < $DOCKER_HTTPS_PROXY_CONF [Service] Environment=HTTPS_PROXY=$HTTPS_PROXY EOF DOCKER_RESTART=1 if [ -f "$BASH_RC" ]; then echo "declare -x https_proxy=$HTTPS_PROXY" >> $BASH_RC else echo "File $BASH_RC does not exist, not setting https_proxy" fi fi if [ -n "$NO_PROXY" ]; then cat < $DOCKER_NO_PROXY_CONF [Service] Environment=NO_PROXY=$NO_PROXY EOF DOCKER_RESTART=1 if [ -f "$BASH_RC" ]; then echo "declare -x no_proxy=$NO_PROXY" >> $BASH_RC else echo "File $BASH_RC does not exist, not setting no_proxy" fi fi if [ "$DOCKER_RESTART" -eq 1 ]; then systemctl daemon-reload systemctl --no-block restart docker.service fi magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/kube-dashboard-service.sh0000666000175100017510000002755313244017334032277 0ustar zuulzuul00000000000000#!/bin/bash -x . 
/etc/sysconfig/heat-params if [ "$(echo $KUBE_DASHBOARD_ENABLED | tr '[:upper:]' '[:lower:]')" == "false" ]; then exit 0 fi KUBE_DASH_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}kubernetes-dashboard-amd64:${KUBE_DASHBOARD_VERSION}" HEAPSTER_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}heapster-amd64:v1.4.2" KUBE_DASH_DEPLOY=/srv/magnum/kubernetes/kubernetes-dashboard.yaml [ -f ${KUBE_DASH_DEPLOY} ] || { echo "Writing File: $KUBE_DASH_DEPLOY" mkdir -p $(dirname ${KUBE_DASH_DEPLOY}) cat << EOF > ${KUBE_DASH_DEPLOY} # Copyright 2017 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Configuration to deploy release version of the Dashboard UI compatible with # Kubernetes 1.8. # # Example usage: kubectl create -f # ------------------- Dashboard Secret ------------------- # apiVersion: v1 kind: Secret metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard-certs namespace: kube-system type: Opaque --- # ------------------- Dashboard Service Account ------------------- # apiVersion: v1 kind: ServiceAccount metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kube-system --- # ------------------- Dashboard Role & Role Binding ------------------- # kind: Role apiVersion: rbac.authorization.k8s.io/v1 metadata: name: kubernetes-dashboard-minimal namespace: kube-system rules: # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret. 
- apiGroups: [""] resources: ["secrets"] verbs: ["create"] # Allow Dashboard to create 'kubernetes-dashboard-settings' config map. - apiGroups: [""] resources: ["configmaps"] verbs: ["create"] # Allow Dashboard to get, update and delete Dashboard exclusive secrets. - apiGroups: [""] resources: ["secrets"] resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"] verbs: ["get", "update", "delete"] # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. - apiGroups: [""] resources: ["configmaps"] resourceNames: ["kubernetes-dashboard-settings"] verbs: ["get", "update"] # Allow Dashboard to get metrics from heapster. - apiGroups: [""] resources: ["services"] resourceNames: ["heapster"] verbs: ["proxy"] - apiGroups: [""] resources: ["services/proxy"] resourceNames: ["heapster", "http:heapster:", "https:heapster:"] verbs: ["get"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: kubernetes-dashboard-minimal namespace: kube-system roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: kubernetes-dashboard-minimal subjects: - kind: ServiceAccount name: kubernetes-dashboard namespace: kube-system --- # ------------------- Dashboard Deployment ------------------- # kind: Deployment apiVersion: apps/v1beta2 metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kube-system spec: replicas: 1 revisionHistoryLimit: 10 selector: matchLabels: k8s-app: kubernetes-dashboard template: metadata: labels: k8s-app: kubernetes-dashboard spec: containers: - name: kubernetes-dashboard env: - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - name: POD_IP valueFrom: fieldRef: fieldPath: status.podIP image: ${KUBE_DASH_IMAGE} ports: - containerPort: 8443 protocol: TCP args: - --auto-generate-certificates - --heapster-host=heapster:80 # Uncomment the following line to manually specify Kubernetes 
API server Host # If not specified, Dashboard will attempt to auto discover the API server and connect # to it. Uncomment only if the default does not work. # - --apiserver-host=http://my-address:port volumeMounts: - name: kubernetes-dashboard-certs mountPath: /certs # Create on-disk volume to store exec logs - mountPath: /tmp name: tmp-volume livenessProbe: httpGet: scheme: HTTPS path: / port: 8443 initialDelaySeconds: 30 timeoutSeconds: 30 volumes: - name: kubernetes-dashboard-certs secret: secretName: kubernetes-dashboard-certs - name: tmp-volume emptyDir: {} serviceAccountName: kubernetes-dashboard # Comment the following tolerations if Dashboard must not be deployed on master tolerations: - key: node-role.kubernetes.io/master effect: NoSchedule --- # ------------------- Dashboard Service ------------------- # kind: Service apiVersion: v1 metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kube-system spec: ports: - port: 443 targetPort: 8443 selector: k8s-app: kubernetes-dashboard --- # Grant admin privileges to the dashboard serviceacount apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding metadata: name: kubernetes-dashboard labels: k8s-app: kubernetes-dashboard roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: cluster-admin subjects: - kind: ServiceAccount name: kubernetes-dashboard namespace: kube-system EOF } INFLUX_SINK="" # Deploy INFLUX AND GRAFANA if [ "$(echo $INFLUX_GRAFANA_DASHBOARD_ENABLED | tr '[:upper:]' '[:lower:]')" == "true" ]; then INFLUX_SINK=" - --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086" INFLUX_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}heapster-influxdb-amd64:v1.3.3" GRAFANA_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}heapster-grafana-amd64:v4.4.3" INFLUX_DEPLOY=/srv/magnum/kubernetes/influxdb.yaml GRAFANA_DEPLOY=/srv/magnum/kubernetes/grafana.yaml [ -f ${INFLUX_DEPLOY} ] || { echo "Writing File: $INFLUX_DEPLOY" 
mkdir -p $(dirname ${INFLUX_DEPLOY}) cat << EOF > ${INFLUX_DEPLOY} apiVersion: extensions/v1beta1 kind: Deployment metadata: name: monitoring-influxdb namespace: kube-system spec: replicas: 1 template: metadata: labels: task: monitoring k8s-app: influxdb spec: containers: - name: influxdb image: ${INFLUX_IMAGE} volumeMounts: - mountPath: /data name: influxdb-storage volumes: - name: influxdb-storage emptyDir: {} --- apiVersion: v1 kind: Service metadata: labels: task: monitoring # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) # If you are NOT using this as an addon, you should comment out this line. # kubernetes.io/cluster-service: 'true' kubernetes.io/name: monitoring-influxdb name: monitoring-influxdb namespace: kube-system spec: ports: - port: 8086 targetPort: 8086 selector: k8s-app: influxdb EOF } [ -f ${GRAFANA_DEPLOY} ] || { echo "Writing File: $GRAFANA_DEPLOY" mkdir -p $(dirname ${GRAFANA_DEPLOY}) cat << EOF > ${GRAFANA_DEPLOY} apiVersion: extensions/v1beta1 kind: Deployment metadata: name: monitoring-grafana namespace: kube-system spec: replicas: 1 template: metadata: labels: task: monitoring k8s-app: grafana spec: containers: - name: grafana image: ${GRAFANA_IMAGE} ports: - containerPort: 3000 protocol: TCP volumeMounts: - mountPath: /etc/ssl/certs name: ca-certificates readOnly: true - mountPath: /var name: grafana-storage env: - name: INFLUXDB_HOST value: monitoring-influxdb - name: GF_SERVER_HTTP_PORT value: "3000" # The following env variables are required to make Grafana accessible via # the kubernetes api-server proxy. On production clusters, we recommend # removing these env variables, setup auth for grafana, and expose the grafana # service using a LoadBalancer or a public IP. 
- name: GF_AUTH_BASIC_ENABLED value: "false" - name: GF_AUTH_ANONYMOUS_ENABLED value: "true" - name: GF_AUTH_ANONYMOUS_ORG_ROLE value: Admin - name: GF_SERVER_ROOT_URL # If you're only using the API Server proxy, set this value instead: # value: /api/v1/namespaces/kube-system/services/monitoring-grafana/proxy value: / volumes: - name: ca-certificates hostPath: path: /etc/ssl/certs - name: grafana-storage emptyDir: {} --- apiVersion: v1 kind: Service metadata: labels: # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) # If you are NOT using this as an addon, you should comment out this line. # kubernetes.io/cluster-service: 'true' kubernetes.io/name: monitoring-grafana name: monitoring-grafana namespace: kube-system spec: # In a production setup, we recommend accessing Grafana through an external Loadbalancer # or through a public IP. # type: LoadBalancer # You could also use NodePort to expose the service at a randomly-generated port # type: NodePort ports: - port: 80 targetPort: 3000 selector: k8s-app: grafana EOF } echo "Waiting for Kubernetes API..." 
until curl --silent "http://127.0.0.1:8080/version" do sleep 5 done kubectl apply --validate=false -f $INFLUX_DEPLOY kubectl apply --validate=false -f $GRAFANA_DEPLOY fi # Deploy Heapster HEAPSTER_DEPLOY=/srv/magnum/kubernetes/heapster-controller.yaml [ -f ${HEAPSTER_DEPLOY} ] || { echo "Writing File: $HEAPSTER_DEPLOY" mkdir -p $(dirname ${HEAPSTER_DEPLOY}) cat << EOF > ${HEAPSTER_DEPLOY} apiVersion: v1 kind: ServiceAccount metadata: name: heapster namespace: kube-system --- apiVersion: extensions/v1beta1 kind: Deployment metadata: name: heapster namespace: kube-system spec: replicas: 1 template: metadata: labels: task: monitoring k8s-app: heapster spec: serviceAccountName: heapster containers: - name: heapster image: ${HEAPSTER_IMAGE} imagePullPolicy: IfNotPresent command: - /heapster - --source=kubernetes:https://kubernetes.default ${INFLUX_SINK} --- apiVersion: v1 kind: Service metadata: labels: task: monitoring # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) # If you are NOT using this as an addon, you should comment out this line. kubernetes.io/cluster-service: 'true' kubernetes.io/name: Heapster name: heapster namespace: kube-system spec: ports: - port: 80 targetPort: 8082 selector: k8s-app: heapster --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: heapster roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: system:heapster subjects: - kind: ServiceAccount name: heapster namespace: kube-system EOF } echo "Waiting for Kubernetes API..." until curl --silent "http://127.0.0.1:8080/version" do sleep 5 done kubectl apply --validate=false -f $KUBE_DASH_DEPLOY kubectl apply --validate=false -f $HEAPSTER_DEPLOY magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/calico-service.sh0000666000175100017510000003310213244017334030641 0ustar zuulzuul00000000000000#!/bin/sh . 
/etc/sysconfig/heat-params if [ "$NETWORK_DRIVER" != "calico" ]; then exit 0 fi _prefix=${CONTAINER_INFRA_PREFIX:-quay.io/calico/} ETCD_SERVER_IP=${ETCD_LB_VIP:-$KUBE_NODE_IP} CERT_DIR=/etc/kubernetes/certs ETCD_CA=`cat ${CERT_DIR}/ca.crt | base64 | tr -d '\n'` ETCD_CERT=`cat ${CERT_DIR}/server.crt | base64 | tr -d '\n'` ETCD_KEY=`cat ${CERT_DIR}/server.key | base64 | tr -d '\n'` CALICO_DEPLOY=/srv/magnum/kubernetes/manifests/calico-deploy.yaml [ -f ${CALICO_DEPLOY} ] || { echo "Writing File: $CALICO_DEPLOY" mkdir -p $(dirname ${CALICO_DEPLOY}) cat << EOF > ${CALICO_DEPLOY} # Calico Version v2.6.7 # https://docs.projectcalico.org/v2.6/releases#v2.6.7 # This manifest includes the following component versions: # calico/node:v2.6.7 # calico/cni:v1.11.2 # calico/kube-controllers:v1.0.3 # This ConfigMap is used to configure a self-hosted Calico installation. kind: ConfigMap apiVersion: v1 metadata: name: calico-config namespace: kube-system data: # Configure this with the location of your etcd cluster. etcd_endpoints: "https://${ETCD_SERVER_IP}:2379" # Configure the Calico backend to use. calico_backend: "bird" # The CNI network configuration to install on each node. cni_network_config: |- { "name": "k8s-pod-network", "cniVersion": "0.1.0", "type": "calico", "etcd_endpoints": "__ETCD_ENDPOINTS__", "etcd_key_file": "__ETCD_KEY_FILE__", "etcd_cert_file": "__ETCD_CERT_FILE__", "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__", "log_level": "info", "mtu": 1500, "ipam": { "type": "calico-ipam" }, "policy": { "type": "k8s", "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" }, "kubernetes": { "kubeconfig": "__KUBECONFIG_FILEPATH__" } } # If you're using TLS enabled etcd uncomment the following. # You must also populate the Secret below with these files. 
etcd_ca: "/calico-secrets/etcd-ca" etcd_cert: "/calico-secrets/etcd-cert" etcd_key: "/calico-secrets/etcd-key" --- # The following contains k8s Secrets for use with a TLS enabled etcd cluster. # For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/ apiVersion: v1 kind: Secret type: Opaque metadata: name: calico-etcd-secrets namespace: kube-system data: # Populate the following files with etcd TLS configuration if desired, but leave blank if # not using TLS for etcd. # This self-hosted install expects three files with the following names. The values # should be base64 encoded strings of the entire contents of each file. etcd-key: ${ETCD_KEY} etcd-cert: ${ETCD_CERT} etcd-ca: ${ETCD_CA} --- # This manifest installs the calico/node container, as well # as the Calico CNI plugins and network config on # each master and worker node in a Kubernetes cluster. kind: DaemonSet apiVersion: extensions/v1beta1 metadata: name: calico-node namespace: kube-system labels: k8s-app: calico-node spec: selector: matchLabels: k8s-app: calico-node template: metadata: labels: k8s-app: calico-node annotations: scheduler.alpha.kubernetes.io/critical-pod: '' scheduler.alpha.kubernetes.io/tolerations: | [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, {"key":"CriticalAddonsOnly", "operator":"Exists"}] spec: hostNetwork: true serviceAccountName: calico-node # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. terminationGracePeriodSeconds: 0 containers: # Runs calico/node container on each Kubernetes node. This # container programs network policy and routes on each # host. - name: calico-node image: ${_prefix}node:${CALICO_TAG} env: # The location of the Calico etcd cluster. - name: ETCD_ENDPOINTS valueFrom: configMapKeyRef: name: calico-config key: etcd_endpoints # Choose the backend to use. 
- name: CALICO_NETWORKING_BACKEND valueFrom: configMapKeyRef: name: calico-config key: calico_backend # Cluster type to identify the deployment type - name: CLUSTER_TYPE value: "k8s,bgp" # Disable file logging so 'kubectl logs' works. - name: CALICO_DISABLE_FILE_LOGGING value: "true" # Set Felix endpoint to host default action to ACCEPT. - name: FELIX_DEFAULTENDPOINTTOHOSTACTION value: "ACCEPT" # Configure the IP Pool from which Pod IPs will be chosen. - name: CALICO_IPV4POOL_CIDR value: ${CALICO_IPV4POOL} - name: CALICO_IPV4POOL_IPIP value: "off" - name: CALICO_IPV4POOL_NAT_OUTGOING value: "true" # Set noderef for node controller. - name: CALICO_K8S_NODE_REF valueFrom: fieldRef: fieldPath: spec.nodeName # Disable IPv6 on Kubernetes. - name: FELIX_IPV6SUPPORT value: "false" # Set Felix logging to "info" - name: FELIX_LOGSEVERITYSCREEN value: "info" # Set MTU for tunnel device used if ipip is enabled - name: FELIX_IPINIPMTU value: "1440" # Location of the CA certificate for etcd. - name: ETCD_CA_CERT_FILE valueFrom: configMapKeyRef: name: calico-config key: etcd_ca # Location of the client key for etcd. - name: ETCD_KEY_FILE valueFrom: configMapKeyRef: name: calico-config key: etcd_key # Location of the client certificate for etcd. - name: ETCD_CERT_FILE valueFrom: configMapKeyRef: name: calico-config key: etcd_cert # Auto-detect the BGP IP address. - name: IP value: "" - name: FELIX_HEALTHENABLED value: "true" securityContext: privileged: true resources: requests: cpu: 250m livenessProbe: httpGet: path: /liveness port: 9099 periodSeconds: 10 initialDelaySeconds: 10 failureThreshold: 6 readinessProbe: httpGet: path: /readiness port: 9099 periodSeconds: 10 volumeMounts: - mountPath: /lib/modules name: lib-modules readOnly: true - mountPath: /var/run/calico name: var-run-calico readOnly: false - mountPath: /calico-secrets name: etcd-certs # This container installs the Calico CNI binaries # and CNI network config file on each node. 
- name: install-cni image: ${_prefix}cni:${CALICO_CNI_TAG} command: ["/install-cni.sh"] env: # The location of the Calico etcd cluster. - name: ETCD_ENDPOINTS valueFrom: configMapKeyRef: name: calico-config key: etcd_endpoints # The CNI network config to install on each node. - name: CNI_NETWORK_CONFIG valueFrom: configMapKeyRef: name: calico-config key: cni_network_config volumeMounts: - mountPath: /host/opt/cni/bin name: cni-bin-dir - mountPath: /host/etc/cni/net.d name: cni-net-dir - mountPath: /calico-secrets name: etcd-certs volumes: # Used by calico/node. - name: lib-modules hostPath: path: /lib/modules - name: var-run-calico hostPath: path: /var/run/calico # Used to install CNI. - name: cni-bin-dir hostPath: path: /opt/cni/bin - name: cni-net-dir hostPath: path: /etc/cni/net.d # Mount in the etcd TLS secrets. - name: etcd-certs secret: secretName: calico-etcd-secrets --- # This manifest deploys the Calico Kubernetes controllers. # See https://github.com/projectcalico/kube-controllers apiVersion: extensions/v1beta1 kind: Deployment metadata: name: calico-kube-controllers namespace: kube-system labels: k8s-app: calico-kube-controllers annotations: scheduler.alpha.kubernetes.io/critical-pod: '' scheduler.alpha.kubernetes.io/tolerations: | [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, {"key":"CriticalAddonsOnly", "operator":"Exists"}] spec: # The controllers can only have a single active instance. replicas: 1 strategy: type: Recreate template: metadata: name: calico-kube-controllers namespace: kube-system labels: k8s-app: calico-kube-controllers spec: # The controllers must run in the host network namespace so that # it isn't governed by policy that would prevent it from working. hostNetwork: true serviceAccountName: calico-kube-controllers containers: - name: calico-kube-controllers image: ${_prefix}kube-controllers:${CALICO_KUBE_CONTROLLERS_TAG} env: # The location of the Calico etcd cluster. 
- name: ETCD_ENDPOINTS valueFrom: configMapKeyRef: name: calico-config key: etcd_endpoints # Location of the CA certificate for etcd. - name: ETCD_CA_CERT_FILE valueFrom: configMapKeyRef: name: calico-config key: etcd_ca # Location of the client key for etcd. - name: ETCD_KEY_FILE valueFrom: configMapKeyRef: name: calico-config key: etcd_key # Location of the client certificate for etcd. - name: ETCD_CERT_FILE valueFrom: configMapKeyRef: name: calico-config key: etcd_cert # Choose which controllers to run. - name: ENABLED_CONTROLLERS value: policy,profile,workloadendpoint,node volumeMounts: # Mount in the etcd TLS secrets. - mountPath: /calico-secrets name: etcd-certs volumes: # Mount in the etcd TLS secrets. - name: etcd-certs secret: secretName: calico-etcd-secrets --- # This deployment turns off the old "policy-controller". It should remain at 0 replicas, and then # be removed entirely once the new kube-controllers deployment has been deployed above. apiVersion: extensions/v1beta1 kind: Deployment metadata: name: calico-policy-controller namespace: kube-system labels: k8s-app: calico-policy spec: # Turn this deployment off in favor of the kube-controllers deployment above. replicas: 0 strategy: type: Recreate template: metadata: name: calico-policy-controller namespace: kube-system labels: k8s-app: calico-policy spec: hostNetwork: true serviceAccountName: calico-kube-controllers containers: - name: calico-policy-controller image: ${_prefix}kube-controllers:${CALICO_KUBE_CONTROLLERS_TAG} env: # The location of the Calico etcd cluster. 
- name: ETCD_ENDPOINTS valueFrom: configMapKeyRef: name: calico-config key: etcd_endpoints --- apiVersion: v1 kind: ServiceAccount metadata: name: calico-kube-controllers namespace: kube-system --- apiVersion: v1 kind: ServiceAccount metadata: name: calico-node namespace: kube-system # Calico Version v2.6.7 # https://docs.projectcalico.org/v2.6/releases#v2.6.7 --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: calico-kube-controllers rules: - apiGroups: - "" - extensions resources: - pods - namespaces - networkpolicies - nodes verbs: - watch - list --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: calico-kube-controllers roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: calico-kube-controllers subjects: - kind: ServiceAccount name: calico-kube-controllers namespace: kube-system --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: calico-node rules: - apiGroups: [""] resources: - pods - nodes verbs: - get --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding metadata: name: calico-node roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: calico-node subjects: - kind: ServiceAccount name: calico-node namespace: kube-system EOF } until curl -sf "http://127.0.0.1:8080/healthz" do echo "Waiting for Kubernetes API..." sleep 5 done /usr/bin/kubectl apply -f ${CALICO_DEPLOY} --namespace=kube-system magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/kube-apiserver-to-kubelet-role.sh0000666000175100017510000000174113244017334033711 0ustar zuulzuul00000000000000#!/bin/sh -x . /etc/sysconfig/heat-params echo "Waiting for Kubernetes API..." 
until curl --silent "http://127.0.0.1:8080/version" do sleep 5 done cat <> ${KUBELET_KUBECONFIG} apiVersion: v1 clusters: - cluster: certificate-authority: ${CERT_DIR}/ca.crt server: ${KUBE_MASTER_URI} name: kubernetes contexts: - context: cluster: kubernetes user: system:node:${HOSTNAME_OVERRIDE} name: default current-context: default kind: Config preferences: {} users: - name: system:node:${HOSTNAME_OVERRIDE} user: as-user-extra: {} client-certificate: ${CERT_DIR}/kubelet.crt client-key: ${CERT_DIR}/kubelet.key EOF cat << EOF >> ${PROXY_KUBECONFIG} apiVersion: v1 clusters: - cluster: certificate-authority: ${CERT_DIR}/ca.crt server: ${KUBE_MASTER_URI} name: kubernetes contexts: - context: cluster: kubernetes user: kube-proxy name: default current-context: default kind: Config preferences: {} users: - name: kube-proxy user: as-user-extra: {} client-certificate: ${CERT_DIR}/proxy.crt client-key: ${CERT_DIR}/proxy.key EOF if [ "$TLS_DISABLED" = "True" ]; then sed -i 's/^.*user:$//' ${KUBELET_KUBECONFIG} sed -i 's/^.*client-certificate.*$//' ${KUBELET_KUBECONFIG} sed -i 's/^.*client-key.*$//' ${KUBELET_KUBECONFIG} sed -i 's/^.*certificate-authority.*$//' ${KUBELET_KUBECONFIG} fi chmod 0644 ${KUBELET_KUBECONFIG} chmod 0644 ${PROXY_KUBECONFIG} sed -i ' /^KUBE_ALLOW_PRIV=/ s/=.*/="--allow-privileged='"$KUBE_ALLOW_PRIV"'"/ /^KUBE_ETCD_SERVERS=/ s|=.*|="--etcd-servers=http://'"$ETCD_SERVER_IP"':2379"| /^KUBE_MASTER=/ s|=.*|="--master='"$KUBE_MASTER_URI"'"| ' /etc/kubernetes/config # NOTE: Kubernetes plugin for Openstack requires that the node name registered # in the kube-apiserver be the same as the Nova name of the instance, so that # the plugin can use the name to query for attributes such as IP, etc. # The hostname of the node is set to be the Nova name of the instance, and # the option --hostname-override for kubelet uses the hostname to register the node. # Using any other name will break the load balancer and cinder volume features. 
mkdir -p /etc/kubernetes/manifests KUBELET_ARGS="--pod-manifest-path=/etc/kubernetes/manifests --cadvisor-port=4194 --kubeconfig ${KUBELET_KUBECONFIG} --hostname-override=${HOSTNAME_OVERRIDE}" KUBELET_ARGS="${KUBELET_ARGS} --cluster_dns=${DNS_SERVICE_IP} --cluster_domain=${DNS_CLUSTER_DOMAIN}" KUBELET_ARGS="${KUBELET_ARGS} ${KUBELET_OPTIONS}" if [ -n "$TRUST_ID" ]; then KUBELET_ARGS="$KUBELET_ARGS --cloud-provider=openstack --cloud-config=/etc/kubernetes/kube_openstack_config" fi # Workaround for Cinder support (fixed in k8s >= 1.6) if [ ! -f /usr/bin/udevadm ]; then ln -s /sbin/udevadm /usr/bin/udevadm fi # For using default log-driver, other options should be ignored sed -i 's/\-\-log\-driver\=journald//g' /etc/sysconfig/docker KUBELET_ARGS="${KUBELET_ARGS} --pod-infra-container-image=${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}pause:3.0" if [ -n "${INSECURE_REGISTRY_URL}" ]; then echo "INSECURE_REGISTRY='--insecure-registry ${INSECURE_REGISTRY_URL}'" >> /etc/sysconfig/docker fi # specified cgroup driver KUBELET_ARGS="${KUBELET_ARGS} --client-ca-file=${CERT_DIR}/ca.crt --tls-cert-file=${CERT_DIR}/kubelet.crt --tls-private-key-file=${CERT_DIR}/kubelet.key --cgroup-driver=systemd" cat > /etc/kubernetes/get_require_kubeconfig.sh <> $FLANNELD_CONFIG <> /etc/environment < Creates network resources for the cluster. allocate a network and router for our server. 
parameters: existing_network: type: string default: "" existing_subnet: type: string default: "" private_network_cidr: type: string description: network range for fixed ip network private_network_name: type: string description: fixed network name default: "" dns_nameserver: type: string description: address of a dns nameserver reachable in your environment external_network: type: string description: uuid/name of a network to use for floating ip addresses resources: private_network: type: Magnum::Optional::Neutron::Net properties: name: {get_param: private_network_name} private_subnet: type: Magnum::Optional::Neutron::Subnet properties: cidr: {get_param: private_network_cidr} network: {get_resource: private_network} dns_nameservers: - {get_param: dns_nameserver} extrouter: type: Magnum::Optional::Neutron::Router properties: external_gateway_info: network: {get_param: external_network} extrouter_inside: type: Magnum::Optional::Neutron::RouterInterface properties: router_id: {get_resource: extrouter} subnet: {get_resource: private_subnet} network_switch: type: Magnum::NetworkSwitcher properties: private_network: {get_resource: private_network} private_subnet: {get_resource: private_subnet} existing_network: {get_param: existing_network} existing_subnet: {get_param: existing_subnet} outputs: fixed_network: description: > Network ID where to provision machines value: {get_attr: [network_switch, network]} fixed_subnet: description: > Subnet ID where to provision machines value: {get_attr: [network_switch, subnet]} magnum-6.1.0/magnum/drivers/common/templates/swarm/0000775000175100017510000000000013244017675022420 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/common/templates/swarm/fragments/0000775000175100017510000000000013244017675024406 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/common/templates/swarm/fragments/enable-services.sh0000666000175100017510000000037513244017334030010 0ustar zuulzuul00000000000000#!/bin/sh set -x systemctl stop 
docker echo "starting services" systemctl daemon-reload for service in $NODE_SERVICES; do echo "activating service $service" systemctl enable $service systemctl --no-block start $service done setenforce 1 magnum-6.1.0/magnum/drivers/common/templates/swarm/fragments/write-network-config.sh0000666000175100017510000000061213244017334031017 0ustar zuulzuul00000000000000#!/bin/sh . /etc/sysconfig/heat-params if [ "$NETWORK_DRIVER" != "flannel" ]; then exit 0 fi FLANNEL_JSON=/etc/sysconfig/flannel-network.json # Generate a flannel configuration that we will # store into etcd using curl. cat > $FLANNEL_JSON < /etc/selinux/targeted/contexts/files/file_contexts.local restorecon -R /usr/local/bin # disable selinux until cloud-init is over # enabled again in enable-services.sh setenforce 0 magnum-6.1.0/magnum/drivers/common/templates/swarm/fragments/write-docker-socket.yaml0000666000175100017510000000076513244017334031161 0ustar zuulzuul00000000000000#cloud-config merge_how: dict(recurse_array)+list(append) write_files: - path: /etc/systemd/system/docker.socket owner: "root:root" permissions: "0644" content: | [Unit] Description=Docker Socket for the API PartOf=docker.service After=docker-storage-setup.service Before=docker.service [Socket] ListenStream=/var/run/docker.sock SocketMode=0660 SocketUser=root SocketGroup=root [Install] WantedBy=sockets.target magnum-6.1.0/magnum/drivers/common/templates/swarm/fragments/network-service.sh0000666000175100017510000000610213244017334030062 0ustar zuulzuul00000000000000#!/bin/sh . 
/etc/sysconfig/heat-params CERT_DIR=/etc/docker PROTOCOL=https FLANNEL_OPTIONS="-etcd-cafile $CERT_DIR/ca.crt \ -etcd-certfile $CERT_DIR/server.crt \ -etcd-keyfile $CERT_DIR/server.key" DOCKER_NETWORK_OPTIONS="--cluster-store etcd://$ETCD_SERVER_IP:2379 \ --cluster-store-opt kv.cacertfile=$CERT_DIR/ca.crt \ --cluster-store-opt kv.certfile=$CERT_DIR/server.crt \ --cluster-store-opt kv.keyfile=$CERT_DIR/server.key \ --cluster-advertise $SWARM_NODE_IP:9379" if [ "$TLS_DISABLED" = "True" ]; then PROTOCOL=http FLANNEL_OPTIONS="" DOCKER_NETWORK_OPTIONS="--cluster-store etcd://$ETCD_SERVER_IP:2379 \ --cluster-advertise $SWARM_NODE_IP:9379" fi echo "Configuring ${NETWORK_DRIVER} network service ..." if [ "$NETWORK_DRIVER" == "docker" ]; then sed -i "/^DOCKER_NETWORK_OPTIONS=/ s#=.*#='$DOCKER_NETWORK_OPTIONS'#" \ /etc/sysconfig/docker-network fi if [ "$NETWORK_DRIVER" != "flannel" ]; then exit 0 fi FLANNELD_CONFIG=/etc/sysconfig/flanneld FLANNEL_DOCKER_BRIDGE_BIN=/usr/local/bin/flannel-docker-bridge FLANNEL_DOCKER_BRIDGE_SERVICE=/etc/systemd/system/flannel-docker-bridge.service DOCKER_FLANNEL_CONF=/etc/systemd/system/docker.service.d/flannel.conf FLANNEL_DOCKER_BRIDGE_CONF=/etc/systemd/system/flanneld.service.d/flannel-docker-bridge.conf mkdir -p /etc/systemd/system/docker.service.d mkdir -p /etc/systemd/system/flanneld.service.d sed -i ' /^FLANNEL_ETCD=/ s|=.*|="'"$PROTOCOL"'://'"$ETCD_SERVER_IP"':2379"| ' $FLANNELD_CONFIG sed -i '/FLANNEL_OPTIONS/'d $FLANNELD_CONFIG cat >> $FLANNELD_CONFIG <> $FLANNEL_DOCKER_BRIDGE_BIN <&2 exit 1 fi # NOTE(mnaser): Since Docker 1.13, it does not set the default forwarding # policy to ACCEPT which will cause CNI networking to fail. 
iptables -P FORWARD ACCEPT mkdir -p /run/flannel/ cat > /run/flannel/docker <> $FLANNEL_DOCKER_BRIDGE_SERVICE <> $DOCKER_FLANNEL_CONF <> $FLANNEL_DOCKER_BRIDGE_CONF < /etc/systemd/system/swarm-manager.service << END_SERVICE_TOP [Unit] Description=Swarm Manager After=docker.service etcd.service Requires=docker.service etcd.service OnFailure=swarm-manager-failure.service [Service] TimeoutStartSec=0 ExecStartPre=-/usr/bin/docker kill swarm-manager ExecStartPre=-/usr/bin/docker rm swarm-manager ExecStartPre=-/usr/bin/docker pull swarm:$SWARM_VERSION ExecStart=/usr/bin/docker run --name swarm-manager \\ -v $CERT_DIR:$CERT_DIR:Z \\ -p 2376:2375 \\ -e http_proxy=$HTTP_PROXY \\ -e https_proxy=$HTTPS_PROXY \\ -e no_proxy=$NO_PROXY \\ swarm:$SWARM_VERSION \\ manage -H tcp://0.0.0.0:2375 \\ --strategy $SWARM_STRATEGY \\ --replication \\ --advertise $NODE_IP:2376 \\ END_SERVICE_TOP if [ $TLS_DISABLED = 'False' ]; then cat >> /etc/systemd/system/swarm-manager.service << END_TLS --tlsverify \\ --tlscacert=$CERT_DIR/ca.crt \\ --tlskey=$CERT_DIR/server.key \\ --tlscert=$CERT_DIR/server.crt \\ --discovery-opt kv.cacertfile=$CERT_DIR/ca.crt \\ --discovery-opt kv.certfile=$CERT_DIR/server.crt \\ --discovery-opt kv.keyfile=$CERT_DIR/server.key \\ END_TLS fi UUID=`uuidgen` cat >> /etc/systemd/system/swarm-manager.service << END_SERVICE_BOTTOM etcd://$ETCD_SERVER_IP:2379/v2/keys/swarm/ ExecStop=/usr/bin/docker stop swarm-manager Restart=always ExecStartPost=/usr/bin/$WAIT_CURL $VERIFY_CA \\ --data-binary '{"status": "SUCCESS", "reason": "Setup complete", "data": "OK", "id": "$UUID"}' [Install] WantedBy=multi-user.target END_SERVICE_BOTTOM chown root:root /etc/systemd/system/swarm-manager.service chmod 644 /etc/systemd/system/swarm-manager.service magnum-6.1.0/magnum/drivers/common/templates/swarm/fragments/write-heat-params-master.yaml0000666000175100017510000000225513244017334032113 0ustar zuulzuul00000000000000#cloud-config merge_how: dict(recurse_array)+list(append) write_files: - 
path: /etc/sysconfig/heat-params owner: "root:root" permissions: "0600" content: | WAIT_CURL="$WAIT_CURL" ETCD_DISCOVERY_URL="$ETCD_DISCOVERY_URL" DOCKER_VOLUME="$DOCKER_VOLUME" DOCKER_VOLUME_SIZE="$DOCKER_VOLUME_SIZE" DOCKER_STORAGE_DRIVER="$DOCKER_STORAGE_DRIVER" HTTP_PROXY="$HTTP_PROXY" HTTPS_PROXY="$HTTPS_PROXY" NO_PROXY="$NO_PROXY" SWARM_API_IP="$SWARM_API_IP" SWARM_NODE_IP="$SWARM_NODE_IP" CLUSTER_UUID="$CLUSTER_UUID" MAGNUM_URL="$MAGNUM_URL" TLS_DISABLED="$TLS_DISABLED" VERIFY_CA="$VERIFY_CA" NETWORK_DRIVER="$NETWORK_DRIVER" FLANNEL_NETWORK_CIDR="$FLANNEL_NETWORK_CIDR" FLANNEL_NETWORK_SUBNETLEN="$FLANNEL_NETWORK_SUBNETLEN" FLANNEL_BACKEND="$FLANNEL_BACKEND" ETCD_SERVER_IP="$ETCD_SERVER_IP" API_IP_ADDRESS="$API_IP_ADDRESS" SWARM_VERSION="$SWARM_VERSION" TRUSTEE_USER_ID="$TRUSTEE_USER_ID" TRUSTEE_PASSWORD="$TRUSTEE_PASSWORD" TRUST_ID="$TRUST_ID" AUTH_URL="$AUTH_URL" VOLUME_DRIVER="$VOLUME_DRIVER" REXRAY_PREEMPT="$REXRAY_PREEMPT" magnum-6.1.0/magnum/drivers/common/templates/swarm/fragments/configure-etcd.sh0000666000175100017510000000166713244017334027644 0ustar zuulzuul00000000000000#!/bin/sh . /etc/sysconfig/heat-params myip="$SWARM_NODE_IP" cert_dir="/etc/docker" protocol="https" if [ "$TLS_DISABLED" = "True" ]; then protocol="http" fi cat > /etc/etcd/etcd.conf <> /etc/etcd/etcd.conf <> /etc/etcd/etcd.conf fi magnum-6.1.0/magnum/drivers/common/templates/swarm/fragments/network-config-service.sh0000666000175100017510000000365713244017334031341 0ustar zuulzuul00000000000000#!/bin/sh . /etc/sysconfig/heat-params echo "Configuring ${NETWORK_DRIVER} network ..." 
if [ "$NETWORK_DRIVER" != "flannel" ]; then exit 0 fi FLANNELD_CONFIG=/etc/sysconfig/flanneld FLANNEL_CONFIG_BIN=/usr/local/bin/flannel-config FLANNEL_CONFIG_SERVICE=/etc/systemd/system/flannel-config.service FLANNEL_JSON=/etc/sysconfig/flannel-network.json CERT_DIR=/etc/docker PROTOCOL=https FLANNEL_OPTIONS="-etcd-cafile $CERT_DIR/ca.crt \ -etcd-certfile $CERT_DIR/server.crt \ -etcd-keyfile $CERT_DIR/server.key" ETCD_CURL_OPTIONS="--cacert $CERT_DIR/ca.crt \ --cert $CERT_DIR/server.crt --key $CERT_DIR/server.key" if [ "$TLS_DISABLED" = "True" ]; then PROTOCOL=http FLANNEL_OPTIONS="" ETCD_CURL_OPTIONS="" fi sed -i ' /^FLANNEL_ETCD=/ s|=.*|="'"$PROTOCOL"'://'"$ETCD_SERVER_IP"':2379"| ' $FLANNELD_CONFIG sed -i '/FLANNEL_OPTIONS/'d $FLANNELD_CONFIG cat >> $FLANNELD_CONFIG < $FLANNEL_CONFIG_BIN <&2 exit 1 fi if ! [ "$FLANNEL_ETCD_ENDPOINTS" ] && [ "$FLANNEL_ETCD_PREFIX" ]; then echo "ERROR: missing required configuration" >&2 exit 1 fi echo "creating flanneld config in etcd" while ! curl -sf -L $ETCD_CURL_OPTIONS \ $FLANNEL_ETCD/v2/keys${FLANNEL_ETCD_PREFIX}/config \ -X PUT --data-urlencode value@${FLANNEL_JSON}; do echo "waiting for etcd" sleep 1 done EOF cat > $FLANNEL_CONFIG_SERVICE < $DOCKER_HTTP_PROXY_CONF [Service] Environment=HTTP_PROXY=$HTTP_PROXY EOF DOCKER_RESTART=1 if [ -f "$BASH_RC" ]; then echo "declare -x http_proxy=$HTTP_PROXY" >> $BASH_RC else echo "File $BASH_RC does not exist, not setting http_proxy" fi fi if [ -n "$HTTPS_PROXY" ]; then cat < $DOCKER_HTTPS_PROXY_CONF [Service] Environment=HTTPS_PROXY=$HTTPS_PROXY EOF DOCKER_RESTART=1 if [ -f "$BASH_RC" ]; then echo "declare -x https_proxy=$HTTPS_PROXY" >> $BASH_RC else echo "File $BASH_RC does not exist, not setting https_proxy" fi fi if [ -n "$HTTP_PROXY" -o -n "$HTTPS_PROXY" ]; then if [ -n "$NO_PROXY" ]; then cat < $DOCKER_NO_PROXY_CONF [Service] Environment=NO_PROXY=$NO_PROXY EOF DOCKER_RESTART=1 if [ -f "$BASH_RC" ]; then echo "declare -x no_proxy=$NO_PROXY" >> $BASH_RC else echo "File $BASH_RC 
does not exist, not setting no_proxy" fi fi fi if [ "$DOCKER_RESTART" -eq 1 ]; then systemctl daemon-reload systemctl --no-block restart docker.service fi magnum-6.1.0/magnum/drivers/common/templates/swarm/fragments/cfn-signal.sh0000666000175100017510000000056513244017334026763 0ustar zuulzuul00000000000000#!/bin/sh . /etc/sysconfig/heat-params echo "notifying heat" if [ "$VERIFY_CA" == "True" ]; then VERIFY_CA="" else VERIFY_CA="-k" fi STATUS="SUCCESS" REASON="Setup complete" DATA="OK" UUID=`uuidgen` data=$(echo '{"status": "'${STATUS}'", "reason": "'$REASON'", "data": "'${DATA}'", "id": "'$UUID'"}') sh -c "${WAIT_CURL} ${VERIFY_CA} --data-binary '${data}'" magnum-6.1.0/magnum/drivers/common/templates/swarm/fragments/write-cluster-failure-service.yaml0000666000175100017510000000072113244017334033160 0ustar zuulzuul00000000000000#cloud-config merge_how: dict(recurse_array)+list(append) write_files: - path: /etc/systemd/system/$SERVICE-failure.service owner: "root:root" permissions: "0644" content: | [Unit] Description=$SERVICE Failure Notifier [Service] Type=simple TimeoutStartSec=0 ExecStart=/usr/bin/$WAIT_CURL $VERIFY_CA \ --data-binary '{"status": "FAILURE", "reason": "$SERVICE service failed to start.", "data": "Failure"}' magnum-6.1.0/magnum/drivers/common/templates/swarm/fragments/remove-docker-key.sh0000666000175100017510000000010113244017334030254 0ustar zuulzuul00000000000000#!/bin/sh echo "removing docker key" rm -f /etc/docker/key.json magnum-6.1.0/magnum/drivers/common/templates/swarm/fragments/write-swarm-agent-service.sh0000666000175100017510000000454613244017334031760 0ustar zuulzuul00000000000000#!/bin/sh . 
/etc/sysconfig/heat-params myip="$SWARM_NODE_IP" if [ "$VERIFY_CA" == "True" ]; then VERIFY_CA="" else VERIFY_CA="-k" fi CONF_FILE=/etc/systemd/system/swarm-agent.service CERT_DIR=/etc/docker PROTOCOL=https ETCDCTL_OPTIONS="--ca-file $CERT_DIR/ca.crt \ --cert-file $CERT_DIR/server.crt \ --key-file $CERT_DIR/server.key" if [ $TLS_DISABLED = 'True' ]; then PROTOCOL=http ETCDCTL_OPTIONS="" fi cat > $CONF_FILE << EOF [Unit] Description=Swarm Agent After=docker.service Requires=docker.service OnFailure=swarm-agent-failure.service [Service] TimeoutStartSec=0 ExecStartPre=-/usr/bin/docker kill swarm-agent ExecStartPre=-/usr/bin/docker rm swarm-agent ExecStartPre=-/usr/bin/docker pull swarm:$SWARM_VERSION ExecStart=/usr/bin/docker run -e http_proxy=$HTTP_PROXY \\ -e https_proxy=$HTTPS_PROXY \\ -e no_proxy=$NO_PROXY \\ -v $CERT_DIR:$CERT_DIR:Z \\ --name swarm-agent \\ swarm:$SWARM_VERSION \\ join \\ --addr $myip:2375 \\ EOF if [ $TLS_DISABLED = 'False' ]; then cat >> /etc/systemd/system/swarm-agent.service << END_TLS --discovery-opt kv.cacertfile=$CERT_DIR/ca.crt \\ --discovery-opt kv.certfile=$CERT_DIR/server.crt \\ --discovery-opt kv.keyfile=$CERT_DIR/server.key \\ END_TLS fi cat >> /etc/systemd/system/swarm-agent.service << END_SERVICE_BOTTOM etcd://$ETCD_SERVER_IP:2379/v2/keys/swarm/ Restart=always ExecStop=/usr/bin/docker stop swarm-agent ExecStartPost=/usr/local/bin/notify-heat [Install] WantedBy=multi-user.target END_SERVICE_BOTTOM chown root:root $CONF_FILE chmod 644 $CONF_FILE SCRIPT=/usr/local/bin/notify-heat UUID=`uuidgen` cat > $SCRIPT << EOF #!/bin/sh until etcdctl \ --peers $PROTOCOL://$ETCD_SERVER_IP:2379 \ $ETCDCTL_OPTIONS --timeout 1s \ --total-timeout 5s \ ls /v2/keys/swarm/docker/swarm/nodes/$myip:2375 do echo "Waiting for swarm agent registration..." 
sleep 5 done ${WAIT_CURL} {$VERIFY_CA} \ --data-binary '{"status": "SUCCESS", "reason": "Swarm agent ready", "data": "OK", "id": "${UUID}"}' EOF chown root:root $SCRIPT chmod 755 $SCRIPT magnum-6.1.0/magnum/drivers/common/templates/swarm/fragments/volume-service.sh0000666000175100017510000000355713244017334027713 0ustar zuulzuul00000000000000#!/bin/sh # Add rexray volume driver support for Swarm . /etc/sysconfig/heat-params set -e set -x # if no voulume driver is selected don't do any configuration if [ -z "$VOLUME_DRIVER" ]; then exit 0 fi mkdir -p /etc/rexray mkdir -p /var/log/rexray mkdir -p /var/run/rexray mkdir -p /var/lib/rexray REXRAY_CONFIG=/etc/rexray/config.yml # Add rexray configuration cat > $REXRAY_CONFIG < /etc/systemd/system/rexray.service < This is a template resource that accepts public and private IPs. It connects public ip address to its outputs, essentially acting as one state of a multiplexer. parameters: public_ip: type: string default: "" private_ip: type: string default: "" outputs: ip_address: value: {get_param: public_ip} magnum-6.1.0/magnum/drivers/common/templates/fragments/enable-docker-registry.sh0000666000175100017510000000033513244017334030145 0ustar zuulzuul00000000000000#!/bin/sh . /etc/sysconfig/heat-params if [ "$REGISTRY_ENABLED" = "False" ]; then exit 0 fi echo "starting docker registry ..." systemctl daemon-reload systemctl enable registry systemctl --no-block start registry magnum-6.1.0/magnum/drivers/common/templates/fragments/floating_ip_address_switcher_private.yaml0000666000175100017510000000060213244017334033573 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is a template resource that accepts public and private IPs. It connects private ip address to its outputs, essentially acting as one state of a multiplexer. 
parameters: public_ip: type: string default: "" private_ip: type: string default: "" outputs: ip_address: value: {get_param: private_ip} magnum-6.1.0/magnum/drivers/common/templates/fragments/configure_docker_storage_driver_atomic.sh0000666000175100017510000000316613244017334033554 0ustar zuulzuul00000000000000# This file contains docker storage drivers configuration for fedora # atomic hosts, as supported by Magnum. # * Remove any existing docker-storage configuration. In case of an # existing configuration, docker-storage-setup will fail. # * Remove docker storage graph clear_docker_storage () { # stop docker systemctl stop docker systemctl disable docker-storage-setup # clear storage graph rm -rf /var/lib/docker/* if [ -f /etc/sysconfig/docker-storage ]; then sed -i "/^DOCKER_STORAGE_OPTIONS=/ s/=.*/=/" /etc/sysconfig/docker-storage fi } # Configure generic docker storage driver. configure_storage_driver_generic() { clear_docker_storage if [ -n "$DOCKER_VOLUME_SIZE" ] && [ "$DOCKER_VOLUME_SIZE" -gt 0 ]; then mkfs.xfs -f ${device_path} echo "${device_path} /var/lib/docker xfs defaults 0 0" >> /etc/fstab mount -a fi echo "DOCKER_STORAGE_OPTIONS=\"--storage-driver $1\"" > /etc/sysconfig/docker-storage } # Configure docker storage with devicemapper using direct LVM configure_devicemapper () { clear_docker_storage echo "GROWROOT=True" > /etc/sysconfig/docker-storage-setup echo "STORAGE_DRIVER=devicemapper" >> /etc/sysconfig/docker-storage-setup if [ -n "$DOCKER_VOLUME_SIZE" ] && [ "$DOCKER_VOLUME_SIZE" -gt 0 ]; then pvcreate -f ${device_path} vgcreate docker ${device_path} echo "VG=docker" >> /etc/sysconfig/docker-storage-setup else echo "ROOT_SIZE=5GB" >> /etc/sysconfig/docker-storage-setup echo "DATA_SIZE=95%FREE" >> /etc/sysconfig/docker-storage-setup fi docker-storage-setup } magnum-6.1.0/magnum/drivers/common/templates/fragments/configure-docker-registry.sh0000666000175100017510000000175013244017334030702 0ustar zuulzuul00000000000000#!/bin/sh . 
/etc/sysconfig/heat-params if [ "$REGISTRY_ENABLED" = "False" ]; then exit 0 fi cat > /etc/sysconfig/registry-config.yml << EOF version: 0.1 log: fields: service: registry storage: cache: layerinfo: inmemory swift: authurl: "$AUTH_URL" region: "$SWIFT_REGION" username: "$TRUSTEE_USERNAME" password: "$TRUSTEE_PASSWORD" domainid: "$TRUSTEE_DOMAIN_ID" trustid: "$TRUST_ID" container: "$REGISTRY_CONTAINER" insecureskipverify: $REGISTRY_INSECURE chunksize: $REGISTRY_CHUNKSIZE http: addr: :5000 EOF cat > /etc/systemd/system/registry.service << EOF [Unit] Description=Docker registry v2 Requires=docker.service After=docker.service [Service] Type=oneshot RemainAfterExit=yes ExecStart=/usr/bin/docker run -d -p $REGISTRY_PORT:5000 --restart=always --name registry -v /etc/sysconfig/registry-config.yml:/etc/docker/registry/config.yml registry:2 ExecStop=/usr/bin/docker rm -f registry [Install] WantedBy=multi-user.target EOF magnum-6.1.0/magnum/drivers/common/templates/fragments/configure-docker-storage.sh0000666000175100017510000000221213244017334030470 0ustar zuulzuul00000000000000#!/bin/sh . /etc/sysconfig/heat-params if [ -n "$DOCKER_VOLUME_SIZE" ] && [ "$DOCKER_VOLUME_SIZE" -gt 0 ]; then if [ "$ENABLE_CINDER" == "False" ]; then # FIXME(yuanying): Use ephemeral disk for docker storage # Currently Ironic doesn't support cinder volumes, # so we must use preserved ephemeral disk instead of a cinder volume. 
device_path=$(readlink -f /dev/disk/by-label/ephemeral0) else attempts=60 while [ ${attempts} -gt 0 ]; do device_name=$(ls /dev/disk/by-id | grep ${DOCKER_VOLUME:0:20}$) if [ -n "${device_name}" ]; then break fi echo "waiting for disk device" sleep 0.5 udevadm trigger let attempts-- done if [ -z "${device_name}" ]; then echo "ERROR: disk device does not exist" >&2 exit 1 fi device_path=/dev/disk/by-id/${device_name} fi fi $configure_docker_storage_driver if [ "$DOCKER_STORAGE_DRIVER" = "devicemapper" ]; then configure_devicemapper else configure_storage_driver_generic $DOCKER_STORAGE_DRIVER fi magnum-6.1.0/magnum/drivers/common/templates/fragments/api_gateway_switcher_pool.yaml0000666000175100017510000000114113244017334031363 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is a template resource that accepts public and private IPs from both a Neutron LBaaS Pool and a master node. It connects the pool inputs to its outputs, essentially acting as one state of a multiplexer. parameters: pool_public_ip: type: string default: "" pool_private_ip: type: string default: "" master_public_ip: type: string default: "" master_private_ip: type: string default: "" outputs: public_ip: value: {get_param: pool_public_ip} private_ip: value: {get_param: pool_private_ip} magnum-6.1.0/magnum/drivers/common/templates/fragments/api_gateway_switcher_master.yaml0000666000175100017510000000114713244017334031713 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is a template resource that accepts public and private IPs from both a Neutron LBaaS Pool and a master node. It connects the master inputs to its outputs, essentially acting as one state of a multiplexer. 
parameters: pool_public_ip: type: string default: "" pool_private_ip: type: string default: "" master_public_ip: type: string default: "" master_private_ip: type: string default: "" outputs: public_ip: value: {get_param: master_public_ip} private_ip: value: {get_param: master_private_ip} magnum-6.1.0/magnum/drivers/common/templates/fragments/atomic-install-openstack-ca.sh0000666000175100017510000000035213244017334031071 0ustar zuulzuul00000000000000#!/bin/sh -ux CA_FILE=/etc/pki/ca-trust/source/anchors/openstack-ca.pem if [ -n "$OPENSTACK_CA" ] ; then cat >> $CA_FILE < This is a nested stack that defines a single Kubernetes minion, This stack is included by an AutoScalingGroup resource in the parent template (kubecluster.yaml). parameters: name: type: string description: server name server_image: type: string description: glance image used to boot the server minion_flavor: type: string description: flavor to use when booting the server ssh_key_name: type: string description: name of ssh key to be provisioned on our server external_network: type: string description: uuid/name of a network to use for floating ip addresses fixed_network: type: string description: Network from which to allocate fixed addresses. fixed_subnet: type: string description: Subnet from which to allocate fixed addresses. secgroup_kube_minion_id: type: string description: ID of the security group for kubernetes minion. flannel_network_cidr: type: string description: network range for flannel overlay network kube_software_configs: type: string description : > ID of the multipart mime. nodes_server_group_id: type: string description: ID of the server group for kubernetes cluster nodes. resources: ###################################################################### # # a single kubernetes minion. 
# # do NOT use "_" (underscore) in the Nova server name # it creates a mismatch between the generated Nova name and its hostname # which can lead to weird problems kube-minion: type: OS::Nova::Server properties: name: {get_param: name} image: {get_param: server_image} flavor: {get_param: minion_flavor} key_name: {get_param: ssh_key_name} user_data_format: RAW user_data: {get_param: kube_software_configs} networks: - port: {get_resource: kube_minion_eth0} scheduler_hints: { group: { get_param: nodes_server_group_id }} kube_minion_eth0: type: "OS::Neutron::Port" properties: network_id: get_param: fixed_network security_groups: - {get_param: secgroup_kube_minion_id} fixed_ips: - subnet_id: get_param: fixed_subnet allowed_address_pairs: - ip_address: {get_param: flannel_network_cidr} kube_minion_floating: type: OS::Neutron::FloatingIP properties: floating_network: get_param: external_network port_id: get_resource: kube_minion_eth0 outputs: kube_minion_ip: value: {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]} description: > This is the "public" IP address of the Kubernetes minion node. kube_minion_external_ip: value: {get_attr: [kube_minion_floating, floating_ip_address]} description: > This is the "public" IP address of the Kubernetes minion node. OS::stack_id: value: {get_param: "OS::stack_id"} description: > This is a id of the stack which creates from this template. magnum-6.1.0/magnum/drivers/k8s_fedora_ironic_v1/templates/kubecluster.yaml0000666000175100017510000005100313244017334027202 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This template will boot a Kubernetes cluster with one or more minions (as specified by the number_of_minions parameter, which defaults to 1). 
parameters: ssh_key_name: type: string description: name of ssh key to be provisioned on our server external_network: type: string description: uuid/name of a network to use for floating ip addresses default: public fixed_network: type: string description: > name of private network into which servers get deployed Important: the Load Balancer feature in Kubernetes requires that the name for the fixed_network must be "private" for the address lookup in Kubernetes to work properly fixed_subnet: type: string description: Sub network from which to allocate fixed addresses. default: private-subnet server_image: type: string description: glance image used to boot the server master_flavor: type: string default: m1.small description: flavor to use when booting the server minion_flavor: type: string default: baremetal description: flavor to use when booting the server prometheus_monitoring: type: boolean default: false description: > whether or not to have the grafana-prometheus-cadvisor monitoring setup grafana_admin_passwd: type: string default: admin hidden: true description: > admin user password for the Grafana monitoring interface dns_nameserver: type: string description: address of a dns nameserver reachable in your environment default: 8.8.8.8 number_of_masters: type: number description: how many kubernetes masters to spawn default: 1 number_of_minions: type: number description: how many kubernetes minions to spawn default: 1 portal_network_cidr: type: string description: > address range used by kubernetes for service portals default: 10.254.0.0/16 network_driver: type: string description: network driver to use for instantiating container networks default: flannel flannel_network_cidr: type: string description: network range for flannel overlay network default: 10.100.0.0/16 flannel_network_subnetlen: type: number description: size of subnet assigned to each minion default: 24 flannel_backend: type: string description: > specify the backend for flannel, default udp 
backend default: "udp" constraints: - allowed_values: ["udp", "vxlan", "host-gw"] system_pods_initial_delay: type: number description: > health check, time to wait for system pods (podmaster, scheduler) to boot (in seconds) default: 30 system_pods_timeout: type: number description: > health check, timeout for system pods (podmaster, scheduler) to answer. (in seconds) default: 5 admission_control_list: type: string description: > List of admission control plugins to activate default: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota" kube_allow_priv: type: string description: > whether or not kubernetes should permit privileged containers. default: "true" constraints: - allowed_values: ["true", "false"] etcd_volume_size: type: number description: > size of the cinder volume for etcd storage default: 0 docker_volume_size: type: number description: > size of a cinder volume to allocate to docker for container/image storage default: 0 docker_volume_type: type: string description: > type of a cinder volume to allocate to docker for container/image storage docker_storage_driver: type: string description: docker storage driver name default: "devicemapper" wait_condition_timeout: type: number description: > timeout for the Wait Conditions default: 6000 minions_to_remove: type: comma_delimited_list description: > List of minions to be removed when doing an update. Individual minion may be referenced several ways: (1) The resource name (e.g. ['1', '3']), (2) The private IP address ['10.0.0.4', '10.0.0.6']. Note: the list should be empty when doing an create. default: [] discovery_url: type: string description: > Discovery URL used for bootstrapping the etcd cluster. registry_enabled: type: boolean description: > Indicates whether the docker registry is enabled. 
default: false registry_port: type: number description: port of registry service default: 5000 swift_region: type: string description: region of swift service default: "" registry_container: type: string description: > name of swift container which docker registry stores images in default: "container" registry_insecure: type: boolean description: > indicates whether to skip TLS verification between registry and backend storage default: true registry_chunksize: type: number description: > size fo the data segments for the swift dynamic large objects default: 5242880 volume_driver: type: string description: volume driver to use for container storage default: "" region_name: type: string description: A logically separate section of the cluster username: type: string description: > user account password: type: string description: > user password, not set in current implementation, only used to fill in for Kubernetes config file default: ChangeMe hidden: true loadbalancing_protocol: type: string description: > The protocol which is used for load balancing. If you want to change tls_disabled option to 'True', please change this to "HTTP". default: TCP constraints: - allowed_values: ["TCP", "HTTP"] tls_disabled: type: boolean description: whether or not to disable TLS default: False kube_dashboard_enabled: type: boolean description: whether or not to disable kubernetes dashboard default: True influx_grafana_dashboard_enabled: type: boolean description: Enable influxdb with grafana dashboard for data from heapster default: False verify_ca: type: boolean description: whether or not to validate certificate authority kubernetes_port: type: number description: > The port which are used by kube-apiserver to provide Kubernetes service. 
default: 6443 cluster_uuid: type: string description: identifier for the cluster this template is generating magnum_url: type: string description: endpoint to retrieve TLS certs from http_proxy: type: string description: http proxy address for docker default: "" https_proxy: type: string description: https proxy address for docker default: "" no_proxy: type: string description: no proxies for docker default: "" trustee_domain_id: type: string description: domain id of the trustee default: "" trustee_user_id: type: string description: user id of the trustee default: "" trustee_username: type: string description: username of the trustee default: "" trustee_password: type: string description: password of the trustee default: "" hidden: true trust_id: type: string description: id of the trust which is used by the trustee default: "" hidden: true auth_url: type: string description: url for keystone kube_tag: type: string description: tag of the k8s containers used to provision the kubernetes cluster default: v1.9.3 kube_version: type: string description: version of kubernetes used for kubernetes cluster default: v1.9.3 kube_dashboard_version: type: string description: version of kubernetes dashboard used for kubernetes cluster default: v1.5.1 insecure_registry_url: type: string description: insecure registry url default: "" container_infra_prefix: type: string description: > prefix of container images used in the cluster, kubernetes components, kubernetes-dashboard, coredns etc constraints: - allowed_pattern: "^$|.*/" default: "" openstack_ca: type: string hidden: true description: The OpenStack CA certificate to install on the node. 
nodes_affinity_policy: type: string description: > affinity policy for nodes server group constraints: - allowed_values: ["affinity", "anti-affinity", "soft-affinity", "soft-anti-affinity"] resources: api_lb: type: ../../common/templates/lb.yaml properties: fixed_subnet: {get_param: fixed_subnet} external_network: {get_param: external_network} protocol: {get_param: loadbalancing_protocol} port: {get_param: kubernetes_port} etcd_lb: type: ../../common/templates/lb.yaml properties: fixed_subnet: {get_param: fixed_subnet} external_network: {get_param: external_network} protocol: {get_param: loadbalancing_protocol} port: 2379 ###################################################################### # # security groups. we need to permit network traffic of various # sorts. # secgroup_base: type: OS::Neutron::SecurityGroup properties: rules: - protocol: icmp - protocol: tcp port_range_min: 22 port_range_max: 22 secgroup_kube_master: type: OS::Neutron::SecurityGroup properties: rules: - protocol: tcp port_range_min: 7080 port_range_max: 7080 - protocol: tcp port_range_min: 8080 port_range_max: 8080 - protocol: tcp port_range_min: 2379 port_range_max: 2379 - protocol: tcp port_range_min: 2380 port_range_max: 2380 - protocol: tcp port_range_min: 6443 port_range_max: 6443 - protocol: tcp port_range_min: 30000 port_range_max: 32767 secgroup_kube_minion: type: OS::Neutron::SecurityGroup properties: rules: - protocol: icmp - protocol: tcp - protocol: udp ###################################################################### # # resources that expose the IPs of either the kube master or a given # LBaaS pool depending on whether LBaaS is enabled for the cluster. 
# api_address_lb_switch: type: Magnum::ApiGatewaySwitcher properties: pool_public_ip: {get_attr: [api_lb, floating_address]} pool_private_ip: {get_attr: [api_lb, address]} master_public_ip: {get_attr: [kube_masters, resource.0.kube_master_external_ip]} master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]} etcd_address_lb_switch: type: Magnum::ApiGatewaySwitcher properties: pool_private_ip: {get_attr: [etcd_lb, address]} master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]} ###################################################################### # # resources that expose the IPs of either floating ip or a given # fixed ip depending on whether FloatingIP is enabled for the cluster. # api_address_floating_switch: type: Magnum::FloatingIPAddressSwitcher properties: public_ip: {get_attr: [api_address_lb_switch, public_ip]} private_ip: {get_attr: [api_address_lb_switch, private_ip]} ###################################################################### # # resources that expose the server group for all nodes include master # and minions. # nodes_server_group: type: OS::Nova::ServerGroup properties: policies: [{get_param: nodes_affinity_policy}] ###################################################################### # # kubernetes masters. This is a resource group that will create # masters. 
# kube_masters: type: OS::Heat::ResourceGroup properties: count: {get_param: number_of_masters} resource_def: type: kubemaster.yaml properties: name: list_join: - '-' - [{ get_param: 'OS::stack_name' }, 'master', '%index%'] prometheus_monitoring: {get_param: prometheus_monitoring} grafana_admin_passwd: {get_param: grafana_admin_passwd} api_public_address: {get_attr: [api_lb, floating_address]} api_private_address: {get_attr: [api_lb, address]} ssh_key_name: {get_param: ssh_key_name} server_image: {get_param: server_image} master_flavor: {get_param: master_flavor} external_network: {get_param: external_network} kube_allow_priv: {get_param: kube_allow_priv} docker_volume_size: {get_param: docker_volume_size} docker_storage_driver: {get_param: docker_storage_driver} network_driver: {get_param: network_driver} flannel_network_cidr: {get_param: flannel_network_cidr} flannel_network_subnetlen: {get_param: flannel_network_subnetlen} flannel_backend: {get_param: flannel_backend} system_pods_initial_delay: {get_param: system_pods_initial_delay} system_pods_timeout: {get_param: system_pods_timeout} portal_network_cidr: {get_param: portal_network_cidr} admission_control_list: {get_param: admission_control_list} discovery_url: {get_param: discovery_url} cluster_uuid: {get_param: cluster_uuid} magnum_url: {get_param: magnum_url} fixed_network: {get_param: fixed_network} fixed_subnet: {get_param: fixed_subnet} api_pool_id: {get_attr: [api_lb, pool_id]} etcd_pool_id: {get_attr: [etcd_lb, pool_id]} username: {get_param: username} password: {get_param: password} kubernetes_port: {get_param: kubernetes_port} tls_disabled: {get_param: tls_disabled} kube_dashboard_enabled: {get_param: kube_dashboard_enabled} influx_grafana_dashboard_enabled: {get_param: influx_grafana_dashboard_enabled} verify_ca: {get_param: verify_ca} secgroup_base_id: {get_resource: secgroup_base} secgroup_kube_master_id: {get_resource: secgroup_kube_master} http_proxy: {get_param: http_proxy} https_proxy: 
{get_param: https_proxy} no_proxy: {get_param: no_proxy} kube_tag: {get_param: kube_tag} kube_version: {get_param: kube_version} kube_dashboard_version: {get_param: kube_dashboard_version} trustee_user_id: {get_param: trustee_user_id} trustee_password: {get_param: trustee_password} trust_id: {get_param: trust_id} auth_url: {get_param: auth_url} insecure_registry_url: {get_param: insecure_registry_url} container_infra_prefix: {get_param: container_infra_prefix} wc_curl_cli: {get_attr: [master_wait_handle, curl_cli]} etcd_lb_vip: {get_attr: [etcd_lb, address]} openstack_ca: {get_param: openstack_ca} nodes_server_group_id: {get_resource: nodes_server_group} ###################################################################### # # wait condition handler for kubernetes master # master_wait_handle: type: OS::Heat::WaitConditionHandle master_wait_condition: type: OS::Heat::WaitCondition properties: count: {get_param: number_of_masters} handle: {get_resource: master_wait_handle} timeout: {get_param: wait_condition_timeout} ###################################################################### # # kubernetes minions. This is an resource group that will initially # create minions, and needs to be manually scaled. 
# kube_minions: type: OS::Heat::ResourceGroup properties: count: {get_param: number_of_minions} removal_policies: [{resource_list: {get_param: minions_to_remove}}] resource_def: type: kubeminion.yaml properties: name: list_join: - '-' - [{ get_param: 'OS::stack_name' }, 'minion', '%index%'] ssh_key_name: {get_param: ssh_key_name} server_image: {get_param: server_image} minion_flavor: {get_param: minion_flavor} fixed_network: {get_param: fixed_network} fixed_subnet: {get_param: fixed_subnet} secgroup_kube_minion_id: {get_resource: secgroup_kube_minion} flannel_network_cidr: {get_param: flannel_network_cidr} external_network: {get_param: external_network} kube_software_configs: {get_attr: [kubeminion_software_configs, kube_minion_init]} nodes_server_group_id: {get_resource: nodes_server_group} ###################################################################### # # Software configs for kubernetes minions # kubeminion_software_configs: type: kubeminion_software_configs.yaml properties: prometheus_monitoring: {get_param: prometheus_monitoring} network_driver: {get_param: network_driver} kube_master_ip: {get_attr: [api_address_lb_switch, private_ip]} etcd_server_ip: {get_attr: [etcd_address_lb_switch, private_ip]} kube_allow_priv: {get_param: kube_allow_priv} docker_volume_size: {get_param: docker_volume_size} docker_storage_driver: {get_param: docker_storage_driver} registry_enabled: {get_param: registry_enabled} registry_port: {get_param: registry_port} swift_region: {get_param: swift_region} registry_container: {get_param: registry_container} registry_insecure: {get_param: registry_insecure} registry_chunksize: {get_param: registry_chunksize} cluster_uuid: {get_param: cluster_uuid} magnum_url: {get_param: magnum_url} volume_driver: {get_param: volume_driver} region_name: {get_param: region_name} auth_url: {get_param: auth_url} username: {get_param: username} password: {get_param: password} kubernetes_port: {get_param: kubernetes_port} tls_disabled: {get_param: 
tls_disabled} verify_ca: {get_param: verify_ca} http_proxy: {get_param: http_proxy} https_proxy: {get_param: https_proxy} no_proxy: {get_param: no_proxy} kube_tag: {get_param: kube_tag} kube_version: {get_param: kube_version} trustee_user_id: {get_param: trustee_user_id} trustee_username: {get_param: trustee_username} trustee_password: {get_param: trustee_password} trustee_domain_id: {get_param: trustee_domain_id} trust_id: {get_param: trust_id} insecure_registry_url: {get_param: insecure_registry_url} container_infra_prefix: {get_param: container_infra_prefix} wc_curl_cli: {get_attr: [minion_wait_handle, curl_cli]} openstack_ca: {get_param: openstack_ca} ###################################################################### # # wait condition handler for kubernetes minions # minion_wait_handle: type: OS::Heat::WaitConditionHandle minion_wait_condition: type: OS::Heat::WaitCondition properties: count: {get_param: number_of_minions} handle: {get_resource: minion_wait_handle} timeout: {get_param: wait_condition_timeout} outputs: api_address: value: str_replace: template: api_ip_address params: api_ip_address: {get_attr: [api_address_floating_switch, ip_address]} description: > This is the API endpoint of the Kubernetes cluster. Use this to access the Kubernetes API. registry_address: value: str_replace: template: localhost:port params: port: {get_param: registry_port} description: This is the url of docker registry server where you can store docker images. kube_masters_private: value: {get_attr: [kube_masters, kube_master_ip]} description: > This is a list of the "private" IP addresses of all the Kubernetes masters. kube_masters: value: {get_attr: [kube_masters, kube_master_external_ip]} description: > This is a list of the "public" IP addresses of all the Kubernetes masters. Use these IP addresses to log in to the Kubernetes masters via ssh. 
kube_minions_private: value: {get_attr: [kube_minions, kube_minion_ip]} description: > This is a list of the "private" IP addresses of all the Kubernetes minions. kube_minions: value: {get_attr: [kube_minions, kube_minion_external_ip]} description: > This is a list of the "public" IP addresses of all the Kubernetes minions. Use these IP addresses to log in to the Kubernetes minions via ssh. magnum-6.1.0/magnum/drivers/k8s_fedora_ironic_v1/templates/kubemaster.yaml0000666000175100017510000004146713244017334027031 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is a nested stack that defines a single Kubernetes master, This stack is included by an ResourceGroup resource in the parent template (kubecluster.yaml). parameters: name: type: string description: server name server_image: type: string description: glance image used to boot the server master_flavor: type: string description: flavor to use when booting the server ssh_key_name: type: string description: name of ssh key to be provisioned on our server external_network: type: string description: uuid/name of a network to use for floating ip addresses portal_network_cidr: type: string description: > address range used by kubernetes for service portals kube_allow_priv: type: string description: > whether or not kubernetes should permit privileged containers. 
constraints: - allowed_values: ["true", "false"] docker_volume_size: type: number description: > size of a cinder volume to allocate to docker for container/image storage docker_storage_driver: type: string description: docker storage driver name default: "devicemapper" flannel_network_cidr: type: string description: network range for flannel overlay network flannel_network_subnetlen: type: number description: size of subnet assigned to each master flannel_backend: type: string description: > specify the backend for flannel, default udp backend constraints: - allowed_values: ["udp", "vxlan", "host-gw"] system_pods_initial_delay: type: number description: > health check, time to wait for system pods (podmaster, scheduler) to boot (in seconds) default: 30 system_pods_timeout: type: number description: > health check, timeout for system pods (podmaster, scheduler) to answer. (in seconds) default: 5 admission_control_list: type: string description: > List of admission control plugins to activate discovery_url: type: string description: > Discovery URL used for bootstrapping the etcd cluster. tls_disabled: type: boolean description: whether or not to enable TLS kube_dashboard_enabled: type: boolean description: whether or not to disable kubernetes dashboard influx_grafana_dashboard_enabled: type: boolean description: Enable influxdb with grafana dashboard for data from heapster verify_ca: type: boolean description: whether or not to validate certificate authority kubernetes_port: type: number description: > The port which are used by kube-apiserver to provide Kubernetes service. 
cluster_uuid: type: string description: identifier for the cluster this template is generating magnum_url: type: string description: endpoint to retrieve TLS certs from prometheus_monitoring: type: boolean description: > whether or not to have prometheus and grafana deployed grafana_admin_passwd: type: string hidden: true description: > admin user password for the Grafana monitoring interface api_public_address: type: string description: Public IP address of the Kubernetes master server. default: "" api_private_address: type: string description: Private IP address of the Kubernetes master server. default: "" fixed_network: type: string description: Network from which to allocate fixed addresses. fixed_subnet: type: string description: Subnet from which to allocate fixed addresses. network_driver: type: string description: network driver to use for instantiating container networks secgroup_base_id: type: string description: ID of the security group for base. secgroup_kube_master_id: type: string description: ID of the security group for kubernetes master. api_pool_id: type: string description: ID of the load balancer pool of k8s API server. etcd_pool_id: type: string description: ID of the load balancer pool of etcd server. 
auth_url: type: string description: > url for kubernetes to authenticate username: type: string description: > user account password: type: string description: > user password http_proxy: type: string description: http proxy address for docker https_proxy: type: string description: https proxy address for docker no_proxy: type: string description: no proxies for docker kube_tag: type: string description: tag of the k8s containers used to provision the kubernetes cluster kube_version: type: string description: version of kubernetes used for kubernetes cluster kube_dashboard_version: type: string description: version of kubernetes dashboard used for kubernetes cluster trustee_user_id: type: string description: user id of the trustee trustee_password: type: string description: password of the trustee hidden: true trust_id: type: string description: id of the trust which is used by the trustee hidden: true insecure_registry_url: type: string description: insecure registry url container_infra_prefix: type: string description: > prefix of container images used in the cluster, kubernetes components, kubernetes-dashboard, coredns etc wc_curl_cli: type: string description : > Wait condition notify command for Master. etcd_lb_vip: type: string description: > etcd lb vip private used to generate certs on master. default: "" openstack_ca: type: string description: The OpenStack CA certificate to install on the node. nodes_server_group_id: type: string description: ID of the server group for kubernetes cluster nodes. resources: ###################################################################### # # resource that exposes the IPs of either the kube master or the API # LBaaS pool depending on whether LBaaS is enabled for the cluster. 
# api_address_switch: type: Magnum::ApiGatewaySwitcher properties: pool_public_ip: {get_param: api_public_address} pool_private_ip: {get_param: api_private_address} master_public_ip: {get_attr: [kube_master_floating, floating_ip_address]} master_private_ip: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} ###################################################################### # # software configs. these are components that are combined into # a multipart MIME user-data archive. # write_heat_params: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: template: {get_file: ../../common/templates/kubernetes/fragments/write-heat-params-master.yaml} params: "$PROMETHEUS_MONITORING": {get_param: prometheus_monitoring} "$KUBE_API_PUBLIC_ADDRESS": {get_attr: [api_address_switch, public_ip]} "$KUBE_API_PRIVATE_ADDRESS": {get_attr: [api_address_switch, private_ip]} "$KUBE_API_PORT": {get_param: kubernetes_port} "$KUBE_ALLOW_PRIV": {get_param: kube_allow_priv} "$DOCKER_VOLUME": 'None' "$DOCKER_VOLUME_SIZE": {get_param: docker_volume_size} "$DOCKER_STORAGE_DRIVER": {get_param: docker_storage_driver} "$NETWORK_DRIVER": {get_param: network_driver} "$FLANNEL_NETWORK_CIDR": {get_param: flannel_network_cidr} "$FLANNEL_NETWORK_SUBNETLEN": {get_param: flannel_network_subnetlen} "$FLANNEL_BACKEND": {get_param: flannel_backend} "$SYSTEM_PODS_INITIAL_DELAY": {get_param: system_pods_initial_delay} "$SYSTEM_PODS_TIMEOUT": {get_param: system_pods_timeout} "$PORTAL_NETWORK_CIDR": {get_param: portal_network_cidr} "$ADMISSION_CONTROL_LIST": {get_param: admission_control_list} "$ETCD_DISCOVERY_URL": {get_param: discovery_url} "$AUTH_URL": {get_param: auth_url} "$USERNAME": {get_param: username} "$PASSWORD": {get_param: password} "$CLUSTER_SUBNET": {get_param: fixed_subnet} "$TLS_DISABLED": {get_param: tls_disabled} "$KUBE_DASHBOARD_ENABLED": {get_param: kube_dashboard_enabled} "$INFLUX_GRAFANA_DASHBOARD_ENABLED": {get_param: influx_grafana_dashboard_enabled} 
"$VERIFY_CA": {get_param: verify_ca} "$CLUSTER_UUID": {get_param: cluster_uuid} "$MAGNUM_URL": {get_param: magnum_url} "$HTTP_PROXY": {get_param: http_proxy} "$HTTPS_PROXY": {get_param: https_proxy} "$NO_PROXY": {get_param: no_proxy} "$KUBE_TAG": {get_param: kube_tag} "$KUBE_VERSION": {get_param: kube_version} "$KUBE_DASHBOARD_VERSION": {get_param: kube_dashboard_version} "$WAIT_CURL": {get_param: wc_curl_cli} "$TRUSTEE_USER_ID": {get_param: trustee_user_id} "$TRUSTEE_PASSWORD": {get_param: trustee_password} "$TRUST_ID": {get_param: trust_id} "$INSECURE_REGISTRY_URL": {get_param: insecure_registry_url} "$CONTAINER_INFRA_PREFIX": {get_param: container_infra_prefix} "$ENABLE_CINDER": "False" "$ETCD_LB_VIP": {get_param: etcd_lb_vip} install_openstack_ca: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: params: $OPENSTACK_CA: {get_param: openstack_ca} template: {get_file: ../../common/templates/fragments/atomic-install-openstack-ca.sh} make_cert: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/make-cert.sh} configure_docker_storage: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: params: $configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_atomic.sh} template: {get_file: ../../common/templates/fragments/configure-docker-storage.sh} configure_etcd: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/configure-etcd.sh} write_kube_os_config: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/write-kube-os-config.sh} configure_kubernetes: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/configure-kubernetes-master.sh} write_network_config: type: 
OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/write-network-config.sh} network_config_service: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/network-config-service.sh} enable_services: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/enable-services-master.sh} network_service: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/network-service.sh} enable_kube_controller_manager_scheduler: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/enable-kube-controller-manager-scheduler.sh} kube_apiserver_to_kubelet_role: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/kube-apiserver-to-kubelet-role.sh} kube_ui_service: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/kube-dashboard-service.sh} enable_kube_proxy: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/enable-kube-proxy-master.sh} master_wc_notify: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/wc-notify-master.sh} disable_selinux: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/disable-selinux.sh} add_proxy: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/add-proxy.sh} start_container_agent: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: 
../../common/templates/kubernetes/fragments/start-container-agent.sh} kube_master_init: type: OS::Heat::MultipartMime properties: parts: - config: {get_resource: install_openstack_ca} - config: {get_resource: disable_selinux} - config: {get_resource: write_heat_params} - config: {get_resource: configure_etcd} - config: {get_resource: write_kube_os_config} - config: {get_resource: make_cert} - config: {get_resource: configure_docker_storage} - config: {get_resource: configure_kubernetes} - config: {get_resource: add_proxy} - config: {get_resource: enable_services} - config: {get_resource: write_network_config} - config: {get_resource: network_config_service} - config: {get_resource: network_service} - config: {get_resource: kube_apiserver_to_kubelet_role} - config: {get_resource: enable_kube_controller_manager_scheduler} - config: {get_resource: enable_kube_proxy} - config: {get_resource: kube_ui_service} - config: {get_resource: start_container_agent} - config: {get_resource: master_wc_notify} enable_prometheus_monitoring: type: OS::Heat::SoftwareConfig properties: group: script config: str_replace: template: {get_file: ../../common/templates/kubernetes/fragments/enable-prometheus-monitoring} params: "$ADMIN_PASSWD": {get_param: grafana_admin_passwd} enable_prometheus_monitoring_deployment: type: OS::Heat::SoftwareDeployment properties: signal_transport: HEAT_SIGNAL config: {get_resource: enable_prometheus_monitoring} server: {get_resource: kube-master} actions: ['CREATE'] ###################################################################### # # a single kubernetes master. 
# # do NOT use "_" (underscore) in the Nova server name # it creates a mismatch between the generated Nova name and its hostname # which can lead to weird problems kube-master: type: OS::Nova::Server properties: name: {get_param: name} image: {get_param: server_image} flavor: {get_param: master_flavor} key_name: {get_param: ssh_key_name} user_data_format: SOFTWARE_CONFIG software_config_transport: POLL_SERVER_HEAT user_data: {get_resource: kube_master_init} networks: - port: {get_resource: kube_master_eth0} scheduler_hints: { group: { get_param: nodes_server_group_id }} kube_master_eth0: type: "OS::Neutron::Port" properties: network_id: get_param: fixed_network security_groups: - {get_param: secgroup_kube_master_id} fixed_ips: - subnet_id: get_param: fixed_subnet allowed_address_pairs: - ip_address: {get_param: flannel_network_cidr} kube_master_floating: type: OS::Neutron::FloatingIP properties: floating_network: get_param: external_network port_id: get_resource: kube_master_eth0 api_pool_member: type: Magnum::Optional::Neutron::LBaaS::PoolMember properties: pool: {get_param: api_pool_id} address: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} subnet: { get_param: fixed_subnet } protocol_port: {get_param: kubernetes_port} etcd_pool_member: type: Magnum::Optional::Neutron::LBaaS::PoolMember properties: pool: {get_param: etcd_pool_id} address: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} subnet: { get_param: fixed_subnet } protocol_port: 2379 outputs: kube_master_ip: value: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} description: > This is the "private" IP address of the Kubernetes master node. kube_master_external_ip: value: {get_attr: [kube_master_floating, floating_ip_address]} description: > This is the "public" IP address of the Kubernetes master node. 
magnum-6.1.0/magnum/drivers/k8s_fedora_ironic_v1/templates/kubeminion_software_configs.yaml0000666000175100017510000002466013244017334032445 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is a nested stack that defines software configs for Kubernetes minions. parameters: kube_allow_priv: type: string description: > whether or not kubernetes should permit privileged containers. constraints: - allowed_values: ["true", "false"] docker_volume_size: type: number description: > size of a cinder volume to allocate to docker for container/image storage docker_storage_driver: type: string description: docker storage driver name default: "devicemapper" tls_disabled: type: boolean description: whether or not to enable TLS verify_ca: type: boolean description: whether or not to validate certificate authority kubernetes_port: type: number description: > The port which are used by kube-apiserver to provide Kubernetes service. cluster_uuid: type: string description: identifier for the cluster this template is generating magnum_url: type: string description: endpoint to retrieve TLS certs from prometheus_monitoring: type: boolean description: > whether or not to have the node-exporter running on the node kube_master_ip: type: string description: IP address of the Kubernetes master server. etcd_server_ip: type: string description: IP address of the Etcd server. network_driver: type: string description: network driver to use for instantiating container networks registry_enabled: type: boolean description: > Indicates whether the docker registry is enabled. 
registry_port: type: number description: port of registry service swift_region: type: string description: region of swift service registry_container: type: string description: > name of swift container which docker registry stores images in registry_insecure: type: boolean description: > indicates whether to skip TLS verification between registry and backend storage registry_chunksize: type: number description: > size fo the data segments for the swift dynamic large objects volume_driver: type: string description: volume driver to use for container storage region_name: type: string description: A logically separate section of the cluster username: type: string description: > user account password: type: string description: > user password, not set in current implementation, only used to fill in for Kubernetes config file hidden: true http_proxy: type: string description: http proxy address for docker https_proxy: type: string description: https proxy address for docker no_proxy: type: string description: no proxies for docker kube_tag: type: string description: tag of the k8s containers used to provision the kubernetes cluster kube_version: type: string description: version of kubernetes used for kubernetes cluster trustee_domain_id: type: string description: domain id of the trustee trustee_user_id: type: string description: user id of the trustee trustee_username: type: string description: username of the trustee trustee_password: type: string description: password of the trustee hidden: true trust_id: type: string description: id of the trust which is used by the trustee hidden: true auth_url: type: string description: > url for keystone, must be v2 since k8s backend only support v2 at this point insecure_registry_url: type: string description: insecure registry url container_infra_prefix: type: string description: > prefix of container images used in the cluster, kubernetes components, kubernetes-dashboard, coredns etc wc_curl_cli: type: string description : > 
Wait condition notify command for Minion. openstack_ca: type: string description: The OpenStack CA certificate to install on the node. resources: ###################################################################### # # software configs. these are components that are combined into # a multipart MIME user-data archive. # write_heat_params: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: template: {get_file: ../../common/templates/kubernetes/fragments/write-heat-params.yaml} params: $PROMETHEUS_MONITORING: {get_param: prometheus_monitoring} $KUBE_ALLOW_PRIV: {get_param: kube_allow_priv} $KUBE_MASTER_IP: {get_param: kube_master_ip} $KUBE_API_PORT: {get_param: kubernetes_port} $ETCD_SERVER_IP: {get_param: etcd_server_ip} $DOCKER_VOLUME: 'None' $DOCKER_VOLUME_SIZE: {get_param: docker_volume_size} $DOCKER_STORAGE_DRIVER: {get_param: docker_storage_driver} $NETWORK_DRIVER: {get_param: network_driver} $REGISTRY_ENABLED: {get_param: registry_enabled} $REGISTRY_PORT: {get_param: registry_port} $SWIFT_REGION: {get_param: swift_region} $REGISTRY_CONTAINER: {get_param: registry_container} $REGISTRY_INSECURE: {get_param: registry_insecure} $REGISTRY_CHUNKSIZE: {get_param: registry_chunksize} $TLS_DISABLED: {get_param: tls_disabled} $VERIFY_CA: {get_param: verify_ca} $CLUSTER_UUID: {get_param: cluster_uuid} $MAGNUM_URL: {get_param: magnum_url} $USERNAME: {get_param: username} $PASSWORD: {get_param: password} $VOLUME_DRIVER: {get_param: volume_driver} $REGION_NAME: {get_param: region_name} $HTTP_PROXY: {get_param: http_proxy} $HTTPS_PROXY: {get_param: https_proxy} $NO_PROXY: {get_param: no_proxy} $KUBE_TAG: {get_param: kube_tag} $KUBE_VERSION: {get_param: kube_version} $WAIT_CURL: {get_param: wc_curl_cli} $TRUSTEE_DOMAIN_ID: {get_param: trustee_domain_id} $TRUSTEE_USER_ID: {get_param: trustee_user_id} $TRUSTEE_USERNAME: {get_param: trustee_username} $TRUSTEE_PASSWORD: {get_param: trustee_password} $TRUST_ID: {get_param: trust_id} $AUTH_URL: 
{get_param: auth_url} $INSECURE_REGISTRY_URL: {get_param: insecure_registry_url} $CONTAINER_INFRA_PREFIX: {get_param: container_infra_prefix} $ENABLE_CINDER: "False" install_openstack_ca: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: params: $OPENSTACK_CA: {get_param: openstack_ca} template: {get_file: ../../common/templates/fragments/atomic-install-openstack-ca.sh} make_cert: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/make-cert-client.sh} configure_docker_storage: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: params: $configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_atomic.sh} template: {get_file: ../../common/templates/fragments/configure-docker-storage.sh} configure_docker_registry: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/fragments/configure-docker-registry.sh} configure_kubernetes_minion: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/configure-kubernetes-minion.sh} network_service: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/network-service.sh} enable_services: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/enable-services-minion.sh} enable_docker_registry: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/fragments/enable-docker-registry.sh} enable_kube_proxy: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/enable-kube-proxy-minion.sh} enable_node_exporter: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: 
../../common/templates/kubernetes/fragments/enable-node-exporter.sh} minion_wc_notify: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: template: | #!/bin/bash -v if [ "verify_ca" == "True" ]; then VERIFY_CA="" else VERIFY_CA="-k" fi wc_notify $VERIFY_CA --data-binary '{"status": "SUCCESS"}' params: wc_notify: {get_param: wc_curl_cli} verify_ca: {get_param: verify_ca} disable_selinux: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/disable-selinux.sh} add_proxy: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/kubernetes/fragments/add-proxy.sh} kube_minion_init: type: OS::Heat::MultipartMime properties: parts: - config: {get_resource: install_openstack_ca} - config: {get_resource: disable_selinux} - config: {get_resource: write_heat_params} - config: {get_resource: make_cert} - config: {get_resource: configure_docker_storage} - config: {get_resource: configure_docker_registry} - config: {get_resource: configure_kubernetes_minion} - config: {get_resource: network_service} - config: {get_resource: add_proxy} - config: {get_resource: enable_services} - config: {get_resource: enable_kube_proxy} - config: {get_resource: enable_node_exporter} - config: {get_resource: enable_docker_registry} - config: {get_resource: minion_wc_notify} outputs: kube_minion_init: value: {get_resource: kube_minion_init} description: ID of the multipart mime for kubeminion. magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/0000775000175100017510000000000013244017675021137 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/version.py0000666000175100017510000000125413244017334023172 0ustar zuulzuul00000000000000# Copyright 2016 - Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. version = '1.0.0' driver = 'mesos_ubuntu_v1' container_version = '1.9.1' magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/scale_manager.py0000666000175100017510000000300213244017334024257 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from marathon import MarathonClient from magnum.conductor.scale_manager import ScaleManager class MesosScaleManager(ScaleManager): """When scaling a mesos cluster, MesosScaleManager will inspect the nodes and find out those with containers on them. Thus we can ask Heat to delete the nodes without containers. Note that this is a best effort basis -- Magnum doesn't have any synchronization with Marathon, so while Magnum is checking for the containers to choose nodes to remove, new containers can be deployed on the nodes to be removed. 
""" def __init__(self, context, osclient, cluster): super(MesosScaleManager, self).__init__(context, osclient, cluster) def _get_hosts_with_container(self, context, cluster): marathon_client = MarathonClient( 'http://' + cluster.api_address + ':8080') hosts = set() for task in marathon_client.list_tasks(): hosts.add(task.host) return hosts magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/driver.py0000666000175100017510000000247313244017334023004 0ustar zuulzuul00000000000000# Copyright 2016 Rackspace Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from magnum.drivers.heat import driver from magnum.drivers.mesos_ubuntu_v1 import monitor from magnum.drivers.mesos_ubuntu_v1.scale_manager import MesosScaleManager from magnum.drivers.mesos_ubuntu_v1 import template_def class Driver(driver.HeatDriver): @property def provides(self): return [ {'server_type': 'vm', 'os': 'ubuntu', 'coe': 'mesos'}, ] def get_template_definition(self): return template_def.UbuntuMesosTemplateDefinition() def get_monitor(self, context, cluster): return monitor.MesosMonitor(context, cluster) def get_scale_manager(self, context, osclient, cluster): return MesosScaleManager(context, osclient, cluster) magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/image/0000775000175100017510000000000013244017675022221 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/image/validate_image.sh0000777000175100017510000000145413244017334025511 0ustar zuulzuul00000000000000#!/bin/bash # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -e # check that image is valid qemu-img check -q $1 # validate estimated size FILESIZE=$(stat -c%s "$1") MIN_SIZE=681574400 # 650MB if [ $FILESIZE -lt $MIN_SIZE ] ; then echo "Error: generated image size is lower than expected." 
exit 1 fi magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/image/mesos/0000775000175100017510000000000013244017675023347 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/image/mesos/pre-install.d/0000775000175100017510000000000013244017675026023 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/image/mesos/pre-install.d/10-apt-repo0000777000175100017510000000104113244017334027704 0ustar zuulzuul00000000000000#!/bin/bash if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then set -x fi set -eu set -o pipefail sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]') CODENAME=$(lsb_release -cs) # Add the repository echo "deb http://repos.mesosphere.io/${DISTRO} ${CODENAME} main" | \ sudo tee /etc/apt/sources.list.d/mesosphere.list # Install Java 8 requirements for marathon sudo add-apt-repository -y ppa:openjdk-r/ppa sudo apt-get -y update sudo apt-get -y install openjdk-8-jre-headless magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/image/mesos/post-install.d/0000775000175100017510000000000013244017675026222 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/image/mesos/post-install.d/60-disable-upstart0000777000175100017510000000027513244017334031474 0ustar zuulzuul00000000000000#!/bin/bash for service in zookeeper mesos-slave mesos-master marathon; do service $service stop [ -f /etc/init/$service.conf ] && echo "manual" > /etc/init/$service.override done magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/image/mesos/package-installs.yaml0000666000175100017510000000003513244017334027445 0ustar zuulzuul00000000000000zookeeperd: mesos: marathon: magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/image/mesos/elements-deps0000666000175100017510000000002113244017334026022 0ustar zuulzuul00000000000000package-installs magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/image/docker/0000775000175100017510000000000013244017675023470 5ustar 
zuulzuul00000000000000magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/image/docker/pre-install.d/0000775000175100017510000000000013244017675026144 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/image/docker/pre-install.d/10-add-docker-repo0000777000175100017510000000074213244017334031245 0ustar zuulzuul00000000000000#!/bin/bash if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then set -x fi set -eu set -o pipefail apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 \ --recv-keys 58118E89F3A912897C070ADBF76221572C52609D DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]') RELEASE=$(lsb_release -ics | tail -1 | tr '[:upper:]' '[:lower:]') # Add the repository echo "deb http://apt.dockerproject.org/repo ${DISTRO}-${RELEASE} main" | \ sudo tee /etc/apt/sources.list.d/docker.list magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/image/docker/post-install.d/0000775000175100017510000000000013244017675026343 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/image/docker/post-install.d/60-disable-docker-service0000777000175100017510000000015313244017334033013 0ustar zuulzuul00000000000000#!/bin/bash service docker stop [ -f /etc/init/docker.conf ] && echo "manual" > /etc/init/docker.override magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/image/docker/package-installs.yaml0000666000175100017510000000001713244017334027566 0ustar zuulzuul00000000000000docker-engine: magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/image/docker/elements-deps0000666000175100017510000000002113244017334026143 0ustar zuulzuul00000000000000package-installs magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/image/README.md0000666000175100017510000000023113244017334023466 0ustar zuulzuul00000000000000Mesos elements ============== See [Building an image](http://docs.openstack.org/developer/magnum/userguide.html#building-mesos-image) for instructions. 
magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/image/Dockerfile0000666000175100017510000000177513244017334024217 0ustar zuulzuul00000000000000FROM ubuntu:trusty RUN \ apt-get -yqq update && \ apt-get -yqq install git qemu-utils python-dev python-pip python-yaml python-six uuid-runtime curl sudo kpartx parted wget && \ pip install diskimage-builder && \ mkdir /output WORKDIR /build ENV PATH="dib-utils/bin:$PATH" ELEMENTS_PATH="$(python -c 'import os, diskimage_builder, pkg_resources;print(os.path.abspath(pkg_resources.resource_filename(diskimage_builder.__name__, "elements")))'):tripleo-image-elements/elements:heat-templates/hot/software-config/elements:magnum/magnum/drivers/mesos_ubuntu_v1/image" DIB_RELEASE=trusty RUN git clone https://git.openstack.org/openstack/magnum RUN git clone https://git.openstack.org/openstack/dib-utils.git RUN git clone https://git.openstack.org/openstack/tripleo-image-elements.git RUN git clone https://git.openstack.org/openstack/heat-templates.git CMD disk-image-create ubuntu vm docker mesos os-collect-config os-refresh-config os-apply-config heat-config heat-config-script -o /output/ubuntu-mesos.qcow2 magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/image/install_imagebuild_deps.sh0000777000175100017510000000061413244017334027416 0ustar zuulzuul00000000000000#!/bin/bash # This script installs all needed dependencies to generate # images using diskimage-builder. Please not it only has been # tested on Ubuntu Trusty set -eux set -o pipefail sudo apt-get update || true sudo apt-get install -y \ git \ qemu-utils \ python-dev \ python-yaml \ python-six \ uuid-runtime \ curl \ sudo \ kpartx \ parted \ wget magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/monitor.py0000666000175100017510000000510313244017334023171 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_serialization import jsonutils from magnum.common import urlfetch from magnum.conductor import monitors class MesosMonitor(monitors.MonitorBase): def __init__(self, context, cluster): super(MesosMonitor, self).__init__(context, cluster) self.data = {} @property def metrics_spec(self): return { 'memory_util': { 'unit': '%', 'func': 'compute_memory_util', }, 'cpu_util': { 'unit': '%', 'func': 'compute_cpu_util', }, } def _build_url(self, url, protocol='http', port='80', path='/'): return protocol + '://' + url + ':' + port + path def _is_leader(self, state): return state['leader'] == state['pid'] def pull_data(self): self.data['mem_total'] = 0 self.data['mem_used'] = 0 self.data['cpu_total'] = 0 self.data['cpu_used'] = 0 for master_addr in self.cluster.master_addresses: mesos_master_url = self._build_url(master_addr, port='5050', path='/state') master = jsonutils.loads(urlfetch.get(mesos_master_url)) if self._is_leader(master): for slave in master['slaves']: self.data['mem_total'] += slave['resources']['mem'] self.data['mem_used'] += slave['used_resources']['mem'] self.data['cpu_total'] += slave['resources']['cpus'] self.data['cpu_used'] += slave['used_resources']['cpus'] break def compute_memory_util(self): if self.data['mem_total'] == 0 or self.data['mem_used'] == 0: return 0 else: return self.data['mem_used'] * 100 / self.data['mem_total'] def compute_cpu_util(self): if self.data['cpu_total'] == 0 or self.data['cpu_used'] == 0: return 0 else: return self.data['cpu_used'] * 100 / self.data['cpu_total'] 
magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/template_def.py0000666000175100017510000001010213244017334024126 0ustar zuulzuul00000000000000# Copyright 2016 Rackspace Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from magnum.drivers.heat import template_def class UbuntuMesosTemplateDefinition(template_def.BaseTemplateDefinition): """Mesos template for Ubuntu VM.""" def __init__(self): super(UbuntuMesosTemplateDefinition, self).__init__() self.add_parameter('external_network', cluster_template_attr='external_network_id', required=True) self.add_parameter('fixed_network', cluster_template_attr='fixed_network') self.add_parameter('fixed_subnet', cluster_template_attr='fixed_subnet') self.add_parameter('number_of_slaves', cluster_attr='node_count') self.add_parameter('master_flavor', cluster_attr='master_flavor_id') self.add_parameter('slave_flavor', cluster_attr='flavor_id') self.add_parameter('cluster_name', cluster_attr='name') self.add_parameter('volume_driver', cluster_template_attr='volume_driver') self.add_output('api_address', cluster_attr='api_address') self.add_output('mesos_master_private', cluster_attr=None) self.add_output('mesos_master', cluster_attr='master_addresses') self.add_output('mesos_slaves_private', cluster_attr=None) self.add_output('mesos_slaves', cluster_attr='node_addresses') def get_params(self, context, cluster_template, cluster, **kwargs): extra_params = kwargs.pop('extra_params', {}) # HACK(apmelton) - This uses the 
user's bearer token, ideally # it should be replaced with an actual trust token with only # access to do what the template needs it to do. osc = self.get_osc(context) extra_params['auth_url'] = context.auth_url extra_params['username'] = context.user_name extra_params['tenant_name'] = context.project_id extra_params['domain_name'] = context.domain_name extra_params['region_name'] = osc.cinder_region_name() label_list = ['rexray_preempt', 'mesos_slave_isolation', 'mesos_slave_image_providers', 'mesos_slave_work_dir', 'mesos_slave_executor_env_variables'] for label in label_list: extra_params[label] = cluster.labels.get(label) scale_mgr = kwargs.pop('scale_manager', None) if scale_mgr: hosts = self.get_output('mesos_slaves_private') extra_params['slaves_to_remove'] = ( scale_mgr.get_removal_nodes(hosts)) return super(UbuntuMesosTemplateDefinition, self).get_params(context, cluster_template, cluster, extra_params=extra_params, **kwargs) def get_env_files(self, cluster_template, cluster): env_files = [] template_def.add_priv_net_env_file(env_files, cluster_template) template_def.add_lb_env_file(env_files, cluster_template) return env_files @property def driver_module_path(self): return __name__[:__name__.rindex('.')] @property def template_path(self): return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates/mesoscluster.yaml') magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/__init__.py0000666000175100017510000000000013244017334023230 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/templates/0000775000175100017510000000000013244017675023135 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/templates/mesosmaster.yaml0000666000175100017510000000704413244017334026362 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is a nested stack that defines a single Mesos master, This stack is included by a ResourceGroup resource in the parent template (mesoscluster.yaml). 
parameters: name: type: string description: server name server_image: type: string description: glance image used to boot the server master_flavor: type: string description: flavor to use when booting the server ssh_key_name: type: string description: name of ssh key to be provisioned on our server external_network: type: string description: uuid/name of a network to use for floating ip addresses fixed_network: type: string description: Network from which to allocate fixed addresses. fixed_subnet: type: string description: Subnet from which to allocate fixed addresses. secgroup_mesos_id: type: string description: ID of the security group for mesos master. api_pool_id: type: string description: ID of the load balancer pool of Marathon. openstack_ca: type: string hidden: true description: The OpenStack CA certificate to install on the node. nodes_server_group_id: type: string description: ID of the server group for kubernetes cluster nodes. resources: add_ext_ca_certs: type: OS::Heat::SoftwareConfig properties: group: script config: str_replace: template: {get_file: fragments/add-ext-ca-certs.sh} params: "@@CACERTS_CONTENT@@": {get_param: openstack_ca} mesos_master_init: type: OS::Heat::MultipartMime properties: parts: - config: {get_resource: add_ext_ca_certs} ###################################################################### # # Mesos master server. 
# # do NOT use "_" (underscore) in the Nova server name # it creates a mismatch between the generated Nova name and its hostname # which can lead to weird problems mesos-master: type: OS::Nova::Server properties: name: {get_param: name} image: {get_param: server_image} flavor: {get_param: master_flavor} key_name: {get_param: ssh_key_name} user_data_format: SOFTWARE_CONFIG user_data: {get_resource: mesos_master_init} networks: - port: {get_resource: mesos_master_eth0} scheduler_hints: { group: { get_param: nodes_server_group_id }} mesos_master_eth0: type: OS::Neutron::Port properties: network: {get_param: fixed_network} security_groups: - {get_param: secgroup_mesos_id} fixed_ips: - subnet: {get_param: fixed_subnet} replacement_policy: AUTO mesos_master_floating: type: OS::Neutron::FloatingIP properties: floating_network: {get_param: external_network} port_id: {get_resource: mesos_master_eth0} api_pool_member: type: Magnum::Optional::Neutron::LBaaS::PoolMember properties: pool: {get_param: api_pool_id} address: {get_attr: [mesos_master_eth0, fixed_ips, 0, ip_address]} subnet: { get_param: fixed_subnet } protocol_port: 8080 outputs: mesos_master_ip: value: {get_attr: [mesos_master_eth0, fixed_ips, 0, ip_address]} description: > This is the "private" address of the Mesos master node. mesos_master_external_ip: value: {get_attr: [mesos_master_floating, floating_ip_address]} description: > This is the "public" address of the Mesos master node. mesos_server_id: value: {get_resource: mesos-master} description: > This is the logical id of the Mesos master node. magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/templates/mesoscluster.yaml0000666000175100017510000003652513244017334026556 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This template will boot a Mesos cluster with one or more masters (as specified by number_of_masters, default is 1) and one or more slaves (as specified by the number_of_slaves parameter, which defaults to 1). 
parameters: ssh_key_name: type: string description: name of ssh key to be provisioned on our server external_network: type: string description: uuid/name of a network to use for floating ip addresses default: public fixed_network: type: string description: uuid/name of an existing network to use to provision machines default: "" fixed_subnet: type: string description: uuid/name of an existing subnet to use to provision machines default: "" server_image: type: string default: ubuntu-mesos description: glance image used to boot the server master_flavor: type: string default: m1.small description: flavor to use when booting the master server slave_flavor: type: string default: m1.small description: flavor to use when booting the slave server dns_nameserver: type: string description: address of a dns nameserver reachable in your environment default: 8.8.8.8 number_of_slaves: type: number description: how many mesos slaves to spawn initially default: 1 fixed_network_cidr: type: string description: network range for fixed ip network default: 10.0.0.0/24 wait_condition_timeout: type: number description: > timeout for the Wait Conditions default: 6000 cluster_name: type: string description: human readable name for the mesos cluster default: my-cluster executor_registration_timeout: type: string description: > Amount of time to wait for an executor to register with the slave before considering it hung and shutting it down default: 5mins number_of_masters: type: number description: how many mesos masters to spawn initially default: 1 http_proxy: type: string description: http proxy address for docker default: "" https_proxy: type: string description: https proxy address for docker default: "" no_proxy: type: string description: no proxies for docker default: "" trustee_domain_id: type: string description: domain id of the trustee default: "" trustee_user_id: type: string description: user id of the trustee default: "" trustee_username: type: string description: username of 
the trustee default: "" trustee_password: type: string description: password of the trustee default: "" hidden: true trust_id: type: string description: id of the trust which is used by the trustee default: "" hidden: true region_name: type: string description: a logically separate section of the cluster username: type: string description: user name password: type: string description: > user password, not set in current implementation, only used to fill in for Mesos config file default: password hidden: true tenant_name: type: string description: > tenant_name is used to isolate access to Compute resources volume_driver: type: string description: volume driver to use for container storage default: "" domain_name: type: string description: > domain is to define the administrative boundaries for management of Keystone entities rexray_preempt: type: string description: > enables any host to take control of a volume irrespective of whether other hosts are using the volume default: "false" auth_url: type: string description: url for keystone mesos_slave_isolation: type: string description: > Isolation mechanisms to use, e.g., `posix/cpu,posix/mem`, or `cgroups/cpu,cgroups/mem`, or network/port_mapping (configure with flag: `--with-network-isolator` to enable), or `cgroups/devices/gpus/nvidia` for nvidia specific gpu isolation (configure with flag: `--enable-nvidia -gpu-support` to enable), or `external`, or load an alternate isolator module using the `--modules` flag. Note that this flag is only relevant for the Mesos Containerizer. default: "" mesos_slave_work_dir: type: string description: directory path to place framework work directories default: "" mesos_slave_image_providers: type: string description: > Comma separated list of supported image providers e.g., APPC,DOCKER default: "" mesos_slave_executor_env_variables: type: string description: > JSON object representing the environment variables that should be passed to the executor, and thus subsequently task(s). 
By default the executor, executor will inherit the slave's environment variables. default: "" slaves_to_remove: type: comma_delimited_list description: > List of slaves to be removed when doing an update. Individual slave may be referenced several ways: (1) The resource name (e.g.['1', '3']), (2) The private IP address ['10.0.0.4', '10.0.0.6']. Note: the list should be empty when doing a create. default: [] verify_ca: type: boolean description: whether or not to validate certificate authority openstack_ca: type: string hidden: true description: The OpenStack CA certificate to install on the node. nodes_affinity_policy: type: string description: > affinity policy for nodes server group constraints: - allowed_values: ["affinity", "anti-affinity", "soft-affinity", "soft-anti-affinity"] resources: ###################################################################### # # network resources. allocate a network and router for our server. # network: type: ../../common/templates/network.yaml properties: existing_network: {get_param: fixed_network} existing_subnet: {get_param: fixed_subnet} private_network_cidr: {get_param: fixed_network_cidr} dns_nameserver: {get_param: dns_nameserver} external_network: {get_param: external_network} api_lb: type: ../../common/templates/lb.yaml properties: fixed_subnet: {get_attr: [network, fixed_subnet]} external_network: {get_param: external_network} protocol: HTTP port: 8080 ###################################################################### # # security groups. we need to permit network traffic of various # sorts. 
# secgroup_master: type: OS::Neutron::SecurityGroup properties: rules: - protocol: icmp - protocol: tcp port_range_min: 22 port_range_max: 22 - protocol: tcp remote_mode: remote_group_id - protocol: tcp port_range_min: 5050 port_range_max: 5050 - protocol: tcp port_range_min: 8080 port_range_max: 8080 secgroup_slave_all_open: type: OS::Neutron::SecurityGroup properties: rules: - protocol: icmp - protocol: tcp - protocol: udp ###################################################################### # # Master SoftwareConfig. # write_params_master: type: OS::Heat::SoftwareConfig properties: group: script config: {get_file: fragments/write-heat-params-master.sh} inputs: - name: MESOS_MASTERS_IPS type: String - name: CLUSTER_NAME type: String - name: QUORUM type: String - name: HTTP_PROXY type: String - name: HTTPS_PROXY type: String - name: NO_PROXY type: String configure_master: type: OS::Heat::SoftwareConfig properties: group: script config: {get_file: fragments/configure-mesos-master.sh} add_proxy_master: type: OS::Heat::SoftwareConfig properties: group: script config: {get_file: fragments/add-proxy.sh} start_services_master: type: OS::Heat::SoftwareConfig properties: group: script config: {get_file: fragments/start-services-master.sh} ###################################################################### # # Master SoftwareDeployment. 
# write_params_master_deployment: type: OS::Heat::SoftwareDeploymentGroup properties: config: {get_resource: write_params_master} servers: {get_attr: [mesos_masters, attributes, mesos_server_id]} input_values: MESOS_MASTERS_IPS: {list_join: [' ', {get_attr: [mesos_masters, mesos_master_ip]}]} CLUSTER_NAME: {get_param: cluster_name} NUMBER_OF_MASTERS: {get_param: number_of_masters} HTTP_PROXY: {get_param: http_proxy} HTTPS_PROXY: {get_param: https_proxy} NO_PROXY: {get_param: no_proxy} configure_master_deployment: type: OS::Heat::SoftwareDeploymentGroup depends_on: - write_params_master_deployment properties: config: {get_resource: configure_master} servers: {get_attr: [mesos_masters, attributes, mesos_server_id]} add_proxy_master_deployment: type: OS::Heat::SoftwareDeploymentGroup depends_on: - configure_master_deployment properties: config: {get_resource: add_proxy_master} servers: {get_attr: [mesos_masters, attributes, mesos_server_id]} start_services_master_deployment: type: OS::Heat::SoftwareDeploymentGroup depends_on: - add_proxy_master_deployment properties: config: {get_resource: start_services_master} servers: {get_attr: [mesos_masters, attributes, mesos_server_id]} ###################################################################### # # resources that expose the IPs of either the mesos master or a given # LBaaS pool depending on whether LBaaS is enabled for the bay. # api_address_lb_switch: type: Magnum::ApiGatewaySwitcher properties: pool_public_ip: {get_attr: [api_lb, floating_address]} pool_private_ip: {get_attr: [api_lb, address]} master_public_ip: {get_attr: [mesos_masters, resource.0.mesos_master_external_ip]} master_private_ip: {get_attr: [mesos_masters, resource.0.mesos_master_ip]} ###################################################################### # # resources that expose the server group for all nodes include master # and minions. 
# nodes_server_group: type: OS::Nova::ServerGroup properties: policies: [{get_param: nodes_affinity_policy}] ###################################################################### # # Mesos masters. This is a resource group that will create # masters. # mesos_masters: type: OS::Heat::ResourceGroup depends_on: - network properties: count: {get_param: number_of_masters} resource_def: type: mesosmaster.yaml properties: name: list_join: - '-' - [{ get_param: 'OS::stack_name' }, 'master', '%index%'] ssh_key_name: {get_param: ssh_key_name} server_image: {get_param: server_image} master_flavor: {get_param: master_flavor} external_network: {get_param: external_network} fixed_network: {get_attr: [network, fixed_network]} fixed_subnet: {get_attr: [network, fixed_subnet]} secgroup_mesos_id: {get_resource: secgroup_master} api_pool_id: {get_attr: [api_lb, pool_id]} openstack_ca: {get_param: openstack_ca} nodes_server_group_id: {get_resource: nodes_server_group} ###################################################################### # # Mesos slaves. This is a resource group that will initially # create slaves, and needs to be manually scaled. 
# mesos_slaves: type: OS::Heat::ResourceGroup depends_on: - network properties: count: {get_param: number_of_slaves} removal_policies: [{resource_list: {get_param: slaves_to_remove}}] resource_def: type: mesosslave.yaml properties: name: list_join: - '-' - [{ get_param: 'OS::stack_name' }, 'slave', '%index%'] ssh_key_name: {get_param: ssh_key_name} server_image: {get_param: server_image} slave_flavor: {get_param: slave_flavor} fixed_network: {get_attr: [network, fixed_network]} fixed_subnet: {get_attr: [network, fixed_subnet]} external_network: {get_param: external_network} secgroup_slave_all_open_id: {get_resource: secgroup_slave_all_open} mesos_slave_software_configs: {get_attr: [mesos_slave_software_configs, mesos_init]} nodes_server_group_id: {get_resource: nodes_server_group} ###################################################################### # # Wait condition handler for Mesos slaves. # slave_wait_handle: type: OS::Heat::WaitConditionHandle slave_wait_condition: type: OS::Heat::WaitCondition properties: count: {get_param: number_of_slaves} handle: {get_resource: slave_wait_handle} timeout: {get_param: wait_condition_timeout} ###################################################################### # # Software configs for Mesos slaves. 
# mesos_slave_software_configs: type: mesos_slave_software_configs.yaml properties: mesos_masters_ips: {list_join: [' ', {get_attr: [mesos_masters, mesos_master_ip]}]} executor_registration_timeout: {get_param: executor_registration_timeout} http_proxy: {get_param: http_proxy} https_proxy: {get_param: https_proxy} no_proxy: {get_param: no_proxy} auth_url: {get_param: auth_url} username: {get_param: username} password: {get_param: password} tenant_name: {get_param: tenant_name} volume_driver: {get_param: volume_driver} region_name: {get_param: region_name} domain_name: {get_param: domain_name} rexray_preempt: {get_param: rexray_preempt} mesos_slave_isolation: {get_param: mesos_slave_isolation} mesos_slave_work_dir: {get_param: mesos_slave_work_dir} mesos_slave_image_providers: {get_param: mesos_slave_image_providers} mesos_slave_executor_env_variables: {get_param: mesos_slave_executor_env_variables} mesos_slave_wc_curl_cli: {get_attr: [slave_wait_handle, curl_cli]} verify_ca: {get_param: verify_ca} openstack_ca: {get_param: openstack_ca} outputs: api_address: value: {get_attr: [api_address_lb_switch, public_ip]} description: > This is the API endpoint of the Mesos master. Use this to access the Mesos API from outside the cluster. mesos_master_private: value: {get_attr: [mesos_masters, mesos_master_ip]} description: > This is a list of the "private" addresses of all the Mesos masters. mesos_master: value: {get_attr: [mesos_masters, mesos_master_external_ip]} description: > This is the "public" ip address of the Mesos master server. Use this address to log in to the Mesos master via ssh or to access the Mesos API from outside the cluster. mesos_slaves_private: value: {get_attr: [mesos_slaves, mesos_slave_ip]} description: > This is a list of the "private" addresses of all the Mesos slaves. mesos_slaves: value: {get_attr: [mesos_slaves, mesos_slave_external_ip]} description: > This is a list of the "public" addresses of all the Mesos slaves. 
magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/templates/fragments/0000775000175100017510000000000013244017675025123 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/templates/fragments/write-heat-params.yaml0000666000175100017510000000143713244017334031340 0ustar zuulzuul00000000000000#cloud-config merge_how: dict(recurse_array)+list(append) write_files: - path: /etc/sysconfig/heat-params owner: "root:root" permissions: "0600" content: | MESOS_MASTERS_IPS="$MESOS_MASTERS_IPS" EXECUTOR_REGISTRATION_TIMEOUT="$EXECUTOR_REGISTRATION_TIMEOUT" HTTP_PROXY="$HTTP_PROXY" HTTPS_PROXY="$HTTPS_PROXY" NO_PROXY="$NO_PROXY" AUTH_URL="$AUTH_URL" USERNAME="$USERNAME" PASSWORD="$PASSWORD" TENANT_NAME="$TENANT_NAME" VOLUME_DRIVER="$VOLUME_DRIVER" REGION_NAME="$REGION_NAME" DOMAIN_NAME="$DOMAIN_NAME" REXRAY_PREEMPT="$REXRAY_PREEMPT" ISOLATION="$ISOLATION" WORK_DIR="$WORK_DIR" IMAGE_PROVIDERS="$IMAGE_PROVIDERS" EXECUTOR_ENVIRONMENT_VARIABLES="$EXECUTOR_ENVIRONMENT_VARIABLES" magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/templates/fragments/add-ext-ca-certs.sh0000666000175100017510000000144713244017334030504 0ustar zuulzuul00000000000000#!/bin/sh CACERTS=$(cat <<-EOF @@CACERTS_CONTENT@@ EOF ) CA_FILE=/usr/local/share/ca-certificates/magnum-external.crt if [ -n "$CACERTS" ]; then touch $CA_FILE echo "$CACERTS" | tee -a $CA_FILE chmod 0644 $CA_FILE chown root:root $CA_FILE update-ca-certificates # Legacy versions of requests shipped with os-collect-config can have own CA cert database for REQUESTS_LOCATION in \ /opt/stack/venvs/os-collect-config/lib/python2.7/site-packages/requests \ /usr/local/lib/python2.7/dist-packages/requests; do if [ -f "${REQUESTS_LOCATION}/cacert.pem" ]; then echo "$CACERTS" | tee -a "${REQUESTS_LOCATION}/cacert.pem" fi done if [ -f /etc/init/os-collect-config.conf ]; then service os-collect-config restart fi fi 
magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/templates/fragments/configure-mesos-master.sh0000666000175100017510000000311713244017334032051 0ustar zuulzuul00000000000000#!/bin/bash . /etc/sysconfig/heat-params echo "Configuring mesos (master)" myip=$(ip addr show eth0 | awk '$1 == "inet" {print $2}' | cut -f1 -d/) # Fix /etc/hosts sed -i "s/127.0.1.1/$myip/" /etc/hosts ###################################################################### # # Configure ZooKeeper # # List all ZooKeeper nodes id=1 for master_ip in $MESOS_MASTERS_IPS; do echo "server.$((id++))=${master_ip}:2888:3888" >> /etc/zookeeper/conf/zoo.cfg done # Set a ID for this node id=1 for master_ip in $MESOS_MASTERS_IPS; do if [ "$master_ip" = "$myip" ]; then break fi id=$((id+1)) done echo "$id" > /etc/zookeeper/conf/myid ###################################################################### # # Configure Mesos # # Set the ZooKeeper URL zk="zk://" for master_ip in $MESOS_MASTERS_IPS; do zk="${zk}${master_ip}:2181," done # Remove tailing ',' (format: zk://host1:port1,...,hostN:portN/path) zk=${zk::-1} echo "${zk}/mesos" > /etc/mesos/zk # The IP address to listen on echo "$myip" > /etc/mesos-master/ip # The size of the quorum of replicas echo "$QUORUM" > /etc/mesos-master/quorum # The hostname advertised in ZooKeeper echo "$myip" > /etc/mesos-master/hostname # The cluster name echo "$CLUSTER_NAME" > /etc/mesos-master/cluster ###################################################################### # # Configure Marathon # mkdir -p /etc/marathon/conf # Set the ZooKeeper URL echo "${zk}/mesos" > /etc/marathon/conf/master echo "${zk}/marathon" > /etc/marathon/conf/zk # Set the hostname advertised in ZooKeeper echo "$myip" > /etc/marathon/conf/hostname magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/templates/fragments/add-proxy.sh0000666000175100017510000000162613244017334027365 0ustar zuulzuul00000000000000#!/bin/sh . 
/etc/sysconfig/heat-params DOCKER_PROXY_CONF=/etc/default/docker BASH_RC=/etc/bash.bashrc if [ -n "$HTTP_PROXY" ]; then echo "export http_proxy=$HTTP_PROXY" >> $DOCKER_PROXY_CONF if [ -f "$BASH_RC" ]; then echo "export http_proxy=$HTTP_PROXY" >> $BASH_RC else echo "File $BASH_RC does not exist, not setting http_proxy" fi fi if [ -n "$HTTPS_PROXY" ]; then echo "export https_proxy=$HTTPS_PROXY" >> $DOCKER_PROXY_CONF if [ -f $BASH_RC ]; then echo "export https_proxy=$HTTPS_PROXY" >> $BASH_RC else echo "File $BASH_RC does not exist, not setting https_proxy" fi fi if [ -n "$HTTP_PROXY" -o -n $HTTPS_PROXY ]; then service docker restart fi if [ -f "$BASH_RC" ]; then if [ -n "$NO_PROXY" ]; then echo "export no_proxy=$NO_PROXY" >> $BASH_RC fi else echo "File $BASH_RC does not exist, not setting no_proxy" fi magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/templates/fragments/start-services-master.sh0000666000175100017510000000030113244017334031712 0ustar zuulzuul00000000000000#!/bin/sh # Start master services for service in zookeeper mesos-master marathon; do echo "starting service $service" service $service start rm -f /etc/init/$service.override done magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/templates/fragments/write-heat-params-master.sh0000666000175100017510000000040213244017334032270 0ustar zuulzuul00000000000000#!/bin/sh mkdir -p /etc/sysconfig cat > /etc/sysconfig/heat-params < $CLOUD_CONFIG < /etc/mesos/zk # The hostname the slave should report echo "$myip" > /etc/mesos-slave/hostname # The IP address to listen on echo "$myip" > /etc/mesos-slave/ip # List of containerizer implementations echo "docker,mesos" > /etc/mesos-slave/containerizers # Amount of time to wait for an executor to register cat > /etc/mesos-slave/executor_registration_timeout < /etc/mesos-slave/isolation fi if [ -n "$WORK_DIR" ]; then echo "$WORK_DIR" > /etc/mesos-slave/work_dir fi if [ -n "$IMAGE_PROVIDERS" ]; then if [ -n "$ISOLATION" ]; then echo "$IMAGE_PROVIDERS" > 
/etc/mesos-slave/image_providers else echo "isolation doesn't exist, not setting image_providers" fi fi if [ -n "$EXECUTOR_ENVIRONMENT_VARIABLES" ]; then echo "$EXECUTOR_ENVIRONMENT_VARIABLES" > /etc/executor_environment_variables echo "file:///etc/executor_environment_variables" > /etc/mesos-slave/executor_environment_variables fi magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/templates/mesosslave.yaml0000666000175100017510000000517613244017334026205 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is a nested stack that defines a single Mesos slave, This stack is included by a ResourceGroup resource in the parent template (mesoscluster.yaml). parameters: name: type: string description: server name server_image: type: string description: glance image used to boot the server slave_flavor: type: string description: flavor to use when booting the server ssh_key_name: type: string description: name of ssh key to be provisioned on our server external_network: type: string description: uuid/name of a network to use for floating ip addresses fixed_network: type: string description: Network from which to allocate fixed addresses. fixed_subnet: type: string description: Subnet from which to allocate fixed addresses. secgroup_slave_all_open_id: type: string description: ID of the security group for slave. mesos_slave_software_configs: type: string description: ID of the multipart mime. nodes_server_group_id: type: string description: ID of the server group for kubernetes cluster nodes. resources: ###################################################################### # # a single Mesos slave. 
# # do NOT use "_" (underscore) in the Nova server name # it creates a mismatch between the generated Nova name and its hostname # which can lead to weird problems mesos-slave: type: OS::Nova::Server properties: name: {get_param: name} image: {get_param: server_image} flavor: {get_param: slave_flavor} key_name: {get_param: ssh_key_name} user_data_format: RAW user_data: {get_param: mesos_slave_software_configs} networks: - port: {get_resource: mesos_slave_eth0} scheduler_hints: { group: { get_param: nodes_server_group_id }} mesos_slave_eth0: type: OS::Neutron::Port properties: network: {get_param: fixed_network} security_groups: - get_param: secgroup_slave_all_open_id fixed_ips: - subnet: {get_param: fixed_subnet} replacement_policy: AUTO mesos_slave_floating: type: OS::Neutron::FloatingIP properties: floating_network: {get_param: external_network} port_id: {get_resource: mesos_slave_eth0} outputs: mesos_slave_ip: value: {get_attr: [mesos_slave_eth0, fixed_ips, 0, ip_address]} description: > This is the "private" address of the Mesos slave node. mesos_slave_external_ip: value: {get_attr: [mesos_slave_floating, floating_ip_address]} description: > This is the "public" address of the Mesos slave node. magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/templates/mesos_slave_software_configs.yaml0000666000175100017510000001415413244017334031762 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is a nested stack that defines software configs for Mesos slave. 
parameters: executor_registration_timeout: type: string description: > Amount of time to wait for an executor to register with the slave before considering it hung and shutting it down http_proxy: type: string description: http proxy address for docker https_proxy: type: string description: https proxy address for docker no_proxy: type: string description: no proxies for docker auth_url: type: string description: > url for mesos to authenticate before sending request username: type: string description: user name password: type: string description: > user password, not set in current implementation, only used to fill in for Kubernetes config file hidden: true tenant_name: type: string description: > tenant_name is used to isolate access to Compute resources volume_driver: type: string description: volume driver to use for container storage region_name: type: string description: A logically separate section of the cluster domain_name: type: string description: > domain is to define the administrative boundaries for management of Keystone entities rexray_preempt: type: string description: > enables any host to take control of a volume irrespective of whether other hosts are using the volume verify_ca: type: boolean description: whether or not to validate certificate authority mesos_slave_isolation: type: string description: > Isolation mechanisms to use, e.g., `posix/cpu,posix/mem`, or `cgroups/cpu,cgroups/mem`, or network/port_mapping (configure with flag: `--with-network-isolator` to enable), or `cgroups/devices/gpus/nvidia` for nvidia specific gpu isolation (configure with flag: `--enable-nvidia -gpu-support` to enable), or `external`, or load an alternate isolator module using the `--modules` flag. Note that this flag is only relevant for the Mesos Containerizer. 
mesos_slave_work_dir: type: string description: directory path to place framework work directories mesos_slave_image_providers: type: string description: > Comma separated list of supported image providers e.g., APPC,DOCKER mesos_slave_executor_env_variables: type: string description: > JSON object representing the environment variables that should be passed to the executor, and thus subsequently task(s). By default the executor, executor will inherit the slave's environment variables. mesos_masters_ips: type: string description: IP addresses of the Mesos master servers. mesos_slave_wc_curl_cli: type: string description: Wait condition notify command for slave. openstack_ca: type: string description: The OpenStack CA certificate to install on the node. resources: ###################################################################### # # software configs. these are components that are combined into # a multipart MIME user-data archive. # write_heat_params: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: template: {get_file: fragments/write-heat-params.yaml} params: "$MESOS_MASTERS_IPS": {get_param: mesos_masters_ips} "$EXECUTOR_REGISTRATION_TIMEOUT": {get_param: executor_registration_timeout} "$HTTP_PROXY": {get_param: http_proxy} "$HTTPS_PROXY": {get_param: https_proxy} "$NO_PROXY": {get_param: no_proxy} "$AUTH_URL": {get_param: auth_url} "$USERNAME": {get_param: username} "$PASSWORD": {get_param: password} "$TENANT_NAME": {get_param: tenant_name} "$VOLUME_DRIVER": {get_param: volume_driver} "$REGION_NAME": {get_param: region_name} "$DOMAIN_NAME": {get_param: domain_name} "$REXRAY_PREEMPT": {get_param: rexray_preempt} "$ISOLATION": {get_param: mesos_slave_isolation} "$WORK_DIR": {get_param: mesos_slave_work_dir} "$IMAGE_PROVIDERS": {get_param: mesos_slave_image_providers} "$EXECUTOR_ENVIRONMENT_VARIABLES": {get_param: mesos_slave_executor_env_variables} add_ext_ca_certs: type: OS::Heat::SoftwareConfig properties: group: ungrouped 
config: str_replace: template: {get_file: fragments/add-ext-ca-certs.sh} params: "@@CACERTS_CONTENT@@": {get_param: openstack_ca} configure_mesos_slave: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/configure-mesos-slave.sh} start_services: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/start-services-slave.sh} slave_wc_notify: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: template: | #!/bin/bash -v wc_notify $VERIFY_CA --data-binary '{"status": "SUCCESS"}' params: wc_notify: {get_param: mesos_slave_wc_curl_cli} "$VERIFY_CA": {get_param: verify_ca} add_proxy: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/add-proxy.sh} volume_service: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/volume-service.sh} mesos_slave_init: type: OS::Heat::MultipartMime properties: parts: - config: {get_resource: add_ext_ca_certs} - config: {get_resource: write_heat_params} - config: {get_resource: configure_mesos_slave} - config: {get_resource: add_proxy} - config: {get_resource: volume_service} - config: {get_resource: start_services} - config: {get_resource: slave_wc_notify} outputs: mesos_init: value: {get_resource: mesos_slave_init} description: ID of the multipart mime. magnum-6.1.0/magnum/drivers/mesos_ubuntu_v1/COPYING0000666000175100017510000002613613244017334022174 0ustar zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. 
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v1/0000775000175100017510000000000013244017675022414 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v1/version.py0000666000175100017510000000126413244017334024450 0ustar zuulzuul00000000000000# Copyright 2016 - Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
version = '1.0.0' driver = 'swarm_fedora_atomic_v1' container_version = '1.12.6' magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v1/driver.py0000666000175100017510000000221213244017334024250 0ustar zuulzuul00000000000000# Copyright 2016 Rackspace Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.drivers.heat import driver from magnum.drivers.swarm_fedora_atomic_v1 import monitor from magnum.drivers.swarm_fedora_atomic_v1 import template_def class Driver(driver.HeatDriver): @property def provides(self): return [ {'server_type': 'vm', 'os': 'fedora-atomic', 'coe': 'swarm'}, ] def get_template_definition(self): return template_def.AtomicSwarmTemplateDefinition() def get_monitor(self, context, cluster): return monitor.SwarmMonitor(context, cluster) magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v1/image/0000775000175100017510000000000013244017675023476 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v1/image/openvswitch/0000775000175100017510000000000013244017675026047 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v1/image/openvswitch/README.rst0000666000175100017510000000502613244017334027533 0ustar zuulzuul00000000000000=================== Neutron Openvswitch =================== This Dockerfile creates a Docker image based on Fedora 23 that runs Openvswitch and the Neutron L2 agent for Openvswitch. 
This container image is used by Magnum when a Swarm cluster is deployed with the attribute:: --network-driver=kuryr Magnum deploys this container on each Swarm node along with the Kuryr container to support Docker advanced networking based on the `Container Networking Model `_. To build the image, run this command in the same directory as the Dockerfile:: docker build -t openstackmagnum/fedora23-neutron-ovs:testing . This image is available on Docker Hub as:: openstackmagnum/fedora23-neutron-ovs:testing To update the image with a new build:: docker push openstackmagnum/fedora23-neutron-ovs:testing The 'testing' tag may be replaced with 'latest' or other tag as needed. This image is intended to run on the Fedora Atomic public image which by default does not have these packages installed. The common practice for Atomic OS is to run new packages in containers rather than installing them in the OS. For the Neutron agent, you will need to provide 3 files at these locations: - /etc/neutron/neutron.conf - /etc/neutron/policy.json - /etc/neutron/plugins/ml2/ml2_conf.ini These files are typically installed in the same locations on the Neutron controller node. The policy.json file is copied into the Docker image because it is fairly static and does not require customization for the cluster. If it is changed in the Neutron master repo, you just need to rebuild the Docker image to update the file. Magnum will create the other 2 files on each cluster node in the directory /etc/kuryr and map them to the proper directories in the container using the Docker -v option. Since Openvswitch needs to operate on the host network name space, the Docker container will need the -net=host option. The /var/run/openvswitch directory is also mapped to the cluster node so that the Kuryr container can talk to openvswitch. 
To run the image from Fedora Atomic:: docker run --net=host \ --cap-add=NET_ADMIN \ --privileged=true \ -v /var/run/openvswitch:/var/run/openvswitch \ -v /lib/modules:/lib/modules:ro \ -v /etc/kuryr/neutron.conf:/etc/neutron/neutron.conf \ -v /etc/kuryr/ml2_conf.ini:/etc/neutron/plugins/ml2/ml2_conf.ini \ --name openvswitch-agent \ openstackmagnum/fedora23-neutron-ovs:testing magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v1/image/openvswitch/run_openvswitch_neutron.sh0000777000175100017510000000031313244017334033404 0ustar zuulzuul00000000000000#!/bin/bash /usr/share/openvswitch/scripts/ovs-ctl start --system-id=random /usr/bin/neutron-openvswitch-agent --config-file /etc/neutron/neutron.conf --log-file /var/log/neutron/openvswitch-agent.log magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v1/image/openvswitch/Dockerfile0000666000175100017510000000103513244017334030032 0ustar zuulzuul00000000000000FROM fedora:23 MAINTAINER Ton Ngo "ton@us.ibm.com" WORKDIR / RUN dnf -y install openvswitch \ openstack-neutron-ml2 \ openstack-neutron-openvswitch \ bridge-utils \ git \ && dnf clean all RUN cd /opt \ && git clone https://git.openstack.org/openstack/neutron \ && cp neutron/etc/policy.json /etc/neutron/. \ && rm -rf neutron \ && dnf -y remove git VOLUME /var/run/openvswitch ADD run_openvswitch_neutron.sh /usr/bin/run_openvswitch_neutron.sh CMD ["/usr/bin/run_openvswitch_neutron.sh"] magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v1/monitor.py0000777000175100017510000000773213244017334024463 0ustar zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log from magnum.common import docker_utils from magnum.conductor import monitors LOG = log.getLogger(__name__) class SwarmMonitor(monitors.MonitorBase): def __init__(self, context, cluster): super(SwarmMonitor, self).__init__(context, cluster) self.data = {} self.data['nodes'] = [] self.data['containers'] = [] @property def metrics_spec(self): return { 'memory_util': { 'unit': '%', 'func': 'compute_memory_util', }, } def pull_data(self): with docker_utils.docker_for_cluster(self.context, self.cluster) as docker: system_info = docker.info() self.data['nodes'] = self._parse_node_info(system_info) # pull data from each container containers = [] for container in docker.containers(all=True): try: container = docker.inspect_container(container['Id']) except Exception as e: LOG.warning("Ignore error [%(e)s] when inspecting " "container %(container_id)s.", {'e': e, 'container_id': container['Id']}, exc_info=True) containers.append(container) self.data['containers'] = containers def compute_memory_util(self): mem_total = 0 for node in self.data['nodes']: mem_total += node['MemTotal'] mem_reserved = 0 for container in self.data['containers']: mem_reserved += container['HostConfig']['Memory'] if mem_total == 0: return 0 else: return mem_reserved * 100 / mem_total def _parse_node_info(self, system_info): """Parse system_info to retrieve memory size of each node. :param system_info: The output returned by docker.info(). 
Example: { u'Debug': False, u'NEventsListener': 0, u'DriverStatus': [ [u'\x08Strategy', u'spread'], [u'\x08Filters', u'...'], [u'\x08Nodes', u'2'], [u'node1', u'10.0.0.4:2375'], [u' \u2514 Containers', u'1'], [u' \u2514 Reserved CPUs', u'0 / 1'], [u' \u2514 Reserved Memory', u'0 B / 2.052 GiB'], [u'node2', u'10.0.0.3:2375'], [u' \u2514 Containers', u'2'], [u' \u2514 Reserved CPUs', u'0 / 1'], [u' \u2514 Reserved Memory', u'0 B / 2.052 GiB'] ], u'Containers': 3 } :return: Memory size of each node. Excample: [{'MemTotal': 2203318222.848}, {'MemTotal': 2203318222.848}] """ nodes = [] for info in system_info['DriverStatus']: key = info[0] value = info[1] if key == u' \u2514 Reserved Memory': memory = value # Example: '0 B / 2.052 GiB' memory = memory.split('/')[1].strip() # Example: '2.052 GiB' memory = memory.split(' ')[0] # Example: '2.052' memory = float(memory) * 1024 * 1024 * 1024 nodes.append({'MemTotal': memory}) return nodes magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v1/template_def.py0000666000175100017510000000206313244017334025412 0ustar zuulzuul00000000000000# Copyright 2016 Rackspace Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os from magnum.drivers.heat import swarm_fedora_template_def as sftd class AtomicSwarmTemplateDefinition(sftd.SwarmFedoraTemplateDefinition): """Docker swarm template for a Fedora Atomic VM.""" @property def driver_module_path(self): return __name__[:__name__.rindex('.')] @property def template_path(self): return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates/cluster.yaml') magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v1/__init__.py0000666000175100017510000000000013244017334024505 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v1/templates/0000775000175100017510000000000013244017675024412 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v1/templates/cluster.yaml0000666000175100017510000003717213244017334026763 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This template will boot a Docker swarm cluster. A swarm cluster is made up of several master nodes, and N agent nodes. Every node in the cluster, including the master, is running a Docker daemon and a swarm agent advertising it to the cluster. The master is running an addition swarm master container listening on port 2376. By default, the cluster is made up of one master node and one agent node. 
parameters: # # REQUIRED PARAMETERS # ssh_key_name: type: string description: name of ssh key to be provisioned on our server external_network: type: string description: uuid/name of a network to use for floating ip addresses fixed_network: type: string description: uuid/name of an existing network to use to provision machines default: "" fixed_subnet: type: string description: uuid/name of an existing subnet to use to provision machines default: "" discovery_url: type: string description: url provided for node discovery cluster_uuid: type: string description: identifier for the cluster this template is generating magnum_url: type: string description: endpoint to retrieve TLS certs from server_image: type: string description: glance image used to boot the server # # OPTIONAL PARAMETERS # master_flavor: type: string default: m1.small description: flavor to use when booting the swarm master node_flavor: type: string default: m1.small description: flavor to use when booting the swarm node dns_nameserver: type: string description: address of a dns nameserver reachable in your environment default: 8.8.8.8 http_proxy: type: string description: http proxy address for docker default: "" https_proxy: type: string description: https proxy address for docker default: "" no_proxy: type: string description: no proxies for docker default: "" number_of_masters: type: number description: how many swarm masters to spawn default: 1 number_of_nodes: type: number description: how many swarm nodes to spawn default: 1 fixed_network_cidr: type: string description: network range for fixed ip network default: "10.0.0.0/24" tls_disabled: type: boolean description: whether or not to enable TLS default: False verify_ca: type: boolean description: whether or not to validate certificate authority network_driver: type: string description: network driver to use for instantiating container networks default: None flannel_network_cidr: type: string description: network range for flannel overlay 
network default: 10.100.0.0/16 flannel_network_subnetlen: type: number description: size of subnet assigned to each master default: 24 flannel_backend: type: string description: > specify the backend for flannel, default udp backend default: "udp" constraints: - allowed_values: ["udp", "vxlan", "host-gw"] docker_volume_size: type: number description: > size of a cinder volume to allocate to docker for container/image storage default: 0 docker_volume_type: type: string description: > type of a cinder volume to allocate to docker for container/image storage docker_storage_driver: type: string description: docker storage driver name default: "devicemapper" loadbalancing_protocol: type: string description: > The protocol which is used for load balancing. If you want to change tls_disabled option to 'True', please change this to "HTTP". default: TCP constraints: - allowed_values: ["TCP", "HTTP"] swarm_port: type: number description: > The port which are used by swarm manager to provide swarm service. default: 2376 swarm_version: type: string description: version of swarm used for swarm cluster default: 1.2.5 swarm_strategy: type: string description: > schedule strategy to be used by swarm manager default: "spread" trustee_domain_id: type: string description: domain id of the trustee default: "" trustee_user_id: type: string description: user id of the trustee default: "" trustee_username: type: string description: username of the trustee default: "" trustee_password: type: string description: password of the trustee default: "" hidden: true trust_id: type: string description: id of the trust which is used by the trustee default: "" hidden: true auth_url: type: string description: url for keystone registry_enabled: type: boolean description: > Indicates whether the docker registry is enabled. 
default: false registry_port: type: number description: port of registry service default: 5000 swift_region: type: string description: region of swift service default: "" registry_container: type: string description: > name of swift container which docker registry stores images in default: "container" registry_insecure: type: boolean description: > indicates whether to skip TLS verification between registry and backend storage default: true registry_chunksize: type: number description: > size fo the data segments for the swift dynamic large objects default: 5242880 volume_driver: type: string description: volume driver to use for container storage default: "" constraints: - allowed_values: ["","rexray"] rexray_preempt: type: string description: > enables any host to take control of a volume irrespective of whether other hosts are using the volume default: "false" openstack_ca: type: string hidden: true description: The OpenStack CA certificate to install on the node. nodes_affinity_policy: type: string description: > affinity policy for nodes server group constraints: - allowed_values: ["affinity", "anti-affinity", "soft-affinity", "soft-anti-affinity"] resources: ###################################################################### # # network resources. allocate a network and router for our server. # it would also be possible to take advantage of existing network # resources (and have the deployer provide network and subnet ids, # etc, as parameters), but I wanted to minmize the amount of # configuration necessary to make this go. 
network: type: ../../common/templates/network.yaml properties: existing_network: {get_param: fixed_network} existing_subnet: {get_param: fixed_subnet} private_network_cidr: {get_param: fixed_network_cidr} dns_nameserver: {get_param: dns_nameserver} external_network: {get_param: external_network} api_lb: type: ../../common/templates/lb.yaml properties: fixed_subnet: {get_attr: [network, fixed_subnet]} external_network: {get_param: external_network} protocol: {get_param: loadbalancing_protocol} port: {get_param: swarm_port} etcd_lb: type: ../../common/templates/lb.yaml properties: fixed_subnet: {get_attr: [network, fixed_subnet]} external_network: {get_param: external_network} protocol: {get_param: loadbalancing_protocol} port: 2379 ###################################################################### # # security groups. we need to permit network traffic of various # sorts. # secgroup_swarm_manager: type: "OS::Neutron::SecurityGroup" properties: rules: - protocol: icmp - protocol: tcp port_range_min: 22 port_range_max: 22 - protocol: tcp port_range_min: 2376 port_range_max: 2376 - protocol: tcp remote_ip_prefix: {get_param: fixed_network_cidr} port_range_min: 1 port_range_max: 65535 - protocol: udp port_range_min: 53 port_range_max: 53 secgroup_swarm_node: type: "OS::Neutron::SecurityGroup" properties: rules: - protocol: icmp - protocol: tcp - protocol: udp ###################################################################### # # resources that expose the IPs of either the swarm master or a given # LBaaS pool depending on whether LBaaS is enabled for the cluster. 
# api_address_lb_switch: type: Magnum::ApiGatewaySwitcher properties: pool_public_ip: {get_attr: [api_lb, floating_address]} pool_private_ip: {get_attr: [api_lb, address]} master_public_ip: {get_attr: [swarm_masters, resource.0.swarm_master_external_ip]} master_private_ip: {get_attr: [swarm_masters, resource.0.swarm_master_ip]} ###################################################################### # # resources that expose the server group for all nodes include master # and minions. # nodes_server_group: type: OS::Nova::ServerGroup properties: policies: [{get_param: nodes_affinity_policy}] etcd_address_lb_switch: type: Magnum::ApiGatewaySwitcher properties: pool_private_ip: {get_attr: [etcd_lb, address]} master_private_ip: {get_attr: [swarm_masters, resource.0.swarm_master_ip]} ###################################################################### # # Swarm manager is responsible for the entire cluster and manages the # resources of multiple Docker hosts at scale. # It supports high availability by create a primary manager and multiple # replica instances. 
swarm_masters: type: "OS::Heat::ResourceGroup" depends_on: - network properties: count: {get_param: number_of_masters} resource_def: type: swarmmaster.yaml properties: name: list_join: - '-' - [{ get_param: 'OS::stack_name' }, 'master', '%index%'] ssh_key_name: {get_param: ssh_key_name} server_image: {get_param: server_image} server_flavor: {get_param: master_flavor} docker_volume_size: {get_param: docker_volume_size} docker_volume_type: {get_param: docker_volume_type} docker_storage_driver: {get_param: docker_storage_driver} fixed_network_id: {get_attr: [network, fixed_network]} fixed_subnet_id: {get_attr: [network, fixed_subnet]} external_network: {get_param: external_network} discovery_url: {get_param: discovery_url} http_proxy: {get_param: http_proxy} https_proxy: {get_param: https_proxy} no_proxy: {get_param: no_proxy} swarm_api_ip: {get_attr: [api_lb, address]} cluster_uuid: {get_param: cluster_uuid} magnum_url: {get_param: magnum_url} tls_disabled: {get_param: tls_disabled} verify_ca: {get_param: verify_ca} secgroup_swarm_master_id: {get_resource: secgroup_swarm_manager} network_driver: {get_param: network_driver} flannel_network_cidr: {get_param: flannel_network_cidr} flannel_network_subnetlen: {get_param: flannel_network_subnetlen} flannel_backend: {get_param: flannel_backend} swarm_port: {get_param: swarm_port} api_pool_id: {get_attr: [api_lb, pool_id]} etcd_pool_id: {get_attr: [etcd_lb, pool_id]} etcd_server_ip: {get_attr: [etcd_lb, address]} api_ip_address: {get_attr: [api_lb, floating_address]} swarm_version: {get_param: swarm_version} swarm_strategy: {get_param: swarm_strategy} trustee_user_id: {get_param: trustee_user_id} trustee_password: {get_param: trustee_password} trust_id: {get_param: trust_id} auth_url: {get_param: auth_url} volume_driver: {get_param: volume_driver} rexray_preempt: {get_param: rexray_preempt} openstack_ca: {get_param: openstack_ca} nodes_server_group_id: {get_resource: nodes_server_group} swarm_nodes: type: 
"OS::Heat::ResourceGroup" depends_on: - network properties: count: {get_param: number_of_nodes} resource_def: type: swarmnode.yaml properties: name: list_join: - '-' - [{ get_param: 'OS::stack_name' }, 'node', '%index%'] ssh_key_name: {get_param: ssh_key_name} server_image: {get_param: server_image} server_flavor: {get_param: node_flavor} docker_volume_size: {get_param: docker_volume_size} docker_volume_type: {get_param: docker_volume_type} docker_storage_driver: {get_param: docker_storage_driver} fixed_network_id: {get_attr: [network, fixed_network]} fixed_subnet_id: {get_attr: [network, fixed_subnet]} external_network: {get_param: external_network} http_proxy: {get_param: http_proxy} https_proxy: {get_param: https_proxy} no_proxy: {get_param: no_proxy} swarm_api_ip: {get_attr: [api_address_lb_switch, private_ip]} cluster_uuid: {get_param: cluster_uuid} magnum_url: {get_param: magnum_url} tls_disabled: {get_param: tls_disabled} verify_ca: {get_param: verify_ca} secgroup_swarm_node_id: {get_resource: secgroup_swarm_node} flannel_network_cidr: {get_param: flannel_network_cidr} network_driver: {get_param: network_driver} etcd_server_ip: {get_attr: [etcd_address_lb_switch, private_ip]} api_ip_address: {get_attr: [api_address_lb_switch, public_ip]} swarm_version: {get_param: swarm_version} trustee_domain_id: {get_param: trustee_domain_id} trustee_user_id: {get_param: trustee_user_id} trustee_username: {get_param: trustee_username} trustee_password: {get_param: trustee_password} trust_id: {get_param: trust_id} auth_url: {get_param: auth_url} registry_enabled: {get_param: registry_enabled} registry_port: {get_param: registry_port} swift_region: {get_param: swift_region} registry_container: {get_param: registry_container} registry_insecure: {get_param: registry_insecure} registry_chunksize: {get_param: registry_chunksize} volume_driver: {get_param: volume_driver} rexray_preempt: {get_param: rexray_preempt} openstack_ca: {get_param: openstack_ca} nodes_server_group_id: 
{get_resource: nodes_server_group} outputs: api_address: value: str_replace: template: api_ip_address params: api_ip_address: {get_attr: [api_address_lb_switch, public_ip]} description: > This is the API endpoint of the Swarm masters. Use this to access the Swarm API server from outside the cluster. swarm_masters_private: value: {get_attr: [swarm_masters, swarm_master_ip]} description: > This is a list of the "private" addresses of all the Swarm masters. swarm_masters: value: {get_attr: [swarm_masters, swarm_master_external_ip]} description: > This is a list of "public" ip addresses of all Swarm masters. Use these addresses to log into the Swarm masters via ssh. swarm_nodes_private: value: {get_attr: [swarm_nodes, swarm_node_ip]} description: > This is a list of the "private" addresses of all the Swarm nodes. swarm_nodes: value: {get_attr: [swarm_nodes, swarm_node_external_ip]} description: > This is a list of the "public" addresses of all the Swarm nodes. Use these addresses to, e.g., log into the nodes. discovery_url: value: {get_param: discovery_url} description: > This the discovery url for Swarm cluster. magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v1/templates/swarmnode.yaml0000666000175100017510000003300513244017334027270 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is a nested stack that defines a single swarm node, based on a vanilla Fedora 20 cloud image. This stack is included by a ResourceGroup resource in the parent template (swarmcluster.yaml). 
parameters: name: type: string description: server name server_image: type: string description: glance image used to boot the server server_flavor: type: string description: flavor to use when booting the server ssh_key_name: type: string description: name of ssh key to be provisioned on our server docker_volume_size: type: number description: > size of a cinder volume to allocate to docker for container/image storage docker_volume_type: type: string description: > type of a cinder volume to allocate to docker for container/image storage docker_storage_driver: type: string description: docker storage driver name external_network: type: string description: uuid/name of a network to use for floating ip addresses fixed_network_id: type: string description: Network from which to allocate fixed addresses. fixed_subnet_id: type: string description: Subnet from which to allocate fixed addresses. network_driver: type: string description: network driver to use for instantiating container networks flannel_network_cidr: type: string description: network range for flannel overlay network http_proxy: type: string description: http proxy address for docker https_proxy: type: string description: https proxy address for docker no_proxy: type: string description: no proxies for docker swarm_api_ip: type: string description: swarm master's api server ip address api_ip_address: type: string description: swarm master's api server public ip address cluster_uuid: type: string description: identifier for the cluster this template is generating magnum_url: type: string description: endpoint to retrieve TLS certs from tls_disabled: type: boolean description: whether or not to disable TLS verify_ca: type: boolean description: whether or not to validate certificate authority swarm_version: type: string description: version of swarm used for swarm cluster secgroup_swarm_node_id: type: string description: ID of the security group for swarm node. 
etcd_server_ip: type: string description: ip address of the load balancer pool of etcd server. trustee_domain_id: type: string description: domain id of the trustee trustee_user_id: type: string description: user id of the trustee trustee_username: type: string description: username of the trustee trustee_password: type: string description: password of the trustee hidden: true trust_id: type: string description: id of the trust which is used by the trustee hidden: true auth_url: type: string description: url for keystone registry_enabled: type: boolean description: > Indicates whether the docker registry is enabled. registry_port: type: number description: port of registry service swift_region: type: string description: region of swift service registry_container: type: string description: > name of swift container which docker registry stores images in registry_insecure: type: boolean description: > indicates whether to skip TLS verification between registry and backend storage registry_chunksize: type: number description: > size fo the data segments for the swift dynamic large objects volume_driver: type: string description: volume driver to use for container storage default: "" rexray_preempt: type: string description: > enables any host to take control of a volume irrespective of whether other hosts are using the volume default: "false" openstack_ca: type: string description: The OpenStack CA certificate to install on the node. nodes_server_group_id: type: string description: ID of the server group for kubernetes cluster nodes. resources: node_wait_handle: type: "OS::Heat::WaitConditionHandle" node_wait_condition: type: "OS::Heat::WaitCondition" depends_on: swarm-node properties: handle: {get_resource: node_wait_handle} timeout: 6000 ###################################################################### # # software configs. these are components that are combined into # a multipart MIME user-data archive. 
no_proxy_extended: type: OS::Heat::Value properties: type: string value: list_join: - ',' - - {get_param: swarm_api_ip} - {get_attr: [swarm_node_eth0, fixed_ips, 0, ip_address]} - {get_param: etcd_server_ip} - {get_param: api_ip_address} - {get_param: no_proxy} write_heat_params: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: str_replace: template: {get_file: ../../common/templates/swarm/fragments/write-heat-params-node.yaml} params: "$WAIT_CURL": {get_attr: [node_wait_handle, curl_cli]} "$DOCKER_VOLUME": {get_resource: docker_volume} "$DOCKER_VOLUME_SIZE": {get_param: docker_volume_size} "$DOCKER_STORAGE_DRIVER": {get_param: docker_storage_driver} "$HTTP_PROXY": {get_param: http_proxy} "$HTTPS_PROXY": {get_param: https_proxy} "$NO_PROXY": {get_attr: [no_proxy_extended, value]} "$SWARM_API_IP": {get_param: swarm_api_ip} "$SWARM_NODE_IP": {get_attr: [swarm_node_eth0, fixed_ips, 0, ip_address]} "$CLUSTER_UUID": {get_param: cluster_uuid} "$MAGNUM_URL": {get_param: magnum_url} "$TLS_DISABLED": {get_param: tls_disabled} "$VERIFY_CA": {get_param: verify_ca} "$NETWORK_DRIVER": {get_param: network_driver} "$ETCD_SERVER_IP": {get_param: etcd_server_ip} "$API_IP_ADDRESS": {get_param: api_ip_address} "$SWARM_VERSION": {get_param: swarm_version} "$TRUSTEE_DOMAIN_ID": {get_param: trustee_domain_id} "$TRUSTEE_USER_ID": {get_param: trustee_user_id} "$TRUSTEE_USERNAME": {get_param: trustee_username} "$TRUSTEE_PASSWORD": {get_param: trustee_password} "$TRUST_ID": {get_param: trust_id} "$AUTH_URL": {get_param: auth_url} "$REGISTRY_ENABLED": {get_param: registry_enabled} "$REGISTRY_PORT": {get_param: registry_port} "$SWIFT_REGION": {get_param: swift_region} "$REGISTRY_CONTAINER": {get_param: registry_container} "$REGISTRY_INSECURE": {get_param: registry_insecure} "$REGISTRY_CHUNKSIZE": {get_param: registry_chunksize} "$VOLUME_DRIVER": {get_param: volume_driver} "$REXRAY_PREEMPT": {get_param: rexray_preempt} install_openstack_ca: type: OS::Heat::SoftwareConfig 
properties: group: ungrouped config: str_replace: params: $OPENSTACK_CA: {get_param: openstack_ca} template: {get_file: ../../common/templates/fragments/atomic-install-openstack-ca.sh} remove_docker_key: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/remove-docker-key.sh} make_cert: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/make-cert.py} configure_docker_storage: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: params: $configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_atomic.sh} template: {get_file: ../../common/templates/fragments/configure-docker-storage.sh} configure_docker_registry: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/fragments/configure-docker-registry.sh} add_docker_daemon_options: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/add-docker-daemon-options.sh} write_docker_socket: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/write-docker-socket.yaml} network_service: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/network-service.sh} write_swarm_agent_failure_service: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: str_replace: template: {get_file: ../../common/templates/swarm/fragments/write-cluster-failure-service.yaml} params: "$SERVICE": swarm-agent "$WAIT_CURL": {get_attr: [node_wait_handle, curl_cli]} "$VERIFY_CA": {get_param: verify_ca} write_swarm_agent_service: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/write-swarm-agent-service.sh} 
enable_docker_registry: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/fragments/enable-docker-registry.sh} enable_services: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: str_replace: template: {get_file: ../../common/templates/swarm/fragments/enable-services.sh} params: "$NODE_SERVICES": "docker.socket docker swarm-agent" cfn_signal: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/cfn-signal.sh} configure_selinux: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/configure-selinux.sh} add_proxy: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/add-proxy.sh} volume_service: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/volume-service.sh} swarm_node_init: type: "OS::Heat::MultipartMime" properties: parts: - config: {get_resource: install_openstack_ca} - config: {get_resource: configure_selinux} - config: {get_resource: remove_docker_key} - config: {get_resource: write_heat_params} - config: {get_resource: make_cert} - config: {get_resource: network_service} - config: {get_resource: configure_docker_storage} - config: {get_resource: configure_docker_registry} - config: {get_resource: write_swarm_agent_failure_service} - config: {get_resource: write_swarm_agent_service} - config: {get_resource: add_docker_daemon_options} - config: {get_resource: write_docker_socket} - config: {get_resource: add_proxy} - config: {get_resource: enable_docker_registry} - config: {get_resource: enable_services} - config: {get_resource: cfn_signal} - config: {get_resource: volume_service} # do NOT use "_" (underscore) in the Nova server name # it creates a mismatch between the generated Nova name and its hostname # which can lead 
to weird problems swarm-node: type: "OS::Nova::Server" properties: name: {get_param: name} image: get_param: server_image flavor: get_param: server_flavor key_name: get_param: ssh_key_name user_data_format: RAW user_data: {get_resource: swarm_node_init} networks: - port: get_resource: swarm_node_eth0 scheduler_hints: { group: { get_param: nodes_server_group_id }} swarm_node_eth0: type: "OS::Neutron::Port" properties: network_id: get_param: fixed_network_id security_groups: - {get_param: secgroup_swarm_node_id} fixed_ips: - subnet_id: get_param: fixed_subnet_id allowed_address_pairs: - ip_address: {get_param: flannel_network_cidr} swarm_node_floating: type: "OS::Neutron::FloatingIP" properties: floating_network: get_param: external_network port_id: get_resource: swarm_node_eth0 ###################################################################### # # docker storage. This allocates a cinder volume and attaches it # to the node. # docker_volume: type: Magnum::Optional::Cinder::Volume properties: size: {get_param: docker_volume_size} volume_type: {get_param: docker_volume_type} docker_volume_attach: type: Magnum::Optional::Cinder::VolumeAttachment properties: instance_uuid: {get_resource: swarm-node} volume_id: {get_resource: docker_volume} mountpoint: /dev/vdb outputs: swarm_node_ip: value: {get_attr: [swarm_node_eth0, fixed_ips, 0, ip_address]} description: > This is the "private" address of the Swarm node. swarm_node_external_ip: value: {get_attr: [swarm_node_floating, floating_ip_address]} description: > This is the "public" address of the Swarm node. magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v1/templates/README.md0000666000175100017510000000635013244017334025667 0ustar zuulzuul00000000000000A Docker swarm cluster with Heat ============================== These [Heat][] templates will deploy an *N*-node [swarm][] cluster, where *N* is the value of the `number_of_nodes` parameter you specify when creating the stack. 
[heat]: https://wiki.openstack.org/wiki/Heat [swarm]: https://github.com/docker/swarm/ ## Requirements ### OpenStack These templates will work with the Juno version of Heat. ### Guest image These templates will work with either CentOS Atomic Host or Fedora 21 Atomic. ## Creating the stack First, you must create a swarm token, which is used to uniquely identify the cluster to the global discovery service. This can be done by issuing a create call to the swarm CLI. Alternatively, if you have access to Docker you can use the dockerswarm/swarm image. $ swarm create afeb445bcb2f573aeb8ff3a199785f45 $ docker run dockerswarm/swarm create d8cdfe5128af6e1075b34aa06ff1cc2c Creating an environment file `local.yaml` with parameters specific to your environment: parameters: ssh_key_name: testkey external_network: 028d70dd-67b8-4901-8bdd-0c62b06cce2d dns_nameserver: 192.168.200.1 server_image: fedora-atomic-latest discovery_url: token://d8cdfe5128af6e1075b34aa06ff1cc2c And then create the stack, referencing that environment file: heat stack-create -f swarm.yaml -e local.yaml my-swarm-cluster You must provide values for: - `ssh_key_name` - `external_network` - `server_image` - `discovery_url` ## Interacting with Swarm The Docker CLI interacts with the cluster through the swarm master listening on port 2376. You can get the ip address of the swarm master using the `heat output-show` command: $ heat output-show my-swarm-cluster swarm_master "192.168.200.86" Provide the Docker CLI with the address for the swarm master. $ docker -H tcp://192.168.200.86:2376 info Containers: 4 Nodes: 3 swarm-master: 10.0.0.1:2375 swarm-node1: 10.0.0.2:2375 swarm-node2: 10.0.0.3:2375 ## Testing You can test the swarm cluster with the Docker CLI by running a container. In the example below, a container is spawned in the cluster to ping 8.8.8.8. 
$ docker -H tcp://192.168.200.86:2376 run -i cirros /bin/ping -c 4 8.8.8.8 PING 8.8.8.8 (8.8.8.8): 56 data bytes 64 bytes from 8.8.8.8: seq=0 ttl=127 time=40.749 ms 64 bytes from 8.8.8.8: seq=1 ttl=127 time=46.264 ms 64 bytes from 8.8.8.8: seq=2 ttl=127 time=42.808 ms 64 bytes from 8.8.8.8: seq=3 ttl=127 time=42.270 ms --- 8.8.8.8 ping statistics --- 4 packets transmitted, 4 packets received, 0% packet loss round-trip min/avg/max = 40.749/43.022/46.264 ms ## License Copyright 2014 Lars Kellogg-Stedman Copyright 2015 Rackspace Hosting Licensed under the Apache License, Version 2.0 (the "License"); you may not use these files except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v1/templates/swarmmaster.yaml0000666000175100017510000003761013244017334027644 0ustar zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is a nested stack that defines swarm master node. A swarm mater node is running a Docker daemon and a swarm manager container listening on port 2376. 
parameters: name: type: string description: server name ssh_key_name: type: string description: name of ssh key to be provisioned on our server docker_volume_size: type: number description: > size of a cinder volume to allocate to docker for container/image storage docker_volume_type: type: string description: > type of a cinder volume to allocate to docker for container/image storage docker_storage_driver: type: string description: docker storage driver name external_network: type: string description: uuid/name of a network to use for floating ip addresses discovery_url: type: string description: url provided for node discovery cluster_uuid: type: string description: identifier for the cluster this template is generating magnum_url: type: string description: endpoint to retrieve TLS certs from fixed_network_id: type: string description: Network from which to allocate fixed addresses. fixed_subnet_id: type: string description: Subnet from which to allocate fixed addresses. swarm_api_ip: type: string description: swarm master's api server ip address default: "" api_ip_address: type: string description: swarm master's api server public ip address default: "" server_image: type: string description: glance image used to boot the server server_flavor: type: string description: flavor to use when booting the server http_proxy: type: string description: http proxy address for docker https_proxy: type: string description: https proxy address for docker no_proxy: type: string description: no proxies for docker tls_disabled: type: boolean description: whether or not to enable TLS verify_ca: type: boolean description: whether or not to validate certificate authority network_driver: type: string description: network driver to use for instantiating container networks flannel_network_cidr: type: string description: network range for flannel overlay network flannel_network_subnetlen: type: number description: size of subnet assigned to each master flannel_backend: type: string 
description: > specify the backend for flannel, default udp backend constraints: - allowed_values: ["udp", "vxlan", "host-gw"] swarm_version: type: string description: version of swarm used for swarm cluster swarm_strategy: type: string description: > schedule strategy to be used by swarm manager constraints: - allowed_values: ["spread", "binpack", "random"] secgroup_swarm_master_id: type: string description: ID of the security group for swarm master. swarm_port: type: number description: > The port which are used by swarm manager to provide swarm service. api_pool_id: type: string description: ID of the load balancer pool of swarm master server. etcd_pool_id: type: string description: ID of the load balancer pool of etcd server. etcd_server_ip: type: string description: ip address of the load balancer pool of etcd server. default: "" trustee_user_id: type: string description: user id of the trustee trustee_password: type: string description: password of the trustee hidden: true trust_id: type: string description: id of the trust which is used by the trustee hidden: true auth_url: type: string description: url for keystone volume_driver: type: string description: volume driver to use for container storage default: "" rexray_preempt: type: string description: > enables any host to take control of a volume irrespective of whether other hosts are using the volume default: "false" openstack_ca: type: string description: The OpenStack CA certificate to install on the node. nodes_server_group_id: type: string description: ID of the server group for kubernetes cluster nodes. 
resources: master_wait_handle: type: "OS::Heat::WaitConditionHandle" master_wait_condition: type: "OS::Heat::WaitCondition" depends_on: swarm-master properties: handle: {get_resource: master_wait_handle} timeout: 6000 ###################################################################### # # resource that exposes the IPs of either the Swarm master or the API # LBaaS pool depending on whether LBaaS is enabled for the cluster. # api_address_switch: type: Magnum::ApiGatewaySwitcher properties: pool_public_ip: {get_param: api_ip_address} pool_private_ip: {get_param: swarm_api_ip} master_public_ip: {get_attr: [swarm_master_floating, floating_ip_address]} master_private_ip: {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]} etcd_address_switch: type: Magnum::ApiGatewaySwitcher properties: pool_private_ip: {get_param: etcd_server_ip} master_private_ip: {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]} ###################################################################### # # software configs. these are components that are combined into # a multipart MIME user-data archive. 
# no_proxy_extended: type: OS::Heat::Value properties: type: string value: list_join: - ',' - - {get_attr: [api_address_switch, private_ip]} - {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]} - {get_attr: [etcd_address_switch, private_ip]} - {get_attr: [api_address_switch, public_ip]} - {get_param: no_proxy} write_heat_params: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: str_replace: template: {get_file: ../../common/templates/swarm/fragments/write-heat-params-master.yaml} params: "$WAIT_CURL": {get_attr: [master_wait_handle, curl_cli]} "$DOCKER_VOLUME": {get_resource: docker_volume} "$DOCKER_VOLUME_SIZE": {get_param: docker_volume_size} "$DOCKER_STORAGE_DRIVER": {get_param: docker_storage_driver} "$ETCD_DISCOVERY_URL": {get_param: discovery_url} "$HTTP_PROXY": {get_param: http_proxy} "$HTTPS_PROXY": {get_param: https_proxy} "$NO_PROXY": {get_attr: [no_proxy_extended, value]} "$SWARM_API_IP": {get_attr: [api_address_switch, private_ip]} "$SWARM_NODE_IP": {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]} "$CLUSTER_UUID": {get_param: cluster_uuid} "$MAGNUM_URL": {get_param: magnum_url} "$TLS_DISABLED": {get_param: tls_disabled} "$VERIFY_CA": {get_param: verify_ca} "$NETWORK_DRIVER": {get_param: network_driver} "$FLANNEL_NETWORK_CIDR": {get_param: flannel_network_cidr} "$FLANNEL_NETWORK_SUBNETLEN": {get_param: flannel_network_subnetlen} "$FLANNEL_BACKEND": {get_param: flannel_backend} "$ETCD_SERVER_IP": {get_attr: [etcd_address_switch, private_ip]} "$API_IP_ADDRESS": {get_attr: [api_address_switch, public_ip]} "$SWARM_VERSION": {get_param: swarm_version} "$TRUSTEE_USER_ID": {get_param: trustee_user_id} "$TRUSTEE_PASSWORD": {get_param: trustee_password} "$TRUST_ID": {get_param: trust_id} "$AUTH_URL": {get_param: auth_url} "$VOLUME_DRIVER": {get_param: volume_driver} "$REXRAY_PREEMPT": {get_param: rexray_preempt} install_openstack_ca: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: params: 
$OPENSTACK_CA: {get_param: openstack_ca} template: {get_file: ../../common/templates/fragments/atomic-install-openstack-ca.sh} write_network_config: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/write-network-config.sh} network_config_service: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/network-config-service.sh} network_service: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/network-service.sh} configure_etcd: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/configure-etcd.sh} remove_docker_key: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/remove-docker-key.sh} configure_docker_storage: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: params: $configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_atomic.sh} template: {get_file: ../../common/templates/fragments/configure-docker-storage.sh} make_cert: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/make-cert.py} add_docker_daemon_options: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/add-docker-daemon-options.sh} write_swarm_manager_failure_service: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: str_replace: template: {get_file: ../../common/templates/swarm/fragments/write-cluster-failure-service.yaml} params: "$SERVICE": swarm-manager "$WAIT_CURL": {get_attr: [master_wait_handle, curl_cli]} "$VERIFY_CA": {get_param: verify_ca} write_docker_socket: type: "OS::Heat::SoftwareConfig" properties: group: 
ungrouped config: {get_file: ../../common/templates/swarm/fragments/write-docker-socket.yaml} write_swarm_master_service: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: str_replace: template: {get_file: ../../common/templates/swarm/fragments/write-swarm-master-service.sh} params: "$ETCD_SERVER_IP": {get_attr: [etcd_address_switch, private_ip]} "$NODE_IP": {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]} "$WAIT_CURL": {get_attr: [master_wait_handle, curl_cli]} "$HTTP_PROXY": {get_param: http_proxy} "$HTTPS_PROXY": {get_param: https_proxy} "$NO_PROXY": {get_attr: [no_proxy_extended, value]} "$TLS_DISABLED": {get_param: tls_disabled} "$VERIFY_CA": {get_param: verify_ca} "$SWARM_VERSION": {get_param: swarm_version} "$SWARM_STRATEGY": {get_param: swarm_strategy} enable_services: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: str_replace: template: {get_file: ../../common/templates/swarm/fragments/enable-services.sh} params: "$NODE_SERVICES": "etcd docker.socket docker swarm-manager" cfn_signal: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/cfn-signal.sh} configure_selinux: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/configure-selinux.sh} add_proxy: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/add-proxy.sh} volume_service: type: "OS::Heat::SoftwareConfig" properties: group: ungrouped config: {get_file: ../../common/templates/swarm/fragments/volume-service.sh} swarm_master_init: type: "OS::Heat::MultipartMime" properties: parts: - config: {get_resource: install_openstack_ca} - config: {get_resource: configure_selinux} - config: {get_resource: remove_docker_key} - config: {get_resource: write_heat_params} - config: {get_resource: make_cert} - config: {get_resource: configure_etcd} - config: 
{get_resource: write_network_config} - config: {get_resource: network_config_service} - config: {get_resource: network_service} - config: {get_resource: configure_docker_storage} - config: {get_resource: write_swarm_manager_failure_service} - config: {get_resource: add_docker_daemon_options} - config: {get_resource: write_docker_socket} - config: {get_resource: write_swarm_master_service} - config: {get_resource: add_proxy} - config: {get_resource: enable_services} - config: {get_resource: cfn_signal} - config: {get_resource: volume_service} ###################################################################### # # Swarm_manager is a special node running the swarm manage daemon along # side the swarm agent. # # do NOT use "_" (underscore) in the Nova server name # it creates a mismatch between the generated Nova name and its hostname # which can lead to weird problems swarm-master: type: "OS::Nova::Server" properties: name: {get_param: name} image: get_param: server_image flavor: get_param: server_flavor key_name: get_param: ssh_key_name user_data_format: RAW user_data: {get_resource: swarm_master_init} networks: - port: get_resource: swarm_master_eth0 scheduler_hints: { group: { get_param: nodes_server_group_id }} swarm_master_eth0: type: "OS::Neutron::Port" properties: network_id: get_param: fixed_network_id security_groups: - {get_param: secgroup_swarm_master_id} fixed_ips: - subnet_id: get_param: fixed_subnet_id allowed_address_pairs: - ip_address: {get_param: flannel_network_cidr} swarm_master_floating: type: "OS::Neutron::FloatingIP" properties: floating_network: get_param: external_network port_id: get_resource: swarm_master_eth0 api_pool_member: type: Magnum::Optional::Neutron::LBaaS::PoolMember properties: pool: {get_param: api_pool_id} address: {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]} subnet: { get_param: fixed_subnet_id } protocol_port: {get_param: swarm_port} etcd_pool_member: type: Magnum::Optional::Neutron::LBaaS::PoolMember 
properties: pool: {get_param: etcd_pool_id} address: {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]} subnet: { get_param: fixed_subnet_id } protocol_port: 2379 ###################################################################### # # docker storage. This allocates a cinder volume and attaches it # to the node. # docker_volume: type: Magnum::Optional::Cinder::Volume properties: size: {get_param: docker_volume_size} volume_type: {get_param: docker_volume_type} docker_volume_attach: type: Magnum::Optional::Cinder::VolumeAttachment properties: instance_uuid: {get_resource: swarm-master} volume_id: {get_resource: docker_volume} mountpoint: /dev/vdb outputs: swarm_master_ip: value: {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]} description: > This is the "private" addresses of all the Swarm master. swarm_master_external_ip: value: {get_attr: [swarm_master_floating, floating_ip_address]} description: > This is the "public" ip addresses of Swarm master. magnum-6.1.0/magnum/drivers/swarm_fedora_atomic_v1/templates/COPYING0000666000175100017510000002613613244017334025447 0ustar zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. 
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. magnum-6.1.0/magnum/tests/0000775000175100017510000000000013244017675015465 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/conf_fixture.py0000666000175100017510000000230113244017334020520 0ustar zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import fixtures from magnum.common import config import magnum.conf CONF = magnum.conf.CONF class ConfFixture(fixtures.Fixture): """Fixture to manage global conf settings.""" def _setUp(self): CONF.set_default('host', 'fake-mini') CONF.set_default('connection', "sqlite://", group='database') CONF.set_default('sqlite_synchronous', False, group='database') config.parse_args([], default_config_files=[]) self.addCleanup(CONF.reset) magnum-6.1.0/magnum/tests/functional/0000775000175100017510000000000013244017675017627 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/functional/swarm_mode/0000775000175100017510000000000013244017675021764 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/functional/swarm_mode/test_swarm_mode_python_client.py0000666000175100017510000001124113244017334030462 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import docker import requests import time import magnum.conf from magnum.tests.functional.python_client_base import ClusterTest CONF = magnum.conf.CONF class TestSwarmModeAPIs(ClusterTest): """This class will cover swarm cluster basic functional testing. Will test all kinds of container action with tls_disabled=False mode. 
""" coe = "swarm-mode" cluster_template_kwargs = { "tls_disabled": False, "network_driver": None, "volume_driver": None, "labels": {} } @classmethod def setUpClass(cls): super(TestSwarmModeAPIs, cls).setUpClass() cls.cluster_is_ready = None def setUp(self): super(TestSwarmModeAPIs, self).setUp() if self.cluster_is_ready is True: return # Note(eliqiao): In our test cases, docker client or magnum client will # try to connect to swarm service which is running on master node, # the endpoint is cluster.api_address(listen port is included), but the # service is not ready right after the cluster was created, sleep for # an acceptable time to wait for service being started. # This is required, without this any api call will fail as # 'ConnectionError: [Errno 111] Connection refused'. msg = ("If you see this error in the functional test, it means " "the docker service took too long to come up. This may not " "be an actual error, so an option is to rerun the " "functional test.") if self.cluster_is_ready is False: # In such case, no need to test below cases on gate, raise a # meanful exception message to indicate ca setup failed after # cluster creation, better to do a `recheck` # We don't need to test since cluster is not ready. raise Exception(msg) url = self.cs.clusters.get(self.cluster.uuid).api_address # Note(eliqiao): docker_utils.CONF.docker.default_timeout is 10, # tested this default configure option not works on gate, it will # cause container creation failed due to time out. # Debug more found that we need to pull image when the first time to # create a container, set it as 180s. 
docker_api_time_out = 180 tls_config = docker.tls.TLSConfig( client_cert=(self.cert_file, self.key_file), verify=self.ca_file ) self.docker_client = docker.DockerClient( base_url=url, tls=tls_config, version='auto', timeout=docker_api_time_out) self.docker_client_non_tls = docker.DockerClient( base_url=url, version='1.21', timeout=docker_api_time_out) def test_create_remove_service(self): # Create and remove a service using docker python SDK. # Wait 15 mins until reach running and 5 mins until the service # is removed. # Create a nginx service based on alpine linux service = self.docker_client.services.create( name='nginx', image='nginx:mainline-alpine') # wait for 15 mins to be running for i in range(90): if service.tasks()[0]['Status']['State'] == "running": break time.sleep(10) # Verify that it is running self.assertEqual('running', service.tasks()[0]['Status']['State']) # Remove the service and wait for 5 mins untils it is removed service.remove() for i in range(30): if self.docker_client.services.list() == []: break time.sleep(10) # Verify that it is deleted self.assertEqual([], self.docker_client.services.list()) def test_access_with_non_tls_client(self): """Try to contact master's docker using the tcp protocol. tcp returns ConnectionError whereas https returns SSLError. The default protocol we use in magnum is tcp which works fine docker python SDK docker>=2.0.0. 
""" try: self.docker_client_non_tls.info() except requests.exceptions.ConnectionError: pass magnum-6.1.0/magnum/tests/functional/swarm_mode/__init__.py0000666000175100017510000000000013244017334024055 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/functional/mesos/0000775000175100017510000000000013244017675020755 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/functional/mesos/test_mesos_python_client.py0000666000175100017510000000154213244017334026447 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.tests.functional.python_client_base import ClusterTest class TestClusterResource(ClusterTest): coe = 'mesos' cluster_template_kwargs = { "tls_disabled": True, "network_driver": 'docker', "volume_driver": 'rexray' } def test_cluster_create_and_delete(self): pass magnum-6.1.0/magnum/tests/functional/mesos/__init__.py0000666000175100017510000000000013244017334023046 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/functional/k8s/0000775000175100017510000000000013244017675020334 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/functional/k8s/test_magnum_python_client.py0000666000175100017510000000152113244017334026161 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.tests.functional.python_client_base import BaseMagnumClient class TestListResources(BaseMagnumClient): def test_cluster_model_list(self): self.assertIsNotNone(self.cs.cluster_templates.list()) def test_cluster_list(self): self.assertIsNotNone(self.cs.clusters.list()) magnum-6.1.0/magnum/tests/functional/k8s/__init__.py0000666000175100017510000000000013244017334022425 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/functional/k8s/test_k8s_python_client.py0000666000175100017510000000201713244017334025403 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from magnum.tests.functional import python_client_base as base class TestKubernetesAPIs(base.BaseK8sTest): cluster_template_kwargs = { "tls_disabled": False, "network_driver": 'flannel', "volume_driver": 'cinder', "docker_storage_driver": 'overlay', "labels": { "system_pods_initial_delay": 3600, "system_pods_timeout": 600, "admission_control_list": "", "kube_dashboard_enabled": False, } } magnum-6.1.0/magnum/tests/functional/common/0000775000175100017510000000000013244017675021117 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/functional/common/datagen.py0000666000175100017510000004533113244017334023074 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import random import socket import string import struct from tempest.lib.common.utils import data_utils from magnum.tests.functional.api.v1.models import bay_model from magnum.tests.functional.api.v1.models import baymodel_model from magnum.tests.functional.api.v1.models import baymodelpatch_model from magnum.tests.functional.api.v1.models import baypatch_model from magnum.tests.functional.api.v1.models import cert_model from magnum.tests.functional.api.v1.models import cluster_model from magnum.tests.functional.api.v1.models import cluster_template_model from magnum.tests.functional.api.v1.models import cluster_templatepatch_model from magnum.tests.functional.api.v1.models import clusterpatch_model from magnum.tests.functional.common import config def random_int(min_int=1, max_int=100): return random.randrange(min_int, max_int) def gen_coe_dep_network_driver(coe): allowed_driver_types = { 'kubernetes': ['flannel', None], 'swarm': ['docker', 'flannel', None], 'swarm-mode': ['docker', None], 'mesos': ['docker', None], } driver_types = allowed_driver_types[coe] return driver_types[random.randrange(0, len(driver_types))] def gen_coe_dep_volume_driver(coe): allowed_driver_types = { 'kubernetes': ['cinder', None], 'swarm': ['rexray', None], 'swarm-mode': ['rexray', None], 'mesos': ['rexray', None], } driver_types = allowed_driver_types[coe] return driver_types[random.randrange(0, len(driver_types))] def gen_random_port(): return random_int(49152, 65535) def gen_docker_volume_size(min_int=3, max_int=5): return random_int(min_int, max_int) def gen_fake_ssh_pubkey(): chars = "".join( random.choice(string.ascii_uppercase + string.ascii_letters + string.digits + '/+=') for _ in range(372)) return "ssh-rsa " + chars def gen_random_ip(): return socket.inet_ntoa(struct.pack('>I', random.randint(1, 0xffffffff))) def gen_url(scheme="http", domain="example.com", port=80): return "%s://%s:%s" % (scheme, domain, port) def gen_http_proxy(): return gen_url(port=gen_random_port()) def 
gen_https_proxy(): return gen_url(scheme="https", port=gen_random_port()) def gen_no_proxy(): return ",".join(gen_random_ip() for x in range(3)) def baymodel_data(**kwargs): """Generates random baymodel data Keypair and image id cannot be random for the baymodel to be valid due to validations for the presence of keypair and image id prior to baymodel creation. :param keypair_id: keypair name :param image_id: image id or name :returns: BayModelEntity with generated data """ data = { "name": data_utils.rand_name('bay'), "coe": "swarm-mode", "tls_disabled": False, "network_driver": None, "volume_driver": None, "labels": {}, "public": False, "dns_nameserver": "8.8.8.8", "flavor_id": data_utils.rand_name('bay'), "master_flavor_id": data_utils.rand_name('bay'), "external_network_id": config.Config.nic_id, "keypair_id": data_utils.rand_name('bay'), "image_id": data_utils.rand_name('bay') } data.update(kwargs) model = baymodel_model.BayModelEntity.from_dict(data) return model def baymodel_replace_patch_data(path, value=data_utils.rand_name('bay')): """Generates random baymodel patch data :param path: path to replace :param value: value to replace in patch :returns: BayModelPatchCollection with generated data """ data = [{ "path": path, "value": value, "op": "replace" }] return baymodelpatch_model.BayModelPatchCollection.from_dict(data) def baymodel_remove_patch_data(path): """Generates baymodel patch data by removing value :param path: path to remove :returns: BayModelPatchCollection with generated data """ data = [{ "path": path, "op": "remove" }] return baymodelpatch_model.BayModelPatchCollection.from_dict(data) def baymodel_data_with_valid_keypair_image_flavor(): """Generates random baymodel data with valid keypair,image and flavor :returns: BayModelEntity with generated data """ return baymodel_data(keypair_id=config.Config.keypair_id, image_id=config.Config.image_id, flavor_id=config.Config.flavor_id, master_flavor_id=config.Config.master_flavor_id) def 
baymodel_data_with_missing_image(): """Generates random baymodel data with missing image :returns: BayModelEntity with generated data """ return baymodel_data(keypair_id=config.Config.keypair_id, flavor_id=config.Config.flavor_id, master_flavor_id=config.Config.master_flavor_id) def baymodel_data_with_missing_flavor(): """Generates random baymodel data with missing flavor :returns: BayModelEntity with generated data """ return baymodel_data(keypair_id=config.Config.keypair_id, image_id=config.Config.image_id) def baymodel_data_with_missing_keypair(): """Generates random baymodel data with missing keypair :returns: BayModelEntity with generated data """ return baymodel_data(image_id=config.Config.image_id, flavor_id=config.Config.flavor_id, master_flavor_id=config.Config.master_flavor_id) def baymodel_valid_data_with_specific_coe(coe): """Generates random baymodel data with valid keypair and image :param coe: coe :returns: BayModelEntity with generated data """ return baymodel_data(keypair_id=config.Config.keypair_id, image_id=config.Config.image_id, coe=coe) def valid_swarm_mode_baymodel(is_public=False): """Generates a valid swarm mode baymodel with valid data :returns: BayModelEntity with generated data """ return baymodel_data(image_id=config.Config.image_id, flavor_id=config.Config.flavor_id, public=is_public, dns_nameserver=config.Config.dns_nameserver, master_flavor_id=config.Config.master_flavor_id, keypair_id=config.Config.keypair_id, coe="swarm-mode", cluster_distro=None, external_network_id=config.Config.nic_id, http_proxy=None, https_proxy=None, no_proxy=None, network_driver=None, volume_driver=None, labels={}, tls_disabled=False) def bay_data(name=data_utils.rand_name('bay'), baymodel_id=data_utils.rand_uuid(), node_count=random_int(1, 5), discovery_url=gen_random_ip(), bay_create_timeout=random_int(1, 30), master_count=random_int(1, 5)): """Generates random bay data BayModel_id cannot be random for the bay to be valid due to validations for the 
presence of baymodel prior to baymodel creation. :param name: bay name (must be unique) :param baymodel_id: baymodel unique id (must already exist) :param node_count: number of agents for bay :param discovery_url: url provided for node discovery :param bay_create_timeout: timeout in minutes for bay create :param master_count: number of master nodes for the bay :returns: BayEntity with generated data """ data = { "name": name, "baymodel_id": baymodel_id, "node_count": node_count, "discovery_url": None, "bay_create_timeout": bay_create_timeout, "master_count": master_count } model = bay_model.BayEntity.from_dict(data) return model def valid_bay_data(baymodel_id, name=data_utils.rand_name('bay'), node_count=1, master_count=1, bay_create_timeout=None): """Generates random bay data with valid :param baymodel_id: baymodel unique id that already exists :param name: bay name (must be unique) :param node_count: number of agents for bay :returns: BayEntity with generated data """ return bay_data(baymodel_id=baymodel_id, name=name, master_count=master_count, node_count=node_count, bay_create_timeout=bay_create_timeout) def bay_name_patch_data(name=data_utils.rand_name('bay')): """Generates random baymodel patch data :param name: name to replace in patch :returns: BayPatchCollection with generated data """ data = [{ "path": "/name", "value": name, "op": "replace" }] return baypatch_model.BayPatchCollection.from_dict(data) def bay_api_addy_patch_data(address='0.0.0.0'): """Generates random bay patch data :param name: name to replace in patch :returns: BayPatchCollection with generated data """ data = [{ "path": "/api_address", "value": address, "op": "replace" }] return baypatch_model.BayPatchCollection.from_dict(data) def bay_node_count_patch_data(node_count=2): """Generates random bay patch data :param name: name to replace in patch :returns: BayPatchCollection with generated data """ data = [{ "path": "/node_count", "value": node_count, "op": "replace" }] return 
baypatch_model.BayPatchCollection.from_dict(data) def cert_data(cluster_uuid, csr_data): data = { "cluster_uuid": cluster_uuid, "csr": csr_data} model = cert_model.CertEntity.from_dict(data) return model def cluster_template_data(**kwargs): """Generates random cluster_template data Keypair and image id cannot be random for the cluster_template to be valid due to validations for the presence of keypair and image id prior to cluster_template creation. :param keypair_id: keypair name :param image_id: image id or name :returns: ClusterTemplateEntity with generated data """ data = { "name": data_utils.rand_name('cluster'), "coe": "swarm-mode", "tls_disabled": False, "network_driver": None, "volume_driver": None, "labels": {}, "public": False, "dns_nameserver": "8.8.8.8", "flavor_id": data_utils.rand_name('cluster'), "master_flavor_id": data_utils.rand_name('cluster'), "external_network_id": config.Config.nic_id, "keypair_id": data_utils.rand_name('cluster'), "image_id": data_utils.rand_name('cluster') } data.update(kwargs) model = cluster_template_model.ClusterTemplateEntity.from_dict(data) return model def cluster_template_replace_patch_data(path, value=data_utils.rand_name('cluster')): """Generates random ClusterTemplate patch data :param path: path to replace :param value: value to replace in patch :returns: ClusterTemplatePatchCollection with generated data """ data = [{ "path": path, "value": value, "op": "replace" }] collection = cluster_templatepatch_model.ClusterTemplatePatchCollection return collection.from_dict(data) def cluster_template_remove_patch_data(path): """Generates ClusterTemplate patch data by removing value :param path: path to remove :returns: ClusterTemplatePatchCollection with generated data """ data = [{ "path": path, "op": "remove" }] collection = cluster_templatepatch_model.ClusterTemplatePatchCollection return collection.from_dict(data) def cluster_template_name_patch_data(name=data_utils.rand_name('cluster')): """Generates random 
cluster_template patch data :param name: name to replace in patch :returns: ClusterTemplatePatchCollection with generated data """ data = [{ "path": "/name", "value": name, "op": "replace" }] collection = cluster_templatepatch_model.ClusterTemplatePatchCollection return collection.from_dict(data) def cluster_template_flavor_patch_data(flavor=data_utils.rand_name('cluster')): """Generates random cluster_template patch data :param flavor: flavor to replace in patch :returns: ClusterTemplatePatchCollection with generated data """ data = [{ "path": "/flavor_id", "value": flavor, "op": "replace" }] collection = cluster_templatepatch_model.ClusterTemplatePatchCollection return collection.from_dict(data) def cluster_template_data_with_valid_keypair_image_flavor(): """Generates random clustertemplate data with valid data :returns: ClusterTemplateEntity with generated data """ master_flavor = config.Config.master_flavor_id return cluster_template_data(keypair_id=config.Config.keypair_id, image_id=config.Config.image_id, flavor_id=config.Config.flavor_id, master_flavor_id=master_flavor) def cluster_template_data_with_missing_image(): """Generates random cluster_template data with missing image :returns: ClusterTemplateEntity with generated data """ return cluster_template_data( keypair_id=config.Config.keypair_id, flavor_id=config.Config.flavor_id, master_flavor_id=config.Config.master_flavor_id) def cluster_template_data_with_missing_flavor(): """Generates random cluster_template data with missing flavor :returns: ClusterTemplateEntity with generated data """ return cluster_template_data(keypair_id=config.Config.keypair_id, image_id=config.Config.image_id) def cluster_template_data_with_missing_keypair(): """Generates random cluster_template data with missing keypair :returns: ClusterTemplateEntity with generated data """ return cluster_template_data( image_id=config.Config.image_id, flavor_id=config.Config.flavor_id, master_flavor_id=config.Config.master_flavor_id) def 
cluster_template_valid_data_with_specific_coe(coe): """Generates random cluster_template data with valid keypair and image :param coe: coe :returns: ClusterTemplateEntity with generated data """ return cluster_template_data(keypair_id=config.Config.keypair_id, image_id=config.Config.image_id, coe=coe) def valid_swarm_mode_cluster_template(is_public=False): """Generates a valid swarm-mode cluster_template with valid data :returns: ClusterTemplateEntity with generated data """ master_flavor_id = config.Config.master_flavor_id return cluster_template_data(image_id=config.Config.image_id, flavor_id=config.Config.flavor_id, public=is_public, dns_nameserver=config.Config.dns_nameserver, master_flavor_id=master_flavor_id, coe="swarm-mode", cluster_distro=None, external_network_id=config.Config.nic_id, http_proxy=None, https_proxy=None, no_proxy=None, network_driver=None, volume_driver=None, labels={}, tls_disabled=False) def cluster_data(name=data_utils.rand_name('cluster'), cluster_template_id=data_utils.rand_uuid(), node_count=random_int(1, 5), discovery_url=gen_random_ip(), create_timeout=random_int(1, 30), master_count=random_int(1, 5)): """Generates random cluster data cluster_template_id cannot be random for the cluster to be valid due to validations for the presence of clustertemplate prior to clustertemplate creation. 
:param name: cluster name (must be unique) :param cluster_template_id: clustertemplate unique id (must already exist) :param node_count: number of agents for cluster :param discovery_url: url provided for node discovery :param create_timeout: timeout in minutes for cluster create :param master_count: number of master nodes for the cluster :returns: ClusterEntity with generated data """ data = { "name": name, "cluster_template_id": cluster_template_id, "keypair": config.Config.keypair_id, "node_count": node_count, "discovery_url": None, "create_timeout": create_timeout, "master_count": master_count } model = cluster_model.ClusterEntity.from_dict(data) return model def valid_cluster_data(cluster_template_id, name=data_utils.rand_name('cluster'), node_count=1, master_count=1, create_timeout=None): """Generates random cluster data with valid :param cluster_template_id: clustertemplate unique id that already exists :param name: cluster name (must be unique) :param node_count: number of agents for cluster :returns: ClusterEntity with generated data """ return cluster_data(cluster_template_id=cluster_template_id, name=name, master_count=master_count, node_count=node_count, create_timeout=create_timeout) def cluster_name_patch_data(name=data_utils.rand_name('cluster')): """Generates random clustertemplate patch data :param name: name to replace in patch :returns: ClusterPatchCollection with generated data """ data = [{ "path": "/name", "value": name, "op": "replace" }] return clusterpatch_model.ClusterPatchCollection.from_dict(data) def cluster_api_addy_patch_data(address='0.0.0.0'): """Generates random cluster patch data :param name: name to replace in patch :returns: ClusterPatchCollection with generated data """ data = [{ "path": "/api_address", "value": address, "op": "replace" }] return clusterpatch_model.ClusterPatchCollection.from_dict(data) def cluster_node_count_patch_data(node_count=2): """Generates random cluster patch data :param name: name to replace in patch 
:returns: ClusterPatchCollection with generated data """ data = [{ "path": "/node_count", "value": node_count, "op": "replace" }] return clusterpatch_model.ClusterPatchCollection.from_dict(data) magnum-6.1.0/magnum/tests/functional/common/client.py0000666000175100017510000000314513244017334022744 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six from six.moves.urllib import parse from tempest.lib.common import rest_client from magnum.tests.functional.common import config @six.add_metaclass(abc.ABCMeta) class MagnumClient(rest_client.RestClient): """Abstract class responsible for setting up auth provider""" def __init__(self, auth_provider): super(MagnumClient, self).__init__( auth_provider=auth_provider, service='container-infra', region=config.Config.region, disable_ssl_certificate_validation=True ) @classmethod def deserialize(cls, resp, body, model_type): return resp, model_type.from_json(body) @property def tenant_id(self): return self.client.tenant_id @classmethod def add_filters(cls, url, filters): """add_filters adds dict values (filters) to url as query parameters :param url: base URL for the request :param filters: dict with var:val pairs to add as parameters to URL :returns: url string """ return url + "?" 
+ parse(filters) magnum-6.1.0/magnum/tests/functional/common/manager.py0000666000175100017510000000571613244017334023106 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import clients from tempest.common import credentials_factory as common_creds from magnum.tests.functional.api.v1.clients import bay_client from magnum.tests.functional.api.v1.clients import baymodel_client from magnum.tests.functional.api.v1.clients import cert_client from magnum.tests.functional.api.v1.clients import cluster_client from magnum.tests.functional.api.v1.clients import cluster_template_client from magnum.tests.functional.api.v1.clients import magnum_service_client from magnum.tests.functional.common import client from magnum.tests.functional.common import config class Manager(clients.Manager): def __init__(self, credentials=None, request_type=None): if not credentials: credentials = common_creds.get_configured_credentials( 'identity_admin') super(Manager, self).__init__(credentials) self.auth_provider.orig_base_url = self.auth_provider.base_url self.auth_provider.base_url = self.bypassed_base_url auth = self.auth_provider if request_type == 'baymodel': self.client = baymodel_client.BayModelClient(auth) elif request_type == 'bay': self.client = bay_client.BayClient(auth) elif request_type == 'cert': self.client = cert_client.CertClient(auth) elif request_type == 'cluster_template': self.client = cluster_template_client.ClusterTemplateClient(auth) elif 
request_type == 'cluster': self.client = cluster_client.ClusterClient(auth) elif request_type == 'service': self.client = magnum_service_client.MagnumServiceClient(auth) else: self.client = client.MagnumClient(auth) def bypassed_base_url(self, filters, auth_data=None): if (config.Config.magnum_url and filters['service'] == 'container-infra'): return config.Config.magnum_url return self.auth_provider.orig_base_url(filters, auth_data=auth_data) class DefaultManager(Manager): def __init__(self, credentials, request_type=None): super(DefaultManager, self).__init__(credentials, request_type) class AltManager(Manager): def __init__(self, credentials, request_type=None): super(AltManager, self).__init__(credentials, request_type) class AdminManager(Manager): def __init__(self, credentials, request_type=None): super(AdminManager, self).__init__(credentials, request_type) magnum-6.1.0/magnum/tests/functional/common/base.py0000777000175100017510000000670713244017334022412 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import logging
import os
import subprocess

from tempest.lib import base

import magnum

# Helper script (relative to the magnum source tree) that scp's logs off
# a cluster node; invoked per node by copy_logs_handler.
COPY_LOG_HELPER = "magnum/tests/contrib/copy_instance_logs.sh"


class BaseMagnumTest(base.BaseTestCase):
    """Sets up configuration required for functional tests"""

    LOG = logging.getLogger(__name__)

    def __init__(self, *args, **kwargs):
        super(BaseMagnumTest, self).__init__(*args, **kwargs)

    @classmethod
    def copy_logs_handler(cls, get_nodes_fn, coe, keypair):
        """Copy logs closure.

        This method will retrieve all running nodes for a specified cluster
        and copy addresses from there locally.

        :param get_nodes_fn: function that takes no parameters and returns
            a list of node IPs which are in such form:
                [[master_nodes], [slave_nodes]].
        :param coe: the COE type of the nodes
        """

        def int_copy_logs():
            # Best-effort: any failure is logged, never raised, so log
            # collection cannot break the calling test's teardown.
            try:
                cls.LOG.info("Copying logs...")
                func_name = "test"
                msg = ("Failed to copy logs for cluster")
                nodes_addresses = get_nodes_fn()

                master_nodes = nodes_addresses[0]
                slave_nodes = nodes_addresses[1]

                # Root of the magnum source tree, derived from the
                # installed package location.
                base_path = os.path.split(os.path.dirname(
                    os.path.abspath(magnum.__file__)))[0]
                full_location = os.path.join(base_path, COPY_LOG_HELPER)

                def do_copy_logs(prefix, nodes_address):
                    if not nodes_address:
                        return

                    msg = "copy logs from : %s" % ','.join(nodes_address)
                    cls.LOG.info(msg)
                    log_name = prefix + "-" + func_name
                    for node_address in nodes_address:
                        try:
                            cls.LOG.debug("running %s", full_location)
                            cls.LOG.debug("keypair: %s", keypair)
                            subprocess.check_call([
                                full_location,
                                node_address,
                                coe,
                                log_name,
                                str(keypair)
                            ])
                        except Exception:
                            # NOTE: logs the short msg first, then rebinds
                            # msg with the detailed failure for the
                            # exception record below.
                            cls.LOG.error(msg)
                            msg = (
                                "failed to copy from %(node_address)s "
                                "to %(base_path)s%(log_name)s-"
                                "%(node_address)s" %
                                {'node_address': node_address,
                                 'base_path': "/opt/stack/logs/cluster-nodes/",
                                 'log_name': log_name})
                            cls.LOG.exception(msg)

                do_copy_logs('master', master_nodes)
                do_copy_logs('node', slave_nodes)
            except Exception:
                cls.LOG.exception(msg)

        return int_copy_logs
magnum-6.1.0/magnum/tests/functional/common/config.py0000666000175100017510000001264313244017334022736
0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import warnings from tempest import config from oslo_config import cfg CONF = config.CONF class Config(object): """Parses configuration to attributes required for auth and test data""" @classmethod def set_admin_creds(cls, config): cls.admin_user = CONF.auth.admin_username cls.admin_passwd = CONF.auth.admin_password # NOTE(toabctl): also allow the old style tempest definition try: cls.admin_tenant = CONF.auth.admin_project_name except cfg.NoSuchOptError: cls.admin_tenant = CONF.auth.admin_tenant_name warnings.warn("the config option 'admin_tenant_name' from the " "'auth' section is deprecated. Please switch " "to 'admin_project_name'.") @classmethod def set_user_creds(cls, config): # normal user creds # Fixme(eliqiao): this is quick workaround to passing tempest # legacy credentials provider is removed by tempest # I8c24cd17f643083dde71ab2bd2a38417c54aeccb. # TODO(eliqiao): find a way to using an accounts.yaml file # check Ia5132c5cb32355d6f26b8acdd92a0e55a2c19f41 cls.user = CONF.auth.admin_username cls.passwd = CONF.auth.admin_password # NOTE(toabctl): also allow the old style tempest definition try: cls.tenant = CONF.auth.admin_project_name except cfg.NoSuchOptError: cls.tenant = CONF.auth.admin_tenant_name warnings.warn("the config option 'admin_tenant_name' from the " "'auth' section is deprecated. 
Please switch " "to 'admin_project_name'.") @classmethod def set_auth_version(cls, config): # auth version for client authentication cls.auth_version = CONF.identity.auth_version @classmethod def set_auth_url(cls, config): # auth_url for client authentication if cls.auth_version == 'v3': cls.auth_v3_url = CONF.identity.uri_v3 else: if 'uri' not in CONF.identity: raise Exception('config missing auth_url key') cls.auth_url = CONF.identity.uri @classmethod def set_admin_role(cls, config): # admin_role for client authentication if cls.auth_version == 'v3': cls.admin_role = CONF.identity.admin_role else: cls.admin_role = 'admin' @classmethod def set_region(cls, config): if 'region' in CONF.identity: cls.region = CONF.identity.region else: cls.region = 'RegionOne' @classmethod def set_image_id(cls, config): if 'image_id' not in CONF.magnum: raise Exception('config missing image_id key') cls.image_id = CONF.magnum.image_id @classmethod def set_nic_id(cls, config): if 'nic_id' not in CONF.magnum: raise Exception('config missing nic_id key') cls.nic_id = CONF.magnum.nic_id @classmethod def set_keypair_id(cls, config): if 'keypair_id' not in CONF.magnum: raise Exception('config missing keypair_id key') cls.keypair_id = CONF.magnum.keypair_id @classmethod def set_flavor_id(cls, config): if 'flavor_id' not in CONF.magnum: raise Exception('config missing flavor_id key') cls.flavor_id = CONF.magnum.flavor_id @classmethod def set_magnum_url(cls, config): cls.magnum_url = CONF.magnum.get('magnum_url', None) @classmethod def set_master_flavor_id(cls, config): if 'master_flavor_id' not in CONF.magnum: raise Exception('config missing master_flavor_id key') cls.master_flavor_id = CONF.magnum.master_flavor_id @classmethod def set_csr_location(cls, config): if 'csr_location' not in CONF.magnum: raise Exception('config missing csr_location key') cls.csr_location = CONF.magnum.csr_location @classmethod def set_dns_nameserver(cls, config): if 'dns_nameserver' not in CONF.magnum: raise 
Exception('config missing dns_nameserver') cls.dns_nameserver = CONF.magnum.dns_nameserver @classmethod def set_copy_logs(cls, config): if 'copy_logs' not in CONF.magnum: cls.copy_logs = True cls.copy_logs = str(CONF.magnum.copy_logs).lower() == 'true' @classmethod def setUp(cls): cls.set_admin_creds(config) cls.set_user_creds(config) cls.set_auth_version(config) cls.set_auth_url(config) cls.set_admin_role(config) cls.set_region(config) cls.set_image_id(config) cls.set_nic_id(config) cls.set_keypair_id(config) cls.set_flavor_id(config) cls.set_magnum_url(config) cls.set_master_flavor_id(config) cls.set_csr_location(config) cls.set_dns_nameserver(config) cls.set_copy_logs(config) magnum-6.1.0/magnum/tests/functional/common/models.py0000666000175100017510000000421013244017343022743 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json


class BaseModel(object):
    """Superclass responsible for converting json data to/from model"""

    @classmethod
    def from_json(cls, json_str):
        # Parse a JSON string and delegate to from_dict.
        return cls.from_dict(json.loads(json_str))

    def to_json(self):
        return json.dumps(self.to_dict())

    @classmethod
    def from_dict(cls, data):
        # Every top-level key becomes an attribute on a new instance.
        model = cls()
        for key in data:
            setattr(model, key, data.get(key))
        return model

    def to_dict(self):
        result = {}
        for key in self.__dict__:
            result[key] = getattr(self, key)
            # Recursively serialize nested models; other values are
            # passed through unchanged.
            if isinstance(result[key], BaseModel):
                result[key] = result[key].to_dict()
        return result

    def __str__(self):
        return "%s" % self.to_dict()


class EntityModel(BaseModel):
    """Superclass responsible for converting dict to instance of model"""

    # Subclasses define ENTITY_NAME (the wrapper key) and MODEL_TYPE
    # (the BaseModel subclass to build from the wrapped dict).
    @classmethod
    def from_dict(cls, data):
        model = super(EntityModel, cls).from_dict(data)
        if hasattr(model, cls.ENTITY_NAME):
            val = getattr(model, cls.ENTITY_NAME)
            setattr(model, cls.ENTITY_NAME, cls.MODEL_TYPE.from_dict(val))
        return model


class CollectionModel(BaseModel):
    """Superclass responsible for converting dict to list of models"""

    # Subclasses define COLLECTION_NAME (the list key) and MODEL_TYPE
    # (the BaseModel subclass for each element).
    @classmethod
    def from_dict(cls, data):
        model = super(CollectionModel, cls).from_dict(data)

        collection = []
        if hasattr(model, cls.COLLECTION_NAME):
            for d in getattr(model, cls.COLLECTION_NAME):
                collection.append(cls.MODEL_TYPE.from_dict(d))
            setattr(model, cls.COLLECTION_NAME, collection)

        return model
magnum-6.1.0/magnum/tests/functional/common/__init__.py0000666000175100017510000000000013244017334023210 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/functional/common/utils.py0000666000175100017510000000702713244017334022631 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections
import functools
import inspect
import time
import types


def def_method(f, *args, **kwargs):
    # Bind fixed args/kwargs to f, returning a zero-argument (besides
    # self) method; used by parameterized_class to freeze each data set.
    @functools.wraps(f)
    def new_method(self):
        return f(self, *args, **kwargs)
    return new_method


def parameterized_class(cls):
    """A class decorator for running parameterized test cases.

    Mark your class with @parameterized_class.
    Mark your test cases with @parameterized.
    """
    test_functions = inspect.getmembers(cls, predicate=inspect.ismethod)
    # NOTE(review): this condition skips only test_* methods WITHOUT
    # _test_data; any other method reaching the delattr below would be
    # removed from the class.  Confirm intent before relying on this with
    # non-test methods.
    for (name, f) in test_functions:
        if name.startswith('test_') and not hasattr(f, '_test_data'):
            continue

        # remove the original test function from the class
        delattr(cls, name)

        # add a new test function to the class for each entry in f._test_data
        for tag, args in f._test_data.items():
            new_name = "{0}_{1}".format(f.__name__, tag)
            if hasattr(cls, new_name):
                raise Exception(
                    "Parameterized test case '{0}.{1}' created from '{0}.{2}' "
                    "already exists".format(cls.__name__, new_name, name))

            # Using `def new_method(self): f(self, **args)` is not sufficient
            # (all new_methods use the same args value due to late binding).
            # Instead, use this factory function.
            new_method = def_method(f, **args)

            # To add a method to a class, available for all instances:
            #   MyClass.method = types.MethodType(f, None, MyClass)
            # NOTE(review): the 3-argument MethodType form is Python 2
            # only — revisit if this module is run under Python 3.
            setattr(cls, new_name, types.MethodType(new_method, None, cls))
    return cls


def parameterized(data):
    """A function decorator for parameterized test cases.

    Example:

        @parameterized({
            'zero': dict(val=0),
            'one': dict(val=1),
        })
        def test_val(self, val):
            self.assertEqual(val, self.get_val())

    The above will generate two test cases:
        `test_val_zero` which runs with val=0
        `test_val_one` which runs with val=1

    :param data: A dictionary that looks like {tag: {arg1: val1, ...}}
    """
    def wrapped(f):
        f._test_data = data
        return f
    return wrapped


def wait_for_condition(condition, interval=1, timeout=40):
    # Poll `condition` every `interval` seconds until it returns a truthy
    # value, or raise once `timeout` seconds have elapsed.
    start_time = time.time()
    end_time = time.time() + timeout
    while time.time() < end_time:
        result = condition()
        if result:
            return result
        time.sleep(interval)
    raise Exception(("Timed out after %s seconds.  Started " +
                     "on %s and ended on %s") % (timeout, start_time,
                                                 end_time))


def memoized(func):
    """A decorator to cache function's return value"""
    cache = {}

    @functools.wraps(func)
    def wrapper(*args):
        # NOTE(review): collections.Hashable is an alias removed in
        # Python 3.10 (moved to collections.abc in 3.3) — update when
        # dropping Python 2 support.
        if not isinstance(args, collections.Hashable):
            # args is not cacheable. just call the function.
            return func(*args)
        if args in cache:
            return cache[args]
        else:
            value = func(*args)
            cache[args] = value
            return value
    return wrapper
magnum-6.1.0/magnum/tests/functional/swarm/0000775000175100017510000000000013244017675020760 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/functional/swarm/test_swarm_python_client.py0000666000175100017510000001451013244017334026454 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time from docker import errors from requests import exceptions as req_exceptions from magnum.common import docker_utils import magnum.conf from magnum.tests.functional.python_client_base import ClusterTest CONF = magnum.conf.CONF class TestSwarmAPIs(ClusterTest): """This class will cover swarm cluster basic functional testing. Will test all kinds of container action with tls_disabled=False mode. """ coe = "swarm" cluster_template_kwargs = { "tls_disabled": False, "network_driver": None, "volume_driver": None, "labels": {} } @classmethod def setUpClass(cls): super(TestSwarmAPIs, cls).setUpClass() cls.cluster_is_ready = None def setUp(self): super(TestSwarmAPIs, self).setUp() if self.cluster_is_ready is True: return # Note(eliqiao): In our test cases, docker client or magnum client will # try to connect to swarm service which is running on master node, # the endpoint is cluster.api_address(listen port is included), but the # service is not ready right after the cluster was created, sleep for # an acceptable time to wait for service being started. # This is required, without this any api call will fail as # 'ConnectionError: [Errno 111] Connection refused'. msg = ("If you see this error in the functional test, it means " "the docker service took too long to come up. This may not " "be an actual error, so an option is to rerun the " "functional test.") if self.cluster_is_ready is False: # In such case, no need to test below cases on gate, raise a # meanful exception message to indicate ca setup failed after # cluster creation, better to do a `recheck` # We don't need to test since cluster is not ready. raise Exception(msg) url = self.cs.clusters.get(self.cluster.uuid).api_address # Note(eliqiao): docker_utils.CONF.docker.default_timeout is 10, # tested this default configure option not works on gate, it will # cause container creation failed due to time out. 
# Debug more found that we need to pull image when the first time to # create a container, set it as 180s. docker_api_time_out = 180 self.docker_client = docker_utils.DockerHTTPClient( url, CONF.docker.docker_remote_api_version, docker_api_time_out, client_key=self.key_file, client_cert=self.cert_file, ca_cert=self.ca_file) self.docker_client_non_tls = docker_utils.DockerHTTPClient( url, CONF.docker.docker_remote_api_version, docker_api_time_out) def _container_operation(self, func, *args, **kwargs): # NOTE(hongbin): Swarm cluster occasionally aborts the connection, # so we re-try the operation several times here. In long-term, we # need to investigate the cause of this issue. See bug #1583337. for i in range(150): try: self.LOG.info("Calling function " + func.__name__) return func(*args, **kwargs) except req_exceptions.ConnectionError: self.LOG.info("Connection aborted on calling Swarm API. " "Will retry in 2 seconds.") except errors.APIError as e: if e.response.status_code != 500: raise self.LOG.info("Internal Server Error: " + str(e)) time.sleep(2) raise Exception("Cannot connect to Swarm API.") def _create_container(self, **kwargs): image = kwargs.get('image', 'docker.io/cirros') command = kwargs.get('command', 'ping -c 1000 8.8.8.8') return self._container_operation(self.docker_client.create_container, image=image, command=command) def test_start_stop_container_from_api(self): # Leverage docker client to create a container on the cluster we # created, and try to start and stop it then delete it. 
resp = self._create_container(image="docker.io/cirros", command="ping -c 1000 8.8.8.8") resp = self._container_operation(self.docker_client.containers, all=True) container_id = resp[0].get('Id') self._container_operation(self.docker_client.start, container=container_id) resp = self._container_operation(self.docker_client.containers) self.assertEqual(1, len(resp)) resp = self._container_operation(self.docker_client.inspect_container, container=container_id) self.assertTrue(resp['State']['Running']) self._container_operation(self.docker_client.stop, container=container_id) resp = self._container_operation(self.docker_client.inspect_container, container=container_id) self.assertEqual(False, resp['State']['Running']) self._container_operation(self.docker_client.remove_container, container=container_id) resp = self._container_operation(self.docker_client.containers) self.assertEqual([], resp) def test_access_with_non_tls_client(self): """Try to contact master's docker using the TCP protocol. TCP returns ConnectionError whereas HTTPS returns SSLError. The default protocol we use in magnum is TCP which works fine docker python SDK docker>=2.0.0. 
""" self.assertRaises(req_exceptions.ConnectionError, self.docker_client_non_tls.containers) magnum-6.1.0/magnum/tests/functional/swarm/__init__.py0000666000175100017510000000000013244017334023051 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/functional/api/0000775000175100017510000000000013244017675020400 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/functional/api/v1/0000775000175100017510000000000013244017675020726 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/functional/api/v1/models/0000775000175100017510000000000013244017675022211 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/functional/api/v1/models/cluster_templatepatch_model.py0000666000175100017510000000465313244017334030341 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json from magnum.tests.functional.common import models class ClusterTemplatePatchData(models.BaseModel): """Data that encapsulates clustertemplatepatch attributes""" pass class ClusterTemplatePatchEntity(models.EntityModel): """Model that represents a single instance of ClusterTemplatePatchData""" ENTITY_NAME = 'clustertemplatepatch' MODEL_TYPE = ClusterTemplatePatchData class ClusterTemplatePatchCollection(models.CollectionModel): """Model that represents a list of ClusterTemplatePatchData objects""" MODEL_TYPE = ClusterTemplatePatchData COLLECTION_NAME = 'clustertemplatepatchlist' def to_json(self): """Converts ClusterTemplatePatchCollection to json Retrieves list from COLLECTION_NAME attribute and converts each object to dict, appending it to a list. Then converts the entire list to json This is required due to COLLECTION_NAME holding a list of objects that needed to be converted to dict individually :returns: json object """ data = getattr(self, ClusterTemplatePatchCollection.COLLECTION_NAME) collection = [] for d in data: collection.append(d.to_dict()) return json.dumps(collection) @classmethod def from_dict(cls, data): """Converts dict to ClusterTemplatePatchData Converts data dict to list of ClusterTemplatePatchData objects and stores it in COLLECTION_NAME Example of dict data: [{ "path": "/name", "value": "myname", "op": "replace" }] :param data: dict of patch data :returns: json object """ model = cls() collection = [] for d in data: collection.append(cls.MODEL_TYPE.from_dict(d)) setattr(model, cls.COLLECTION_NAME, collection) return model magnum-6.1.0/magnum/tests/functional/api/v1/models/bay_model.py0000666000175100017510000000176613244017334024522 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.tests.functional.common import models class BayData(models.BaseModel): """Data that encapsulates bay attributes""" pass class BayEntity(models.EntityModel): """Entity Model that represents a single instance of BayData""" ENTITY_NAME = 'bay' MODEL_TYPE = BayData class BayCollection(models.CollectionModel): """Collection Model that represents a list of BayData objects""" COLLECTION_NAME = 'baylists' MODEL_TYPE = BayData magnum-6.1.0/magnum/tests/functional/api/v1/models/cluster_id_model.py0000666000175100017510000000154613244017334026100 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from magnum.tests.functional.common import models class ClusterIdData(models.BaseModel): """Data that encapsulates ClusterId attributes""" pass class ClusterIdEntity(models.EntityModel): """Entity Model that represents a single instance of CertData""" ENTITY_NAME = 'clusterid' MODEL_TYPE = ClusterIdData magnum-6.1.0/magnum/tests/functional/api/v1/models/baymodel_model.py0000666000175100017510000000205013244017334025526 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.tests.functional.common import models class BayModelData(models.BaseModel): """Data that encapsulates baymodel attributes""" pass class BayModelEntity(models.EntityModel): """Entity Model that represents a single instance of BayModelData""" ENTITY_NAME = 'baymodel' MODEL_TYPE = BayModelData class BayModelCollection(models.CollectionModel): """Collection Model that represents a list of BayModelData objects""" COLLECTION_NAME = 'baymodellists' MODEL_TYPE = BayModelData magnum-6.1.0/magnum/tests/functional/api/v1/models/cluster_template_model.py0000666000175100017510000000214713244017334027315 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.tests.functional.common import models class ClusterTemplateData(models.BaseModel): """Data that encapsulates clustertemplate attributes""" pass class ClusterTemplateEntity(models.EntityModel): """Entity Model that represents a single instance of ClusterTemplateData""" ENTITY_NAME = 'clustertemplate' MODEL_TYPE = ClusterTemplateData class ClusterTemplateCollection(models.CollectionModel): """Collection that represents a list of ClusterTemplateData objects""" COLLECTION_NAME = 'clustertemplatelists' MODEL_TYPE = ClusterTemplateData magnum-6.1.0/magnum/tests/functional/api/v1/models/cluster_model.py0000666000175100017510000000203513244017334025416 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from magnum.tests.functional.common import models class ClusterData(models.BaseModel): """Data that encapsulates cluster attributes""" pass class ClusterEntity(models.EntityModel): """Entity Model that represents a single instance of ClusterData""" ENTITY_NAME = 'cluster' MODEL_TYPE = ClusterData class ClusterCollection(models.CollectionModel): """Collection Model that represents a list of ClusterData objects""" COLLECTION_NAME = 'clusterlists' MODEL_TYPE = ClusterData magnum-6.1.0/magnum/tests/functional/api/v1/models/__init__.py0000666000175100017510000000000013244017334024302 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/functional/api/v1/models/cert_model.py0000666000175100017510000000152513244017334024675 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.tests.functional.common import models class CertData(models.BaseModel): """Data that encapsulates cert attributes""" pass class CertEntity(models.EntityModel): """Entity Model that represents a single instance of CertData""" ENTITY_NAME = 'certificate' MODEL_TYPE = CertData magnum-6.1.0/magnum/tests/functional/api/v1/models/magnum_service_model.py0000666000175100017510000000212013244017334026734 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.tests.functional.common import models class MagnumServiceData(models.BaseModel): """Data that encapsulates magnum_service attributes""" pass class MagnumServiceEntity(models.EntityModel): """Entity Model that represents a single instance of MagnumServiceData""" ENTITY_NAME = 'mservice' MODEL_TYPE = MagnumServiceData class MagnumServiceCollection(models.CollectionModel): """Collection Model that represents a list of MagnumServiceData objects""" COLLECTION_NAME = 'mservicelists' MODEL_TYPE = MagnumServiceData magnum-6.1.0/magnum/tests/functional/api/v1/models/baypatch_model.py0000666000175100017510000000441513244017334025534 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json from magnum.tests.functional.common import models class BayPatchData(models.BaseModel): """Data that encapsulates baypatch attributes""" pass class BayPatchEntity(models.EntityModel): """Entity Model that represents a single instance of BayPatchData""" ENTITY_NAME = 'baypatch' MODEL_TYPE = BayPatchData class BayPatchCollection(models.CollectionModel): """Collection Model that represents a list of BayPatchData objects""" MODEL_TYPE = BayPatchData COLLECTION_NAME = 'baypatchlist' def to_json(self): """Converts BayPatchCollection to json Retrieves list from COLLECTION_NAME attribute and converts each object to dict, appending it to a list. Then converts the entire list to json This is required due to COLLECTION_NAME holding a list of objects that needed to be converted to dict individually :returns: json object """ data = getattr(self, BayPatchCollection.COLLECTION_NAME) collection = [] for d in data: collection.append(d.to_dict()) return json.dumps(collection) @classmethod def from_dict(cls, data): """Converts dict to BayPatchData Converts data dict to list of BayPatchData objects and stores it in COLLECTION_NAME Example of dict data: [{ "path": "/name", "value": "myname", "op": "replace" }] :param data: dict of patch data :returns: json object """ model = cls() collection = [] for d in data: collection.append(cls.MODEL_TYPE.from_dict(d)) setattr(model, cls.COLLECTION_NAME, collection) return model magnum-6.1.0/magnum/tests/functional/api/v1/models/baymodelpatch_model.py0000666000175100017510000000452313244017334026555 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from magnum.tests.functional.common import models class BayModelPatchData(models.BaseModel): """Data that encapsulates baymodelpatch attributes""" pass class BayModelPatchEntity(models.EntityModel): """Entity Model that represents a single instance of BayModelPatchData""" ENTITY_NAME = 'baymodelpatch' MODEL_TYPE = BayModelPatchData class BayModelPatchCollection(models.CollectionModel): """Collection Model that represents a list of BayModelPatchData objects""" MODEL_TYPE = BayModelPatchData COLLECTION_NAME = 'baymodelpatchlist' def to_json(self): """Converts BayModelPatchCollection to json Retrieves list from COLLECTION_NAME attribute and converts each object to dict, appending it to a list. 
Then converts the entire list to json This is required due to COLLECTION_NAME holding a list of objects that needed to be converted to dict individually :returns: json object """ data = getattr(self, BayModelPatchCollection.COLLECTION_NAME) collection = [] for d in data: collection.append(d.to_dict()) return json.dumps(collection) @classmethod def from_dict(cls, data): """Converts dict to BayModelPatchData Converts data dict to list of BayModelPatchData objects and stores it in COLLECTION_NAME Example of dict data: [{ "path": "/name", "value": "myname", "op": "replace" }] :param data: dict of patch data :returns: json object """ model = cls() collection = [] for d in data: collection.append(cls.MODEL_TYPE.from_dict(d)) setattr(model, cls.COLLECTION_NAME, collection) return model magnum-6.1.0/magnum/tests/functional/api/v1/models/clusterpatch_model.py0000666000175100017510000000450513244017334026442 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json from magnum.tests.functional.common import models class ClusterPatchData(models.BaseModel): """Data that encapsulates clusterpatch attributes""" pass class ClusterPatchEntity(models.EntityModel): """Entity Model that represents a single instance of ClusterPatchData""" ENTITY_NAME = 'clusterpatch' MODEL_TYPE = ClusterPatchData class ClusterPatchCollection(models.CollectionModel): """Collection Model that represents a list of ClusterPatchData objects""" MODEL_TYPE = ClusterPatchData COLLECTION_NAME = 'clusterpatchlist' def to_json(self): """Converts ClusterPatchCollection to json Retrieves list from COLLECTION_NAME attribute and converts each object to dict, appending it to a list. Then converts the entire list to json This is required due to COLLECTION_NAME holding a list of objects that needed to be converted to dict individually :returns: json object """ data = getattr(self, ClusterPatchCollection.COLLECTION_NAME) collection = [] for d in data: collection.append(d.to_dict()) return json.dumps(collection) @classmethod def from_dict(cls, data): """Converts dict to ClusterPatchData Converts data dict to list of ClusterPatchData objects and stores it in COLLECTION_NAME Example of dict data: [{ "path": "/name", "value": "myname", "op": "replace" }] :param data: dict of patch data :returns: json object """ model = cls() collection = [] for d in data: collection.append(cls.MODEL_TYPE.from_dict(d)) setattr(model, cls.COLLECTION_NAME, collection) return model magnum-6.1.0/magnum/tests/functional/api/v1/clients/0000775000175100017510000000000013244017675022367 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/functional/api/v1/clients/cluster_client.py0000777000175100017510000001437713244017334025771 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from tempest.lib import exceptions from magnum.tests.functional.api.v1.models import cluster_id_model from magnum.tests.functional.api.v1.models import cluster_model from magnum.tests.functional.common import client from magnum.tests.functional.common import utils class ClusterClient(client.MagnumClient): """Encapsulates REST calls and maps JSON to/from models""" LOG = logging.getLogger(__name__) @classmethod def clusters_uri(cls, filters=None): """Construct clusters uri with optional filters :param filters: Optional k:v dict that's converted to url query :returns: url string """ url = "/clusters" if filters: url = cls.add_filters(url, filters) return url @classmethod def cluster_uri(cls, cluster_id): """Construct cluster uri :param cluster_id: cluster uuid or name :returns: url string """ return "{0}/{1}".format(cls.clusters_uri(), cluster_id) def list_clusters(self, filters=None, **kwargs): """Makes GET /clusters request and returns ClusterCollection Abstracts REST call to return all clusters :param filters: Optional k:v dict that's converted to url query :returns: response object and ClusterCollection object """ resp, body = self.get(self.clusters_uri(filters), **kwargs) return self.deserialize(resp, body, cluster_model.ClusterCollection) def get_cluster(self, cluster_id, **kwargs): """Makes GET /cluster request and returns ClusterEntity Abstracts REST call to return a single cluster based on uuid or name :param cluster_id: cluster uuid or name :returns: response object and ClusterCollection object """ resp, body = 
self.get(self.cluster_uri(cluster_id)) return self.deserialize(resp, body, cluster_model.ClusterEntity) def post_cluster(self, model, **kwargs): """Makes POST /cluster request and returns ClusterIdEntity Abstracts REST call to create new cluster :param model: ClusterEntity :returns: response object and ClusterIdEntity object """ resp, body = self.post( self.clusters_uri(), body=model.to_json(), **kwargs) return self.deserialize(resp, body, cluster_id_model.ClusterIdEntity) def patch_cluster(self, cluster_id, clusterpatch_listmodel, **kwargs): """Makes PATCH /cluster request and returns ClusterIdEntity Abstracts REST call to update cluster attributes :param cluster_id: UUID of cluster :param clusterpatch_listmodel: ClusterPatchCollection :returns: response object and ClusterIdEntity object """ resp, body = self.patch( self.cluster_uri(cluster_id), body=clusterpatch_listmodel.to_json(), **kwargs) return self.deserialize(resp, body, cluster_id_model.ClusterIdEntity) def delete_cluster(self, cluster_id, **kwargs): """Makes DELETE /cluster request and returns response object Abstracts REST call to delete cluster based on uuid or name :param cluster_id: UUID or name of cluster :returns: response object """ return self.delete(self.cluster_uri(cluster_id), **kwargs) def wait_for_cluster_to_delete(self, cluster_id): utils.wait_for_condition( lambda: self.does_cluster_not_exist(cluster_id), 10, 600) def wait_for_created_cluster(self, cluster_id, delete_on_error=True): try: utils.wait_for_condition( lambda: self.does_cluster_exist(cluster_id), 10, 1800) except Exception: # In error state. 
Clean up the cluster id if desired self.LOG.error('Cluster %s entered an exception state.', cluster_id) if delete_on_error: self.LOG.error('We will attempt to delete clusters now.') self.delete_cluster(cluster_id) self.wait_for_cluster_to_delete(cluster_id) raise def wait_for_final_state(self, cluster_id): utils.wait_for_condition( lambda: self.is_cluster_in_final_state(cluster_id), 10, 1800) def is_cluster_in_final_state(self, cluster_id): try: resp, model = self.get_cluster(cluster_id) if model.status in ['CREATED', 'CREATE_COMPLETE', 'ERROR', 'CREATE_FAILED']: self.LOG.info('Cluster %s succeeded.', cluster_id) return True else: return False except exceptions.NotFound: self.LOG.warning('Cluster %s is not found.', cluster_id) return False def does_cluster_exist(self, cluster_id): try: resp, model = self.get_cluster(cluster_id) if model.status in ['CREATED', 'CREATE_COMPLETE']: self.LOG.info('Cluster %s is created.', cluster_id) return True elif model.status in ['ERROR', 'CREATE_FAILED']: self.LOG.error('Cluster %s is in fail state.', cluster_id) raise exceptions.ServerFault( "Got into an error condition: %s for %s", (model.status, cluster_id)) else: return False except exceptions.NotFound: self.LOG.warning('Cluster %s is not found.', cluster_id) return False def does_cluster_not_exist(self, cluster_id): try: self.get_cluster(cluster_id) except exceptions.NotFound: self.LOG.warning('Cluster %s is not found.', cluster_id) return True return False magnum-6.1.0/magnum/tests/functional/api/v1/clients/magnum_service_client.py0000666000175100017510000000321713244017334027300 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.tests.functional.api.v1.models import magnum_service_model from magnum.tests.functional.common import client class MagnumServiceClient(client.MagnumClient): """Encapsulates REST calls and maps JSON to/from models""" @classmethod def magnum_service_uri(cls, filters=None): """Construct magnum services uri with optional filters :param filters: Optional k:v dict that's converted to url query :returns: url string """ url = "/mservices" if filters: url = cls.add_filters(url, filters) return url def magnum_service_list(self, filters=None, **kwargs): """Makes GET /mservices request and returns MagnumServiceCollection Abstracts REST call to return all magnum services. :param filters: Optional k:v dict that's converted to url query :returns: response object and MagnumServiceCollection object """ resp, body = self.get(self.magnum_service_uri(filters), **kwargs) return self.deserialize(resp, body, magnum_service_model.MagnumServiceCollection) magnum-6.1.0/magnum/tests/functional/api/v1/clients/cluster_template_client.py0000666000175100017510000001040613244017334027646 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from magnum.tests.functional.api.v1.models import cluster_template_model from magnum.tests.functional.common import client class ClusterTemplateClient(client.MagnumClient): """Encapsulates REST calls and maps JSON to/from models""" @classmethod def cluster_templates_uri(cls, filters=None): """Construct clustertemplates uri with optional filters :param filters: Optional k:v dict that's converted to url query :returns: url string """ url = "/clustertemplates" if filters: url = cls.add_filters(url, filters) return url @classmethod def cluster_template_uri(cls, cluster_template_id): """Construct cluster_template uri :param cluster_template_id: cluster_template uuid or name :returns: url string """ return "{0}/{1}".format(cls.cluster_templates_uri(), cluster_template_id) def list_cluster_templates(self, filters=None, **kwargs): """Makes GET /clustertemplates request Abstracts REST call to return all clustertemplates :param filters: Optional k:v dict that's converted to url query :returns: response object and ClusterTemplateCollection object """ resp, body = self.get(self.cluster_templates_uri(filters), **kwargs) collection = cluster_template_model.ClusterTemplateCollection return self.deserialize(resp, body, collection) def get_cluster_template(self, cluster_template_id, **kwargs): """Makes GET /clustertemplate request and returns ClusterTemplateEntity Abstracts REST call to return a single clustertempalte based on uuid or name :param cluster_template_id: clustertempalte uuid or name :returns: response object and ClusterTemplateCollection object """ resp, body = self.get(self.cluster_template_uri(cluster_template_id)) return self.deserialize(resp, body, cluster_template_model.ClusterTemplateEntity) def post_cluster_template(self, model, **kwargs): """Makes POST /clustertemplate request Abstracts REST call to create new clustertemplate :param model: 
ClusterTemplateEntity :returns: response object and ClusterTemplateEntity object """ resp, body = self.post( self.cluster_templates_uri(), body=model.to_json(), **kwargs) entity = cluster_template_model.ClusterTemplateEntity return self.deserialize(resp, body, entity) def patch_cluster_template(self, cluster_template_id, cluster_templatepatch_listmodel, **kwargs): """Makes PATCH /clustertemplate and returns ClusterTemplateEntity Abstracts REST call to update clustertemplate attributes :param cluster_template_id: UUID of clustertemplate :param cluster_templatepatch_listmodel: ClusterTemplatePatchCollection :returns: response object and ClusterTemplateEntity object """ resp, body = self.patch( self.cluster_template_uri(cluster_template_id), body=cluster_templatepatch_listmodel.to_json(), **kwargs) return self.deserialize(resp, body, cluster_template_model.ClusterTemplateEntity) def delete_cluster_template(self, cluster_template_id, **kwargs): """Makes DELETE /clustertemplate request and returns response object Abstracts REST call to delete clustertemplate based on uuid or name :param cluster_template_id: UUID or name of clustertemplate :returns: response object """ return self.delete(self.cluster_template_uri(cluster_template_id), **kwargs) magnum-6.1.0/magnum/tests/functional/api/v1/clients/baymodel_client.py0000666000175100017510000000722213244017334026070 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from magnum.tests.functional.api.v1.models import baymodel_model from magnum.tests.functional.common import client class BayModelClient(client.MagnumClient): """Encapsulates REST calls and maps JSON to/from models""" @classmethod def baymodels_uri(cls, filters=None): """Construct baymodels uri with optional filters :param filters: Optional k:v dict that's converted to url query :returns: url string """ url = "/baymodels" if filters: url = cls.add_filters(url, filters) return url @classmethod def baymodel_uri(cls, baymodel_id): """Construct baymodel uri :param baymodel_id: baymodel uuid or name :returns: url string """ return "{0}/{1}".format(cls.baymodels_uri(), baymodel_id) def list_baymodels(self, filters=None, **kwargs): """Makes GET /baymodels request and returns BayModelCollection Abstracts REST call to return all baymodels :param filters: Optional k:v dict that's converted to url query :returns: response object and BayModelCollection object """ resp, body = self.get(self.baymodels_uri(filters), **kwargs) return self.deserialize(resp, body, baymodel_model.BayModelCollection) def get_baymodel(self, baymodel_id, **kwargs): """Makes GET /baymodel request and returns BayModelEntity Abstracts REST call to return a single baymodel based on uuid or name :param baymodel_id: baymodel uuid or name :returns: response object and BayModelCollection object """ resp, body = self.get(self.baymodel_uri(baymodel_id)) return self.deserialize(resp, body, baymodel_model.BayModelEntity) def post_baymodel(self, model, **kwargs): """Makes POST /baymodel request and returns BayModelEntity Abstracts REST call to create new baymodel :param model: BayModelEntity :returns: response object and BayModelEntity object """ resp, body = self.post( self.baymodels_uri(), body=model.to_json(), **kwargs) return self.deserialize(resp, body, baymodel_model.BayModelEntity) def patch_baymodel(self, baymodel_id, baymodelpatch_listmodel, **kwargs): """Makes PATCH /baymodel request and returns 
BayModelEntity Abstracts REST call to update baymodel attributes :param baymodel_id: UUID of baymodel :param baymodelpatch_listmodel: BayModelPatchCollection :returns: response object and BayModelEntity object """ resp, body = self.patch( self.baymodel_uri(baymodel_id), body=baymodelpatch_listmodel.to_json(), **kwargs) return self.deserialize(resp, body, baymodel_model.BayModelEntity) def delete_baymodel(self, baymodel_id, **kwargs): """Makes DELETE /baymodel request and returns response object Abstracts REST call to delete baymodel based on uuid or name :param baymodel_id: UUID or name of baymodel :returns: response object """ return self.delete(self.baymodel_uri(baymodel_id), **kwargs) magnum-6.1.0/magnum/tests/functional/api/v1/clients/bay_client.py0000777000175100017510000001327413244017334025056 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from tempest.lib import exceptions from magnum.tests.functional.api.v1.models import bay_model from magnum.tests.functional.common import client from magnum.tests.functional.common import utils class BayClient(client.MagnumClient): """Encapsulates REST calls and maps JSON to/from models""" LOG = logging.getLogger(__name__) @classmethod def bays_uri(cls, filters=None): """Construct bays uri with optional filters :param filters: Optional k:v dict that's converted to url query :returns: url string """ url = "/bays" if filters: url = cls.add_filters(url, filters) return url @classmethod def bay_uri(cls, bay_id): """Construct bay uri :param bay_id: bay uuid or name :returns: url string """ return "{0}/{1}".format(cls.bays_uri(), bay_id) def list_bays(self, filters=None, **kwargs): """Makes GET /bays request and returns BayCollection Abstracts REST call to return all bays :param filters: Optional k:v dict that's converted to url query :returns: response object and BayCollection object """ resp, body = self.get(self.bays_uri(filters), **kwargs) return self.deserialize(resp, body, bay_model.BayCollection) def get_bay(self, bay_id, **kwargs): """Makes GET /bay request and returns BayEntity Abstracts REST call to return a single bay based on uuid or name :param bay_id: bay uuid or name :returns: response object and BayCollection object """ resp, body = self.get(self.bay_uri(bay_id)) return self.deserialize(resp, body, bay_model.BayEntity) def post_bay(self, model, **kwargs): """Makes POST /bay request and returns BayEntity Abstracts REST call to create new bay :param model: BayEntity :returns: response object and BayEntity object """ resp, body = self.post( self.bays_uri(), body=model.to_json(), **kwargs) return self.deserialize(resp, body, bay_model.BayEntity) def patch_bay(self, bay_id, baypatch_listmodel, **kwargs): """Makes PATCH /bay request and returns BayEntity Abstracts REST call to update bay attributes :param bay_id: UUID of bay 
:param baypatch_listmodel: BayPatchCollection :returns: response object and BayEntity object """ resp, body = self.patch( self.bay_uri(bay_id), body=baypatch_listmodel.to_json(), **kwargs) return self.deserialize(resp, body, bay_model.BayEntity) def delete_bay(self, bay_id, **kwargs): """Makes DELETE /bay request and returns response object Abstracts REST call to delete bay based on uuid or name :param bay_id: UUID or name of bay :returns: response object """ return self.delete(self.bay_uri(bay_id), **kwargs) def wait_for_bay_to_delete(self, bay_id): utils.wait_for_condition( lambda: self.does_bay_not_exist(bay_id), 10, 600) def wait_for_created_bay(self, bay_id, delete_on_error=True): try: utils.wait_for_condition( lambda: self.does_bay_exist(bay_id), 10, 1800) except Exception: # In error state. Clean up the bay id if desired self.LOG.error('Bay %s entered an exception state.', bay_id) if delete_on_error: self.LOG.error('We will attempt to delete bays now.') self.delete_bay(bay_id) self.wait_for_bay_to_delete(bay_id) raise def wait_for_final_state(self, bay_id): utils.wait_for_condition( lambda: self.is_bay_in_final_state(bay_id), 10, 1800) def is_bay_in_final_state(self, bay_id): try: resp, model = self.get_bay(bay_id) if model.status in ['CREATED', 'CREATE_COMPLETE', 'ERROR', 'CREATE_FAILED']: self.LOG.info('Bay %s succeeded.', bay_id) return True else: return False except exceptions.NotFound: self.LOG.warning('Bay %s is not found.', bay_id) return False def does_bay_exist(self, bay_id): try: resp, model = self.get_bay(bay_id) if model.status in ['CREATED', 'CREATE_COMPLETE']: self.LOG.info('Bay %s is created.', bay_id) return True elif model.status in ['ERROR', 'CREATE_FAILED']: self.LOG.error('Bay %s is in fail state.', bay_id) raise exceptions.ServerFault( "Got into an error condition: %s for %s", (model.status, bay_id)) else: return False except exceptions.NotFound: self.LOG.warning('Bay %s is not found.', bay_id) return False def does_bay_not_exist(self, 
bay_id): try: self.get_bay(bay_id) except exceptions.NotFound: self.LOG.warning('Bay %s is not found.', bay_id) return True return False magnum-6.1.0/magnum/tests/functional/api/v1/clients/__init__.py0000666000175100017510000000000013244017334024460 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/functional/api/v1/clients/cert_client.py0000666000175100017510000000356413244017334025236 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.tests.functional.api.v1.models import cert_model from magnum.tests.functional.common import client class CertClient(client.MagnumClient): """Encapsulates REST calls and maps JSON to/from models""" url = "/certificates" @classmethod def cert_uri(cls, cluster_id): """Construct cluster uri :param cluster_id: cluster uuid or name :returns: url string """ return "{0}/{1}".format(cls.url, cluster_id) def get_cert(self, cluster_id, **kwargs): """Makes GET /certificates/cluster_id request and returns CertEntity Abstracts REST call to return a single cert based on uuid or name :param cluster_id: cluster uuid or name :returns: response object and ClusterCollection object """ resp, body = self.get(self.cert_uri(cluster_id), **kwargs) return self.deserialize(resp, body, cert_model.CertEntity) def post_cert(self, model, **kwargs): """Makes POST /certificates request and returns CertEntity Abstracts REST call to sign new certificate :param model: CertEntity :returns: response object and CertEntity object """ resp, body = 
self.post( CertClient.url, body=model.to_json(), **kwargs) return self.deserialize(resp, body, cert_model.CertEntity) magnum-6.1.0/magnum/tests/functional/api/v1/__init__.py0000666000175100017510000000000013244017334023017 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/functional/api/base.py0000777000175100017510000001365413244017334021672 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import inspect import logging from tempest.common import credentials_factory as common_creds from magnum.tests.functional.common import base from magnum.tests.functional.common import config from magnum.tests.functional.common import manager COPY_LOG_HELPER = "magnum/tests/contrib/copy_instance_logs.sh" class BaseTempestTest(base.BaseMagnumTest): """Sets up configuration required for functional tests""" ic_class_list = [] ic_method_list = [] LOG = logging.getLogger(__name__) def __init__(self, *args, **kwargs): super(BaseTempestTest, self).__init__(*args, **kwargs) @classmethod def setUpClass(cls): super(BaseTempestTest, cls).setUpClass() config.Config.setUp() @classmethod def tearDownClass(cls): super(BaseTempestTest, cls).tearDownClass() cls.clear_credentials(clear_class_creds=True) def tearDown(self): super(BaseTempestTest, self).tearDown() self.clear_credentials(clear_method_creds=True) @classmethod def clear_credentials(cls, clear_class_creds=False, clear_method_creds=False): if clear_class_creds: for ic in cls.ic_class_list: ic.clear_creds() if 
clear_method_creds: for ic in cls.ic_method_list: ic.clear_creds() @classmethod def get_credentials(cls, name=None, type_of_creds="default", class_cleanup=False): (creds, _) = cls.get_credentials_with_keypair(name, type_of_creds, class_cleanup) return creds @classmethod def get_credentials_with_keypair(cls, name=None, type_of_creds="default", class_cleanup=False): if name is None: # Get name of test method name = inspect.stack()[1][3] if len(name) > 32: name = name[0:32] # Choose type of isolated creds ic = common_creds.get_credentials_provider( name, identity_version=config.Config.auth_version ) if class_cleanup: cls.ic_class_list.append(ic) else: cls.ic_method_list.append(ic) creds = None if "admin" == type_of_creds: creds = ic.get_admin_creds() elif "alt" == type_of_creds: creds = ic.get_alt_creds() elif "default" == type_of_creds: creds = ic.get_primary_creds() else: creds = ic.self.get_credentials(type_of_creds) _, keypairs_client = cls.get_clients( creds, type_of_creds, 'keypair_setup') keypair = None try: keypairs_client.show_keypair(config.Config.keypair_id) except Exception: keypair_body = keypairs_client.create_keypair( name=config.Config.keypair_id) cls.LOG.debug("Keypair body: %s", keypair_body) keypair = keypair_body['keypair']['private_key'] return (creds, keypair) @classmethod def get_clients(cls, creds, type_of_creds, request_type): if "admin" == type_of_creds: manager_inst = manager.AdminManager(credentials=creds.credentials, request_type=request_type) elif "alt" == type_of_creds: manager_inst = manager.AltManager(credentials=creds.credentials, request_type=request_type) elif "default" == type_of_creds: manager_inst = manager.DefaultManager( credentials=creds.credentials, request_type=request_type) else: manager_inst = manager.DefaultManager( credentials=creds.credentials, request_type=request_type) # create client with isolated creds return (manager_inst.client, manager_inst.keypairs_client) @classmethod def get_clients_with_existing_creds(cls, 
name=None, creds=None, type_of_creds="default", request_type=None, class_cleanup=False): if creds is None: return cls.get_clients_with_new_creds(name, type_of_creds, request_type, class_cleanup) else: return cls.get_clients(creds, type_of_creds, request_type) @classmethod def get_clients_with_new_creds(cls, name=None, type_of_creds="default", request_type=None, class_cleanup=False): """Creates isolated creds. :param name: name, will be used for dynamic creds :param type_of_creds: admin, alt or default :param request_type: ClusterTemplate or service :returns: MagnumClient -- client with isolated creds. :returns: KeypairClient -- allows for creating of keypairs """ creds = cls.get_credentials(name, type_of_creds, class_cleanup) return cls.get_clients(creds, type_of_creds, request_type) magnum-6.1.0/magnum/tests/functional/api/__init__.py0000666000175100017510000000000013244017334022471 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/functional/python_client_base.py0000777000175100017510000005172013244017334024054 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_magnum ---------------------------------- Tests for `magnum` module. 
""" import os import subprocess import tempfile import time import fixtures from six.moves import configparser from heatclient import client as heatclient from keystoneauth1.identity import v3 as ksa_v3 from keystoneauth1 import session as ksa_session from keystoneclient.v3 import client as ksclient from kubernetes import client as k8s_config from kubernetes.client import api_client from kubernetes.client.apis import core_v1_api from magnum.common.utils import rmtree_without_raise import magnum.conf from magnum.tests.functional.common import base from magnum.tests.functional.common import utils from magnumclient.common.apiclient import exceptions from magnumclient.common import cliutils from magnumclient.v1 import client as v1client CONF = magnum.conf.CONF class BaseMagnumClient(base.BaseMagnumTest): @classmethod def setUpClass(cls): # Collecting of credentials: # # Support the existence of a functional_creds.conf for # testing. This makes it possible to use a config file. super(BaseMagnumClient, cls).setUpClass() user = cliutils.env('OS_USERNAME') passwd = cliutils.env('OS_PASSWORD') project_name = cliutils.env('OS_PROJECT_NAME') auth_url = cliutils.env('OS_AUTH_URL') insecure = cliutils.env('INSECURE') region_name = cliutils.env('OS_REGION_NAME') magnum_url = cliutils.env('BYPASS_URL') image_id = cliutils.env('IMAGE_ID') nic_id = cliutils.env('NIC_ID') flavor_id = cliutils.env('FLAVOR_ID') master_flavor_id = cliutils.env('MASTER_FLAVOR_ID') keypair_id = cliutils.env('KEYPAIR_ID') dns_nameserver = cliutils.env('DNS_NAMESERVER') copy_logs = cliutils.env('COPY_LOGS') user_domain_id = cliutils.env('OS_USER_DOMAIN_ID') project_domain_id = cliutils.env('OS_PROJECT_DOMAIN_ID') config = configparser.RawConfigParser() if config.read('functional_creds.conf'): # the OR pattern means the environment is preferred for # override user = user or config.get('admin', 'user') passwd = passwd or config.get('admin', 'pass') project_name = project_name or config.get('admin', 
'project_name') auth_url = auth_url or config.get('auth', 'auth_url') insecure = insecure or config.get('auth', 'insecure') magnum_url = magnum_url or config.get('auth', 'magnum_url') image_id = image_id or config.get('magnum', 'image_id') nic_id = nic_id or config.get('magnum', 'nic_id') flavor_id = flavor_id or config.get('magnum', 'flavor_id') master_flavor_id = master_flavor_id or config.get( 'magnum', 'master_flavor_id') keypair_id = keypair_id or config.get('magnum', 'keypair_id') dns_nameserver = dns_nameserver or config.get( 'magnum', 'dns_nameserver') user_domain_id = user_domain_id or config.get( 'admin', 'user_domain_id') project_domain_id = project_domain_id or config.get( 'admin', 'project_domain_id') try: copy_logs = copy_logs or config.get('magnum', 'copy_logs') except configparser.NoOptionError: pass cls.image_id = image_id cls.nic_id = nic_id cls.flavor_id = flavor_id cls.master_flavor_id = master_flavor_id cls.keypair_id = keypair_id cls.dns_nameserver = dns_nameserver cls.copy_logs = str(copy_logs).lower() == 'true' # NOTE(clenimar): The recommended way to issue clients is by creating # a keystoneauth Session. Using auth parameters (e.g. username and # password) directly is deprecated. 
_session = cls._get_auth_session(username=user, password=passwd, project_name=project_name, project_domain_id=project_domain_id, user_domain_id=user_domain_id, auth_url=auth_url, insecure=insecure) cls.cs = v1client.Client(session=_session, insecure=insecure, service_type='container-infra', region_name=region_name, magnum_url=magnum_url, api_version='latest') cls.keystone = ksclient.Client(session=_session) # Get heat endpoint from session auth_ref = _session.auth.get_auth_ref(_session) heat_endpoint = auth_ref.service_catalog.url_for( service_type='orchestration') cls.heat = heatclient.Client('1', session=_session, auth=_session.auth, endpoint=heat_endpoint) @classmethod def _get_auth_session(cls, username, password, project_name, project_domain_id, user_domain_id, auth_url, insecure): """Return a `keystoneauth1.session.Session` from auth parameters.""" # create v3Password auth plugin _auth = ksa_v3.Password(username=username, password=password, project_name=project_name, project_domain_id=project_domain_id, user_domain_id=user_domain_id, auth_url=auth_url) # `insecure` is being replaced by `verify`. Please note they have # opposite meanings. 
verify = False if insecure else True # create a `keystoneauth1.session.Session` _session = ksa_session.Session(auth=_auth, verify=verify) return _session @classmethod def _wait_on_status(cls, cluster, wait_status, finish_status, timeout=6000): # Check status every 60 seconds for a total of 100 minutes def _check_status(): status = cls.cs.clusters.get(cluster.uuid).status cls.LOG.debug("Cluster status is %s", status) if status in wait_status: return False elif status in finish_status: return True else: raise Exception("Unexpected Status: %s" % status) # sleep 1s to wait cluster status changes, this will be useful for # the first time we wait for the status, to avoid another 59s time.sleep(1) utils.wait_for_condition(_check_status, interval=60, timeout=timeout) @classmethod def _create_cluster_template(cls, name, **kwargs): # TODO(eliqiao): We don't want these to be have default values, # just leave them here to make things work. # Plan is to support other kinds of ClusterTemplate # creation. 
coe = kwargs.pop('coe', 'kubernetes') network_driver = kwargs.pop('network_driver', 'flannel') volume_driver = kwargs.pop('volume_driver', 'cinder') labels = kwargs.pop('labels', {"K1": "V1", "K2": "V2"}) tls_disabled = kwargs.pop('tls_disabled', False) fixed_subnet = kwargs.pop('fixed_subnet', None) server_type = kwargs.pop('server_type', 'vm') cluster_template = cls.cs.cluster_templates.create( name=name, keypair_id=cls.keypair_id, external_network_id=cls.nic_id, image_id=cls.image_id, flavor_id=cls.flavor_id, master_flavor_id=cls.master_flavor_id, network_driver=network_driver, volume_driver=volume_driver, dns_nameserver=cls.dns_nameserver, coe=coe, labels=labels, tls_disabled=tls_disabled, fixed_subnet=fixed_subnet, server_type=server_type, **kwargs) return cluster_template @classmethod def _create_cluster(cls, name, cluster_template_uuid): cluster = cls.cs.clusters.create( name=name, cluster_template_id=cluster_template_uuid ) return cluster @classmethod def _show_cluster(cls, name): cluster = cls.cs.clusters.get(name) return cluster @classmethod def _delete_cluster_template(cls, cluster_template_uuid): cls.cs.cluster_templates.delete(cluster_template_uuid) @classmethod def _delete_cluster(cls, cluster_uuid): cls.cs.clusters.delete(cluster_uuid) try: cls._wait_on_status( cls.cluster, ["CREATE_COMPLETE", "DELETE_IN_PROGRESS", "CREATE_FAILED"], ["DELETE_FAILED", "DELETE_COMPLETE"], timeout=600 ) except exceptions.NotFound: pass else: if cls._show_cluster(cls.cluster.uuid).status == 'DELETE_FAILED': raise Exception("Cluster %s delete failed" % cls.cluster.uuid) @classmethod def get_copy_logs(cls): return cls.copy_logs def _wait_for_cluster_complete(self, cluster): self._wait_on_status( cluster, [None, "CREATE_IN_PROGRESS"], ["CREATE_FAILED", "CREATE_COMPLETE"], timeout=self.cluster_complete_timeout ) if self.cs.clusters.get(cluster.uuid).status == 'CREATE_FAILED': raise Exception("Cluster %s create failed" % cluster.uuid) return cluster class 
ClusterTest(BaseMagnumClient): # NOTE (eliqiao) coe should be specified in subclasses coe = None cluster_template_kwargs = {} config_contents = """[req] distinguished_name = req_distinguished_name req_extensions = req_ext prompt = no [req_distinguished_name] CN = admin O = system:masters OU=OpenStack/Magnum C=US ST=TX L=Austin [req_ext] extendedKeyUsage = clientAuth """ ca_dir = None cluster = None cluster_template = None key_file = None cert_file = None ca_file = None cluster_complete_timeout = 1800 @classmethod def setUpClass(cls): super(ClusterTest, cls).setUpClass() cls.cluster_template = cls._create_cluster_template( cls.__name__, coe=cls.coe, **cls.cluster_template_kwargs) cls.cluster = cls._create_cluster(cls.__name__, cls.cluster_template.uuid) if not cls.cluster_template_kwargs.get('tls_disabled', False): # NOTE (wangbo) with multiple mangum-conductor processes, client # ca files should be created after completion of cluster ca_cert try: cls._wait_on_status( cls.cluster, [None, "CREATE_IN_PROGRESS"], ["CREATE_FAILED", "CREATE_COMPLETE"], timeout=cls.cluster_complete_timeout ) except Exception: # copy logs if setUpClass fails, may be this will not work # as master_address, node_address would not be available, if # not we can get that from nova if cls.copy_logs: cls.copy_logs_handler( cls._get_nodes, cls.cluster_template.coe, 'default') cls._create_tls_ca_files(cls.config_contents) @classmethod def tearDownClass(cls): if cls.ca_dir: rmtree_without_raise(cls.ca_dir) if cls.cluster: cls._delete_cluster(cls.cluster.uuid) if cls.cluster_template: cls._delete_cluster_template(cls.cluster_template.uuid) super(ClusterTest, cls).tearDownClass() def setUp(self): super(ClusterTest, self).setUp() test_timeout = os.environ.get('OS_TEST_TIMEOUT', 60) try: test_timeout = int(test_timeout) except ValueError: # If timeout value is invalid, set a default timeout. 
test_timeout = CONF.cluster_heat.create_timeout if test_timeout <= 0: test_timeout = CONF.cluster_heat.create_timeout self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) # Copy cluster nodes logs if self.copy_logs: self.addCleanup( self.copy_logs_handler( self._get_nodes, self.cluster_template.coe, 'default')) self._wait_for_cluster_complete(self.cluster) def _get_nodes(self): nodes = self._get_nodes_from_cluster() if not [x for x in nodes if x]: self.LOG.info("the list of nodes from cluster is empty") nodes = self._get_nodes_from_stack() if not [x for x in nodes if x]: self.LOG.info("the list of nodes from stack is empty") self.LOG.info("Nodes are: %s", nodes) return nodes def _get_nodes_from_cluster(self): nodes = [] nodes.append(self.cs.clusters.get(self.cluster.uuid).master_addresses) nodes.append(self.cs.clusters.get(self.cluster.uuid).node_addresses) return nodes def _get_nodes_from_stack(self): nodes = [] stack = self.heat.stacks.get(self.cluster.stack_id) stack_outputs = stack.to_dict().get('outputs', []) output_keys = [] if self.cluster_template.coe == "kubernetes": output_keys = ["kube_masters", "kube_minions"] elif self.cluster_template.coe == "swarm": output_keys = ["swarm_masters", "swarm_nodes"] elif self.cluster_template.coe == "swarm-mode": output_keys = ["swarm_primary_master", "swarm_secondary_masters", "swarm_nodes"] elif self.cluster_template.coe == "mesos": output_keys = ["mesos_master", "mesos_slaves"] for output in stack_outputs: for key in output_keys: if output['output_key'] == key: nodes.append(output['output_value']) return nodes @classmethod def _create_tls_ca_files(cls, client_conf_contents): """Creates ca files by client_conf_contents.""" cls.ca_dir = tempfile.mkdtemp() cls.csr_file = '%s/client.csr' % cls.ca_dir cls.client_config_file = '%s/client.conf' % cls.ca_dir cls.key_file = '%s/client.key' % cls.ca_dir cls.cert_file = '%s/client.crt' % cls.ca_dir cls.ca_file = '%s/ca.crt' % cls.ca_dir with open(cls.client_config_file, 
'w') as f: f.write(client_conf_contents) def _write_client_key(): subprocess.call(['openssl', 'genrsa', '-out', cls.key_file, '4096']) def _create_client_csr(): subprocess.call(['openssl', 'req', '-new', '-days', '365', '-key', cls.key_file, '-out', cls.csr_file, '-config', cls.client_config_file]) _write_client_key() _create_client_csr() with open(cls.csr_file, 'r') as f: csr_content = f.read() # magnum ca-sign --cluster secure-k8scluster --csr client.csr \ # > client.crt resp = cls.cs.certificates.create(cluster_uuid=cls.cluster.uuid, csr=csr_content) with open(cls.cert_file, 'w') as f: f.write(resp.pem) # magnum ca-show --cluster secure-k8scluster > ca.crt resp = cls.cs.certificates.get(cls.cluster.uuid) with open(cls.ca_file, 'w') as f: f.write(resp.pem) class BaseK8sTest(ClusterTest): coe = 'kubernetes' @classmethod def setUpClass(cls): super(BaseK8sTest, cls).setUpClass() cls.kube_api_url = cls.cs.clusters.get(cls.cluster.uuid).api_address config = k8s_config.Configuration() config.host = cls.kube_api_url config.ssl_ca_cert = cls.ca_file config.cert_file = cls.cert_file config.key_file = cls.key_file k8s_client = api_client.ApiClient(configuration=config) cls.k8s_api = core_v1_api.CoreV1Api(k8s_client) def setUp(self): super(BaseK8sTest, self).setUp() self.kube_api_url = self.cs.clusters.get(self.cluster.uuid).api_address config = k8s_config.Configuration() config.host = self.kube_api_url config.ssl_ca_cert = self.ca_file config.cert_file = self.cert_file config.key_file = self.key_file k8s_client = api_client.ApiClient(configuration=config) self.k8s_api = core_v1_api.CoreV1Api(k8s_client) # TODO(coreypobrien) https://bugs.launchpad.net/magnum/+bug/1551824 utils.wait_for_condition(self._is_api_ready, 5, 600) def _is_api_ready(self): try: self.k8s_api.list_node() self.LOG.info("API is ready.") return True except Exception: self.LOG.info("API is not ready yet.") return False def test_pod_apis(self): pod_manifest = {'apiVersion': 'v1', 'kind': 'Pod', 'metadata': 
{'color': 'blue', 'name': 'test'}, 'spec': {'containers': [{'image': 'dockerfile/redis', 'name': 'redis'}]}} resp = self.k8s_api.create_namespaced_pod(body=pod_manifest, namespace='default') self.assertEqual('test', resp.metadata.name) self.assertTrue(resp.status.phase) resp = self.k8s_api.read_namespaced_pod(name='test', namespace='default') self.assertEqual('test', resp.metadata.name) self.assertTrue(resp.status.phase) resp = self.k8s_api.delete_namespaced_pod(name='test', body={}, namespace='default') def test_service_apis(self): service_manifest = {'apiVersion': 'v1', 'kind': 'Service', 'metadata': {'labels': {'name': 'frontend'}, 'name': 'frontend', 'resourceversion': 'v1'}, 'spec': {'ports': [{'port': 80, 'protocol': 'TCP', 'targetPort': 80}], 'selector': {'name': 'frontend'}}} resp = self.k8s_api.create_namespaced_service(body=service_manifest, namespace='default') self.assertEqual('frontend', resp.metadata.name) self.assertTrue(resp.status) resp = self.k8s_api.read_namespaced_service(name='frontend', namespace='default') self.assertEqual('frontend', resp.metadata.name) self.assertTrue(resp.status) resp = self.k8s_api.delete_namespaced_service(name='frontend', namespace='default') def test_replication_controller_apis(self): rc_manifest = { 'apiVersion': 'v1', 'kind': 'ReplicationController', 'metadata': {'labels': {'name': 'frontend'}, 'name': 'frontend'}, 'spec': {'replicas': 2, 'selector': {'name': 'frontend'}, 'template': {'metadata': { 'labels': {'name': 'frontend'}}, 'spec': {'containers': [{ 'image': 'nginx', 'name': 'nginx', 'ports': [{'containerPort': 80, 'protocol': 'TCP'}]}]}}}} resp = self.k8s_api.create_namespaced_replication_controller( body=rc_manifest, namespace='default') self.assertEqual('frontend', resp.metadata.name) self.assertEqual(2, resp.spec.replicas) resp = self.k8s_api.read_namespaced_replication_controller( name='frontend', namespace='default') self.assertEqual('frontend', resp.metadata.name) self.assertEqual(2, resp.spec.replicas) 
resp = self.k8s_api.delete_namespaced_replication_controller( name='frontend', body={}, namespace='default') magnum-6.1.0/magnum/tests/functional/k8s_ironic/0000775000175100017510000000000013244017675021677 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/functional/k8s_ironic/__init__.py0000666000175100017510000000000013244017334023770 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/functional/k8s_ironic/test_k8s_python_client.py0000666000175100017510000000212613244017334026747 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from magnum.tests.functional import python_client_base as base class TestFedoraKubernetesIronicAPIs(base.BaseK8sTest): cluster_complete_timeout = 3200 cluster_template_kwargs = { "tls_disabled": True, "network_driver": 'flannel', "volume_driver": None, "fixed_subnet": 'private-subnet', "server_type": 'bm', "docker_storage_driver": 'overlay', "labels": { "system_pods_initial_delay": 3600, "system_pods_timeout": 600, "kube_dashboard_enabled": False } } magnum-6.1.0/magnum/tests/functional/k8s_coreos/0000775000175100017510000000000013244017675021706 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/functional/k8s_coreos/__init__.py0000666000175100017510000000000013244017334023777 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/functional/k8s_coreos/test_k8s_python_client.py0000666000175100017510000000167113244017334026762 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.tests.functional import python_client_base as base class TestCoreosKubernetesAPIs(base.BaseK8sTest): cluster_template_kwargs = { "tls_disabled": True, "network_driver": 'flannel', "volume_driver": None, "labels": { "system_pods_initial_delay": 3600, "system_pods_timeout": 600, "kube_dashboard_enabled": False } } magnum-6.1.0/magnum/tests/functional/__init__.py0000666000175100017510000000122413244017334021731 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging logging.basicConfig( filename='functional-tests.log', filemode='w', level=logging.DEBUG, ) magnum-6.1.0/magnum/tests/fakes.py0000666000175100017510000001026413244017334017125 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock import time from oslo_service import loopingcall fakeAuthTokenHeaders = {'X-User-Id': u'773a902f022949619b5c2f32cd89d419', 'X-Project-Id': u'5588aebbcdc24e17a061595f80574376', 'X-Project-Name': 'test', 'X-User-Name': 'test', 'X-Auth-Token': u'5588aebbcdc24e17a061595f80574376', 'X-Forwarded-For': u'10.10.10.10, 11.11.11.11', 'X-Service-Catalog': u'{test: 12345}', 'X-Roles': 'role1,role2', 'X-Auth-Url': 'fake_auth_url', 'X-Identity-Status': 'Confirmed', 'X-User-Domain-Name': 'domain', 'X-Project-Domain-Id': 'project_domain_id', 'X-User-Domain-Id': 'user_domain_id', 'OpenStack-API-Version': 'container-infra 1.0' } class FakePecanRequest(mock.Mock): def __init__(self, **kwargs): super(FakePecanRequest, self).__init__(**kwargs) self.host_url = 'http://test_url:8080/test' self.context = {} self.body = '' self.content_type = 'text/unicode' self.params = {} self.path = '/v1/services' self.headers = fakeAuthTokenHeaders self.environ = {} self.version = (1, 0) def __setitem__(self, index, value): setattr(self, index, value) class FakePecanResponse(mock.Mock): def __init__(self, **kwargs): super(FakePecanResponse, self).__init__(**kwargs) self.status = None class FakeApp(object): pass class FakeService(mock.Mock): def __init__(self, **kwargs): super(FakeService, self).__init__(**kwargs) self.__tablename__ = 'service' self.__resource__ = 'services' self.user_id = 'fake user id' self.project_id = 'fake project id' self.uuid = 'test_uuid' self.id = 8 self.name = 'james' self.service_type = 'not_this' self.description = 'amazing' self.tags = ['this', 'and that'] self.read_only = True def as_dict(self): return dict(service_type=self.service_type, user_id=self.user_id, project_id=self.project_id, uuid=self.uuid, id=self.id, name=self.name, tags=self.tags, read_only=self.read_only, description=self.description) class FakeAuthProtocol(mock.Mock): def __init__(self, **kwargs): super(FakeAuthProtocol, self).__init__(**kwargs) self.app = FakeApp() self.config = '' class 
FakeLoopingCall(object): '''Fake a looping call without the eventlet stuff For tests, just do a simple implementation so that we can ensure the called logic works rather than testing LoopingCall ''' def __init__(self, **kwargs): func = kwargs.pop("f", None) if func is None: raise ValueError("Must pass a callable in the -f kwarg.") self.call_func = func def start(self, interval, **kwargs): intitial_delay = kwargs.pop("initial_delay", -1) stop_on_exception = kwargs.pop("stop_on_exception", True) if intitial_delay: time.sleep(intitial_delay) while True: try: self.call_func() except loopingcall.LoopingCallDone: return 0 except Exception as exc: if stop_on_exception: raise exc if interval: time.sleep(interval) magnum-6.1.0/magnum/tests/policy_fixture.py0000666000175100017510000000221713244017334021100 0ustar zuulzuul00000000000000# Copyright 2012 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import fixtures from oslo_policy import _parser from oslo_policy import opts as policy_opts from magnum.common import policy as magnum_policy import magnum.conf CONF = magnum.conf.CONF class PolicyFixture(fixtures.Fixture): def _setUp(self): policy_opts.set_defaults(CONF) magnum_policy._ENFORCER = None self.addCleanup(magnum_policy.init().clear) def set_rules(self, rules): policy = magnum_policy._ENFORCER policy.set_rules({k: _parser.parse_rule(v) for k, v in rules.items()}) magnum-6.1.0/magnum/tests/contrib/0000775000175100017510000000000013244017675017125 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/contrib/post_test_hook.sh0000777000175100017510000002120413244017334022521 0ustar zuulzuul00000000000000#!/bin/bash -x # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This script is executed inside post_test_hook function in devstack gate. 
# Sleep some time until all services are starting sleep 5 # Check if a function already exists function function_exists { declare -f -F $1 > /dev/null } # Set up all necessary test data function create_test_data { # First we test Magnum's command line to see if we can stand up # a cluster_template, cluster and a pod coe=$1 special=$2 if [ $coe == 'mesos' ]; then local image_name="ubuntu.*mesos" local container_format="bare" elif [ $coe == 'k8s-coreos' ]; then local image_name="coreos" local container_format="bare" elif [ "${coe}${special}" == 'k8s-ironic' ]; then local bm_flavor_id=$(openstack flavor show baremetal -f value -c id) die_if_not_set $LINENO bm_flavor_id "Failed to get id of baremetal flavor" # NOTE(yuanying): Workaround fix for ironic issue # cf. https://bugs.launchpad.net/ironic/+bug/1596421 echo "alter table ironic.nodes modify instance_info LONGTEXT;" | mysql -uroot -p${MYSQL_PASSWORD} ironic # NOTE(yuanying): Ironic instances need to connect to Internet openstack subnet set private-subnet --dns-nameserver 8.8.8.8 local container_format="ami" else local image_name="atomic" local container_format="bare" fi # if we have the MAGNUM_IMAGE_NAME setting, use it instead # of the default one. In combination with MAGNUM_GUEST_IMAGE_URL # setting, it allows to perform testing on custom images. image_name=${MAGNUM_IMAGE_NAME:-$image_name} export NIC_ID=$(openstack network show public -f value -c id) # We need to filter by container_format to get the appropriate # image. Specifically, when we provide kernel and ramdisk images # we need to select the 'ami' image. Otherwise, when we have # qcow2 images, the format is 'bare'. 
export IMAGE_ID=$(openstack image list --property container_format=$container_format | grep -i $image_name | awk '{print $2}') #Get magnum_url local magnum_api_ip=$(iniget /etc/magnum/magnum.conf api host) local magnum_api_port=$(iniget /etc/magnum/magnum.conf api port) local magnum_url="http://"$magnum_api_ip":"$magnum_api_port"/v1" local keystone_auth_url=$(iniget /etc/magnum/magnum.conf keystone_authtoken auth_uri) # pass the appropriate variables via a config file CREDS_FILE=$MAGNUM_DIR/functional_creds.conf cat < $CREDS_FILE # Credentials for functional testing [auth] auth_url = $keystone_auth_url magnum_url = $magnum_url username = $OS_USERNAME project_name = $OS_PROJECT_NAME project_domain_id = $OS_PROJECT_DOMAIN_ID user_domain_id = $OS_USER_DOMAIN_ID password = $OS_PASSWORD auth_version = v3 insecure = False [admin] user = $OS_USERNAME project_name = $OS_PROJECT_NAME project_domain_id = $OS_PROJECT_DOMAIN_ID user_domain_id = $OS_USER_DOMAIN_ID pass = $OS_PASSWORD region_name = $OS_REGION_NAME [magnum] image_id = $IMAGE_ID nic_id = $NIC_ID keypair_id = default flavor_id = ${bm_flavor_id:-s1.magnum} master_flavor_id = ${bm_flavor_id:-m1.magnum} copy_logs = true dns_nameserver = 8.8.8.8 EOF # Note(eliqiao): Let's keep this only for debugging on gate. echo_summary $CREDS_FILE cat $CREDS_FILE # Create a keypair for use in the functional tests. echo_summary "Generate a key-pair" # ~/.ssh/id_rsa already exists in multinode setup, so generate # key with different name ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa_magnum openstack keypair create --public-key ~/.ssh/id_rsa_magnum.pub default } function add_flavor { # because of policy.json change in nova, flavor-create is now an admin-only feature # moving this out to only be used by admins # Get admin credentials pushd ../devstack source openrc admin admin popd # Create magnum specific flavor for use in functional tests. 
echo_summary "Create a flavor" if [[ "$DEVSTACK_GATE_TOPOLOGY" = "multinode" ]] ; then local flavor_ram="3750" local flavor_disk="20" local flavor_vcpus="2" fi openstack flavor create m1.magnum --id 100 --ram ${flavor_ram:-1024} --disk ${flavor_disk:-10} --vcpus ${flavor_vcpus:-1} openstack flavor create s1.magnum --id 200 --ram ${flavor_ram:-1024} --disk ${flavor_disk:-10} --vcpus ${flavor_vcpus:-1} } if ! function_exists echo_summary; then function echo_summary { echo $@ } fi # Save trace setting XTRACE=$(set +o | grep xtrace) set -o xtrace echo_summary "magnum's post_test_hook.sh was called..." (set -o posix; set) # source it to make sure to get REQUIREMENTS_DIR source $BASE/new/devstack/stackrc constraints="-c $REQUIREMENTS_DIR/upper-constraints.txt" sudo -H pip install $constraints -U -r requirements.txt -r test-requirements.txt export MAGNUM_DIR="$BASE/new/magnum" sudo chown -R $USER:stack $MAGNUM_DIR # Run functional tests # Currently we support functional-api, functional-k8s, will support swarm, # mesos later. 
echo "Running magnum functional test suite for $1" # For api, we will run tempest tests coe=$1 special=$2 if [[ "-ironic" != "$special" ]]; then add_flavor fi # Get admin credentials pushd ../devstack source openrc admin admin popd create_test_data $coe $special _magnum_tests="" target="${coe}${special}" if [[ "api" == "$coe" ]]; then sudo chown -R $USER:stack $BASE/new/tempest export TEMPEST_CONFIG=$BASE/new/tempest/etc/tempest.conf # Set up tempest config with magnum goodness iniset $TEMPEST_CONFIG magnum image_id $IMAGE_ID iniset $TEMPEST_CONFIG magnum nic_id $NIC_ID iniset $TEMPEST_CONFIG magnum keypair_id default iniset $TEMPEST_CONFIG magnum flavor_id s1.magnum iniset $TEMPEST_CONFIG magnum master_flavor_id m1.magnum iniset $TEMPEST_CONFIG magnum copy_logs True # show tempest config with magnum cat $TEMPEST_CONFIG # tempest tox env is looking for /etc/tempest/tempest.conf sudo mkdir -p /etc/tempest sudo cp $TEMPEST_CONFIG /etc/tempest/tempest.conf # strigazi: don't run test_create_list_sign_delete_clusters because # it is very unstable in the CI _magnum_tests="magnum_tempest_plugin.tests.api.v1.test_bay" _magnum_tests="$_magnum_tests magnum_tempest_plugin.tests.api.v1.test_baymodel" _magnum_tests="$_magnum_tests magnum_tempest_plugin.tests.api.v1.test_cluster_template" _magnum_tests="$_magnum_tests magnum_tempest_plugin.tests.api.v1.test_cluster_template_admin" _magnum_tests="$_magnum_tests magnum_tempest_plugin.tests.api.v1.test_magnum_service" _magnum_tests="$_magnum_tests magnum_tempest_plugin.tests.api.v1.test_cluster.ClusterTest.test_create_cluster_for_nonexisting_cluster_template" _magnum_tests="$_magnum_tests magnum_tempest_plugin.tests.api.v1.test_cluster.ClusterTest.test_create_cluster_with_node_count_0" _magnum_tests="$_magnum_tests magnum_tempest_plugin.tests.api.v1.test_cluster.ClusterTest.test_create_cluster_with_nonexisting_flavor" _magnum_tests="$_magnum_tests 
magnum_tempest_plugin.tests.api.v1.test_cluster.ClusterTest.test_create_cluster_with_zero_masters" _magnum_tests="$_magnum_tests magnum_tempest_plugin.tests.api.v1.test_cluster.ClusterTest.test_delete_cluster_for_nonexisting_cluster" _magnum_tests="$_magnum_tests magnum_tempest_plugin.tests.api.v1.test_cluster.ClusterTest.test_update_cluster_for_nonexisting_cluster" pushd $BASE/new/magnum-tempest-plugin sudo cp $CREDS_FILE . sudo -E -H -u $USER tox -e functional-"$target" $_magnum_tests -- --concurrency=1 EXIT_CODE=$? popd else sudo -E -H -u $USER tox -e functional-"$target" $_magnum_tests -- --concurrency=1 EXIT_CODE=$? fi # Delete the keypair used in the functional test. echo_summary "Running keypair-delete" openstack keypair delete default if [[ "-ironic" != "$special" ]]; then # Delete the flavor used in the functional test. echo_summary "Running flavor-delete" openstack flavor delete m1.magnum openstack flavor delete s1.magnum fi # Save functional testing log sudo cp $MAGNUM_DIR/functional-tests.log /opt/stack/logs/ # Save functional_creds.conf sudo cp $CREDS_FILE /opt/stack/logs/ # Restore xtrace $XTRACE exit $EXIT_CODE magnum-6.1.0/magnum/tests/contrib/copy_instance_logs.sh0000777000175100017510000002461613244017334023351 0ustar zuulzuul00000000000000#!/usr/bin/env bash # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Save trace setting XTRACE=$(set +o | grep xtrace) set -o xtrace echo "Magnum's copy_instance_logs.sh was called..." 
SSH_IP=$1 COE=${2-kubernetes} NODE_TYPE=${3-master} LOG_PATH=/opt/stack/logs/cluster-nodes/${NODE_TYPE}-${SSH_IP} KEYPAIR=${4-default} PRIVATE_KEY= echo "If private key is specified, save to temp and use that; else, use default" if [[ "$KEYPAIR" == "default" ]]; then PRIVATE_KEY=$(readlink -f ~/.ssh/id_rsa_magnum) else PRIVATE_KEY="$(mktemp id_rsa_magnum.$SSH_IP.XXX)" echo -en "$KEYPAIR" > $PRIVATE_KEY fi function remote_exec { local ssh_user=$1 local cmd=$2 local logfile=${LOG_PATH}/$3 ssh -i $PRIVATE_KEY -o StrictHostKeyChecking=no ${ssh_user}@${SSH_IP} "${cmd}" > ${logfile} 2>&1 } mkdir -p $LOG_PATH cat /proc/cpuinfo > /opt/stack/logs/cpuinfo.log if [[ "$COE" == "kubernetes" ]]; then SSH_USER=fedora remote_exec $SSH_USER "sudo systemctl --full list-units --no-pager" systemctl_list_units.log remote_exec $SSH_USER "sudo journalctl -u cloud-config --no-pager" cloud-config.log remote_exec $SSH_USER "sudo journalctl -u cloud-final --no-pager" cloud-final.log remote_exec $SSH_USER "sudo journalctl -u cloud-init-local --no-pager" cloud-init-local.log remote_exec $SSH_USER "sudo journalctl -u cloud-init --no-pager" cloud-init.log remote_exec $SSH_USER "sudo cat /var/log/cloud-init-output.log" cloud-init-output.log remote_exec $SSH_USER "sudo journalctl -u kubelet --no-pager" kubelet.log remote_exec $SSH_USER "sudo journalctl -u kube-proxy --no-pager" kube-proxy.log remote_exec $SSH_USER "sudo journalctl -u etcd --no-pager" etcd.log remote_exec $SSH_USER "sudo journalctl -u kube-apiserver --no-pager" kube-apiserver.log remote_exec $SSH_USER "sudo journalctl -u kube-scheduler --no-pager" kube-scheduler.log remote_exec $SSH_USER "sudo journalctl -u kube-controller-manager --no-pager" kube-controller-manager.log remote_exec $SSH_USER "sudo journalctl -u docker-storage-setup --no-pager" docker-storage-setup.log remote_exec $SSH_USER "sudo systemctl status docker-storage-setup -l" docker-storage-setup.service.status.log remote_exec $SSH_USER "sudo systemctl show 
docker-storage-setup --no-pager" docker-storage-setup.service.show.log remote_exec $SSH_USER "sudo cat /etc/sysconfig/docker-storage-setup 2>/dev/null" docker-storage-setup.sysconfig.env.log remote_exec $SSH_USER "sudo journalctl -u docker --no-pager" docker.log remote_exec $SSH_USER "sudo systemctl status docker -l" docker.service.status.log remote_exec $SSH_USER "sudo systemctl show docker --no-pager" docker.service.show.log remote_exec $SSH_USER "sudo cat /etc/sysconfig/docker" docker.sysconfig.env.log remote_exec $SSH_USER "sudo cat /etc/sysconfig/docker-storage" docker-storage.sysconfig.env.log remote_exec $SSH_USER "sudo cat /etc/sysconfig/docker-network" docker-network.sysconfig.env.log remote_exec $SSH_USER "sudo timeout 60s docker ps --all=true --no-trunc=true" docker-containers.log remote_exec $SSH_USER "sudo tar zcvf - /var/lib/docker/containers 2>/dev/null" docker-container-configs.tar.gz remote_exec $SSH_USER "sudo journalctl -u flanneld --no-pager" flanneld.log remote_exec $SSH_USER "sudo ip a" ipa.log remote_exec $SSH_USER "sudo netstat -an" netstat.log remote_exec $SSH_USER "sudo df -h" dfh.log remote_exec $SSH_USER "sudo journalctl -u wc-notify --no-pager" wc-notify.log remote_exec $SSH_USER "sudo cat /etc/sysconfig/heat-params" heat-params remote_exec $SSH_USER "sudo cat /etc/etcd/etcd.conf" etcd.conf remote_exec $SSH_USER "sudo cat /etc/kubernetes/config" kubernetes-config remote_exec $SSH_USER "sudo cat /etc/kubernetes/apiserver" kubernetes-apiserver-config remote_exec $SSH_USER "sudo cat /etc/kubernetes/controller-manager" kubernetes-controller-config remote_exec $SSH_USER "sudo cat /etc/kubernetes/kubelet" kubelet-config remote_exec $SSH_USER "sudo cat /etc/kubernetes/proxy" kubernetes-proxy-config remote_exec $SSH_USER "sudo cat /etc/kubernetes/kubeconfig.yaml" kubeconfig.yaml remote_exec $SSH_USER "sudo tail -n +1 -- /etc/kubernetes/manifests/*" kubernetes-manifests remote_exec $SSH_USER "sudo tail -n +1 -- /etc/kubernetes/certs/*" 
kubernetes-certs remote_exec $SSH_USER "sudo cat /usr/local/bin/wc-notify" bin-wc-notify remote_exec $SSH_USER "sudo cat /etc/kubernetes/kube_openstack_config" kube_openstack_config remote_exec $SSH_USER "sudo cat /etc/sysconfig/flanneld" flanneld.sysconfig remote_exec $SSH_USER "sudo cat /usr/local/bin/flannel-config" bin-flannel-config remote_exec $SSH_USER "sudo cat /etc/sysconfig/flannel-network.json" flannel-network.json.sysconfig remote_exec $SSH_USER "sudo cat /usr/local/bin/flannel-docker-bridge" bin-flannel-docker-bridge remote_exec $SSH_USER "sudo cat /etc/systemd/system/docker.service.d/flannel.conf" docker-flannel.conf remote_exec $SSH_USER "sudo cat /etc/systemd/system/flanneld.service.d/flannel-docker-bridge.conf" flannel-docker-bridge.conf remote_exec $SSH_USER "sudo cat /etc/systemd/system/flannel-docker-bridge.service" flannel-docker-bridge.service remote_exec $SSH_USER "sudo cat /etc/systemd/system/flannel-config.service" flannel-config.service remote_exec $SSH_USER "sudo journalctl -u heat-container-agent --no-pager" heat-container-agent.log remote_exec $SSH_USER "sudo journalctl -u kube-enable-monitoring --no-pager" kube-enable-monitoring.service.log remote_exec $SSH_USER "sudo atomic containers list" atomic-containers-list.log remote_exec $SSH_USER "sudo atomic images list" atomic-images-list.log elif [[ "$COE" == "swarm" || "$COE" == "swarm-mode" ]]; then SSH_USER=fedora remote_exec $SSH_USER "sudo systemctl --full list-units --no-pager" systemctl_list_units.log remote_exec $SSH_USER "sudo journalctl -u cloud-config --no-pager" cloud-config.log remote_exec $SSH_USER "sudo journalctl -u cloud-final --no-pager" cloud-final.log remote_exec $SSH_USER "sudo journalctl -u cloud-init-local --no-pager" cloud-init-local.log remote_exec $SSH_USER "sudo journalctl -u cloud-init --no-pager" cloud-init.log remote_exec $SSH_USER "sudo cat /var/log/cloud-init-output.log" cloud-init-output.log remote_exec $SSH_USER "sudo journalctl -u etcd --no-pager" 
etcd.log remote_exec $SSH_USER "sudo journalctl -u swarm-manager --no-pager" swarm-manager.log remote_exec $SSH_USER "sudo journalctl -u swarm-agent --no-pager" swarm-agent.log remote_exec $SSH_USER "sudo journalctl -u swarm-worker --no-pager" swarm-worker.log remote_exec $SSH_USER "sudo journalctl -u docker-storage-setup --no-pager" docker-storage-setup.log remote_exec $SSH_USER "sudo systemctl status docker-storage-setup -l" docker-storage-setup.service.status.log remote_exec $SSH_USER "sudo systemctl show docker-storage-setup --no-pager" docker-storage-setup.service.show.log remote_exec $SSH_USER "sudo cat /etc/sysconfig/docker-storage-setup 2>/dev/null" docker-storage-setup.sysconfig.env.log remote_exec $SSH_USER "sudo journalctl -u docker --no-pager" docker.log remote_exec $SSH_USER "sudo journalctl -u docker-containerd --no-pager" docker-containerd.log remote_exec $SSH_USER "sudo systemctl status docker.socket -l" docker.socket.status.log remote_exec $SSH_USER "sudo systemctl show docker.socket --no-pager" docker.socket.show.log remote_exec $SSH_USER "sudo systemctl status docker -l" docker.service.status.log remote_exec $SSH_USER "sudo systemctl show docker --no-pager" docker.service.show.log remote_exec $SSH_USER "sudo cat /etc/sysconfig/docker" docker.sysconfig.env.log remote_exec $SSH_USER "sudo cat /etc/sysconfig/docker-storage" docker-storage.sysconfig.env.log remote_exec $SSH_USER "sudo cat /etc/sysconfig/docker-network" docker-network.sysconfig.env.log remote_exec $SSH_USER "sudo timeout 60s docker ps --all=true --no-trunc=true" docker-containers.log remote_exec $SSH_USER "sudo tar zcvf - /var/lib/docker/containers 2>/dev/null" docker-container-configs.tar.gz remote_exec $SSH_USER "sudo journalctl -u flanneld --no-pager" flanneld.log remote_exec $SSH_USER "sudo ip a" ipa.log remote_exec $SSH_USER "sudo netstat -an" netstat.log remote_exec $SSH_USER "sudo df -h" dfh.log remote_exec $SSH_USER "sudo cat /etc/sysconfig/heat-params" heat-params remote_exec 
$SSH_USER "sudo cat /etc/etcd/etcd.conf" etcd.conf remote_exec $SSH_USER "sudo ls -lR /etc/docker" docker-certs remote_exec $SSH_USER "sudo cat /etc/sysconfig/flanneld" flanneld.sysconfig remote_exec $SSH_USER "sudo cat /etc/sysconfig/flannel-network.json" flannel-network.json.sysconfig remote_exec $SSH_USER "sudo cat /usr/local/bin/flannel-docker-bridge" bin-flannel-docker-bridge remote_exec $SSH_USER "sudo cat /etc/systemd/system/docker.service.d/flannel.conf" docker-flannel.conf remote_exec $SSH_USER "sudo cat /etc/systemd/system/flanneld.service.d/flannel-docker-bridge.conf" flannel-docker-bridge.conf remote_exec $SSH_USER "sudo cat /etc/systemd/system/flannel-docker-bridge.service" flannel-docker-bridge.service remote_exec $SSH_USER "sudo cat /etc/systemd/system/swarm-manager.service" swarm-manager.service remote_exec $SSH_USER "sudo cat /etc/systemd/system/swarm-manager-failure.service" swarm-manager-failure.service remote_exec $SSH_USER "sudo cat /etc/systemd/system/swarm-agent.service" swarm-agent.service remote_exec $SSH_USER "sudo cat /etc/systemd/system/swarm-agent-failure.service" swarm-agent-failure.service remote_exec $SSH_USER "sudo cat /etc/systemd/system/swarm-worker.service" swarm-worker.service remote_exec $SSH_USER "sudo cat /usr/local/bin/magnum-start-swarm-manager" bin-magnum-start-swarm-manager remote_exec $SSH_USER "sudo cat /usr/local/bin/magnum-start-swarm-worker" bin-magnum-start-swarm-worker else echo "ERROR: Unknown COE '${COE}'" EXIT_CODE=1 fi # Restore xtrace $XTRACE exit $EXIT_CODE magnum-6.1.0/magnum/tests/contrib/gate_hook.sh0000777000175100017510000001155013244017334021420 0ustar zuulzuul00000000000000#!/bin/bash -x # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # This script is executed inside gate_hook function in devstack gate. coe=$1 special=$2 export PROJECTS="openstack/barbican $PROJECTS" export DEVSTACK_LOCAL_CONFIG="enable_plugin heat git://git.openstack.org/openstack/heat" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service horizon" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-account" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-container" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-object" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-proxy" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service ceilometer-acentral" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service ceilometer-acompute" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service ceilometer-alarm-evaluator" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service ceilometer-alarm-notifier" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service ceilometer-api" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service ceilometer-collector" if egrep --quiet '(vmx|svm)' /proc/cpuinfo; then export DEVSTACK_GATE_LIBVIRT_TYPE=kvm fi if [[ -e /etc/ci/mirror_info.sh ]]; then source /etc/ci/mirror_info.sh fi NODEPOOL_ATOMIC_MIRROR=${NODEPOOL_FEDORA_MIRROR:-https://download.fedoraproject.org/pub/alt} if [ "$coe" = "mesos" ]; then export DEVSTACK_LOCAL_CONFIG+=$'\n'"MAGNUM_GUEST_IMAGE_URL=https://fedorapeople.org/groups/magnum/ubuntu-mesos-ocata.qcow2" elif [ "$coe" = "k8s-coreos" ]; then export DEVSTACK_LOCAL_CONFIG+=$'\n'"MAGNUM_GUEST_IMAGE_URL=http://beta.release.core-os.net/amd64-usr/current/coreos_production_openstack_image.img.bz2" elif [ 
"${coe}${special}" = "k8s-ironic" ]; then export DEVSTACK_LOCAL_CONFIG+=$'\n'"MAGNUM_GUEST_IMAGE_URL='https://fedorapeople.org/groups/magnum/fedora-kubernetes-ironic-latest.tar.gz'" export DEVSTACK_LOCAL_CONFIG+=$'\n'"MAGNUM_IMAGE_NAME='fedora-kubernetes-ironic-latest'" export DEVSTACK_GATE_VIRT_DRIVER="ironic" # NOTE(strigazi) keep cinder # export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service cinder c-sch c-api c-vol" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin ironic git://git.openstack.org/openstack/ironic" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_DEPLOY_DRIVER=pxe_ipmitool" # NOTE(ykarel) Ironic to work with magnum, requires devstack to be configured with IP_VERSION=4 export DEVSTACK_LOCAL_CONFIG+=$'\n'"IP_VERSION=4" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_BAREMETAL_BASIC_OPS=True" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_LOG_DIR=/opt/stack/new/ironic-bm-logs" export DEVSTACK_LOCAL_CONFIG+=$'\n'"DEFAULT_INSTANCE_TYPE=baremetal" export DEVSTACK_LOCAL_CONFIG+=$'\n'"BUILD_TIMEOUT=600" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_CALLBACK_TIMEOUT=600" export DEVSTACK_LOCAL_CONFIG+=$'\n'"Q_AGENT=openvswitch" export DEVSTACK_LOCAL_CONFIG+=$'\n'"Q_ML2_TENANT_NETWORK_TYPE=vxlan" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_BUILD_DEPLOY_RAMDISK=False" # We don't enable swift in Gate Jobs so not required # export DEVSTACK_LOCAL_CONFIG+=$'\n'"SWIFT_ENABLE_TEMPURLS=True" # export DEVSTACK_LOCAL_CONFIG+=$'\n'"SWIFT_TEMPURL_KEY=password" # export DEVSTACK_LOCAL_CONFIG+=$'\n'"SWIFT_HASH=password" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_ENABLED_DRIVERS=fake,agent_ipmitool,pxe_ipmitool" export DEVSTACK_LOCAL_CONFIG+=$'\n'"VOLUME_BACKING_FILE_SIZE=24G" export DEVSTACK_LOCAL_CONFIG+=$'\n'"FORCE_CONFIG_DRIVE=True" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_RAMDISK_TYPE=tinyipa" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_IPXE_ENABLED=False" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_COUNT=2" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_SSH_PORT=22" export 
DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_SPECS_RAM=1024" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_SPECS_DISK=10" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_EPHEMERAL_DISK=5" else export DEVSTACK_LOCAL_CONFIG+=$'\n'"MAGNUM_GUEST_IMAGE_URL='${NODEPOOL_ATOMIC_MIRROR}/atomic/stable/Fedora-Atomic-27-20180212.2/CloudImages/x86_64/images/Fedora-Atomic-27-20180212.2.x86_64.qcow2'" export DEVSTACK_LOCAL_CONFIG+=$'\n'"MAGNUM_IMAGE_NAME='Fedora-Atomic-27-20180212.2.x86_64'" fi # Enable magnum plugin in the last step export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin magnum git://git.openstack.org/openstack/magnum" $BASE/new/devstack-gate/devstack-vm-gate.sh magnum-6.1.0/magnum/tests/base.py0000666000175100017510000001221013244017334016737 0ustar zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import os import fixtures import mock from oslo_config import cfg from oslo_log import log import oslo_messaging from oslotest import base import pecan import testscenarios from magnum.common import context as magnum_context from magnum.common import keystone as magnum_keystone from magnum.objects import base as objects_base from magnum.tests import conf_fixture from magnum.tests import fake_notifier from magnum.tests import policy_fixture CONF = cfg.CONF try: log.register_options(CONF) except cfg.ArgsAlreadyParsedError: pass CONF.set_override('use_stderr', False) class BaseTestCase(testscenarios.WithScenarios, base.BaseTestCase): """Test base class.""" def setUp(self): super(BaseTestCase, self).setUp() self.addCleanup(cfg.CONF.reset) class TestCase(base.BaseTestCase): """Test case base class for all unit tests.""" def setUp(self): super(TestCase, self).setUp() token_info = { 'token': { 'project': { 'id': 'fake_project' }, 'user': { 'id': 'fake_user' } } } trustee_domain_id = '12345678-9012-3456-7890-123456789abc' self.context = magnum_context.RequestContext( auth_token_info=token_info, project_id='fake_project', user_id='fake_user', is_admin=False) self.global_mocks = {} self.keystone_client = magnum_keystone.KeystoneClientV3(self.context) self.policy = self.useFixture(policy_fixture.PolicyFixture()) self.useFixture(fixtures.MockPatchObject( oslo_messaging, 'Notifier', fake_notifier.FakeNotifier)) self.addCleanup(fake_notifier.reset) def make_context(*args, **kwargs): # If context hasn't been constructed with token_info if not kwargs.get('auth_token_info'): kwargs['auth_token_info'] = copy.deepcopy(token_info) if not kwargs.get('project_id'): kwargs['project_id'] = 'fake_project' if not kwargs.get('user_id'): kwargs['user_id'] = 'fake_user' if not kwargs.get('is_admin'): kwargs['is_admin'] = False context = magnum_context.RequestContext(*args, **kwargs) return magnum_context.RequestContext.from_dict(context.to_dict()) p = 
mock.patch.object(magnum_context, 'make_context', side_effect=make_context) self.global_mocks['magnum.common.context.make_context'] = p q = mock.patch.object(magnum_keystone.KeystoneClientV3, 'trustee_domain_id', return_value=trustee_domain_id) self.global_mocks[ 'magnum.common.keystone.KeystoneClientV3.trustee_domain_id'] = q self.mock_make_context = p.start() self.addCleanup(p.stop) self.mock_make_trustee_domain_id = q.start() self.addCleanup(q.stop) self.useFixture(conf_fixture.ConfFixture()) self.useFixture(fixtures.NestedTempfile()) self._base_test_obj_backup = copy.copy( objects_base.MagnumObjectRegistry._registry._obj_classes) self.addCleanup(self._restore_obj_registry) def reset_pecan(): pecan.set_config({}, overwrite=True) self.addCleanup(reset_pecan) def start_global(self, name): self.global_mocks[name].start() def stop_global(self, name): self.global_mocks[name].stop() def _restore_obj_registry(self): objects_base.MagnumObjectRegistry._registry._obj_classes \ = self._base_test_obj_backup def config(self, **kw): """Override config options for a test.""" group = kw.pop('group', None) for k, v in kw.items(): CONF.set_override(k, v, group) def get_path(self, project_file=None): """Get the absolute path to a file. Used for testing the API. :param project_file: File whose path to return. Default: None. :returns: path to the specified file, or path to project root. 
""" root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', ) ) if project_file: return os.path.join(root, project_file) else: return root magnum-6.1.0/magnum/tests/__init__.py0000666000175100017510000000000013244017334017556 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/0000775000175100017510000000000013244017675016444 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/conductor/0000775000175100017510000000000013244017675020444 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/conductor/tasks/0000775000175100017510000000000013244017675021571 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/conductor/tasks/test_heat_tasks.py0000666000175100017510000001151713244017334025327 0ustar zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from taskflow import engines from taskflow.patterns import linear_flow from magnum.conductor.tasks import heat_tasks from magnum.tests import base class HeatTasksTests(base.TestCase): def setUp(self): super(HeatTasksTests, self).setUp() self.heat_client = mock.MagicMock(name='heat_client') def _get_create_stack_flow(self, heat_client): flow = linear_flow.Flow("create stack flow") flow.add( heat_tasks.CreateStack( os_client=heat_client, requires=('stack_name', 'parameters', 'template', 'files'), provides='new_stack', ), ) return flow def _get_update_stack_flow(self, heat_client): flow = linear_flow.Flow("update stack flow") flow.add( heat_tasks.UpdateStack( os_client=heat_client, requires=('stack_id', 'parameters', 'template', 'files'), ), ) return flow def _get_delete_stack_flow(self, heat_client): flow = linear_flow.Flow("delete stack flow") flow.add( heat_tasks.DeleteStack( os_client=heat_client, requires=('stack_id'), ), ) return flow def test_create_stack(self): heat_client = mock.MagicMock(name='heat_client') stack_id = 'stack_id' stack_name = 'stack_name' stack = { 'stack': { 'id': stack_id } } heat_client.stacks.create.return_value = stack flow_store = { 'stack_name': stack_name, 'parameters': 'parameters', 'template': 'template', 'files': 'files' } flow = self._get_create_stack_flow(heat_client) result = engines.run(flow, store=flow_store) heat_client.stacks.create.assert_called_once_with(**flow_store) self.assertEqual(stack_id, result['new_stack']['stack']['id']) def test_create_stack_with_error(self): heat_client = mock.MagicMock(name='heat_client') heat_client.stacks.create.side_effect = ValueError stack_name = 'stack_name' flow_store = { 'stack_name': stack_name, 'parameters': 'parameters', 'template': 'template', 'files': 'files' } flow = self._get_create_stack_flow(heat_client) self.assertRaises(ValueError, engines.run, flow, store=flow_store) def test_update_stack(self): heat_client = mock.MagicMock(name='heat_client') stack_id = 
'stack_id' flow_store = { 'stack_id': stack_id, 'parameters': 'parameters', 'template': 'template', 'files': 'files' } flow = self._get_update_stack_flow(heat_client) expected_params = dict(flow_store) del expected_params['stack_id'] engines.run(flow, store=flow_store) heat_client.stacks.update.assert_called_once_with(stack_id, **expected_params) def test_update_stack_with_error(self): heat_client = mock.MagicMock(name='heat_client') heat_client.stacks.update.side_effect = ValueError stack_id = 'stack_id' flow_store = { 'stack_id': stack_id, 'parameters': 'parameters', 'template': 'template', 'files': 'files' } flow = self._get_update_stack_flow(heat_client) self.assertRaises(ValueError, engines.run, flow, store=flow_store) def test_delete_stack(self): heat_client = mock.MagicMock(name='heat_client') stack_id = 'stack_id' flow_store = {'stack_id': stack_id} flow = self._get_delete_stack_flow(heat_client) engines.run(flow, store=flow_store) heat_client.stacks.delete.assert_called_once_with(stack_id) def test_delete_stack_with_error(self): heat_client = mock.MagicMock(name='heat_client') heat_client.stacks.delete.side_effect = ValueError stack_id = 'stack_id' flow_store = {'stack_id': stack_id} flow = self._get_delete_stack_flow(heat_client) self.assertRaises(ValueError, engines.run, flow, store=flow_store) magnum-6.1.0/magnum/tests/unit/conductor/tasks/__init__.py0000666000175100017510000000000013244017334023662 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/conductor/handlers/0000775000175100017510000000000013244017675022244 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/conductor/handlers/test_swarm_cluster_conductor.py0000666000175100017510000006501613244017334030631 0ustar zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from mock import patch import magnum.conf from magnum.drivers.heat import driver as heat_driver from magnum.drivers.swarm_fedora_atomic_v1 import driver as swarm_dr from magnum import objects from magnum.objects.fields import ClusterStatus as cluster_status from magnum.tests import base CONF = magnum.conf.CONF class TestClusterConductorWithSwarm(base.TestCase): def setUp(self): super(TestClusterConductorWithSwarm, self).setUp() self.cluster_template_dict = { 'image_id': 'image_id', 'flavor_id': 'flavor_id', 'master_flavor_id': 'master_flavor_id', 'keypair_id': 'keypair_id', 'dns_nameserver': 'dns_nameserver', 'docker_volume_size': 20, 'docker_storage_driver': 'devicemapper', 'external_network_id': 'external_network_id', 'fixed_network': 'fixed_network', 'fixed_subnet': 'fixed_subnet', 'cluster_distro': 'fedora-atomic', 'coe': 'swarm', 'http_proxy': 'http_proxy', 'https_proxy': 'https_proxy', 'no_proxy': 'no_proxy', 'tls_disabled': False, 'registry_enabled': False, 'server_type': 'vm', 'network_driver': 'network_driver', 'labels': {'docker_volume_type': 'lvmdriver-1', 'flannel_network_cidr': '10.101.0.0/16', 'flannel_network_subnetlen': '26', 'flannel_backend': 'vxlan', 'rexray_preempt': 'False', 'swarm_strategy': 'spread', 'availability_zone': 'az_1'}, 'master_lb_enabled': False, 'volume_driver': 'rexray' } self.cluster_dict = { 'id': 1, 'uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', 'cluster_template_id': 'xx-xx-xx-xx', 'keypair': 'keypair_id', 'flavor_id': 'flavor_id', 'docker_volume_size': 20, 'master_flavor_id': 'master_flavor_id', 'name': 
'cluster1', 'stack_id': 'xx-xx-xx-xx', 'api_address': '172.17.2.3', 'node_addresses': ['172.17.2.4'], 'master_count': 1, 'node_count': 1, 'discovery_url': 'https://discovery.test.io/123456789', 'trustee_username': 'fake_trustee', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 'trust_id': 'bd11efc5-d4e2-4dac-bbce-25e348ddf7de', 'labels': {'docker_volume_type': 'lvmdriver-1', 'flannel_network_cidr': '10.101.0.0/16', 'flannel_network_subnetlen': '26', 'flannel_backend': 'vxlan', 'rexray_preempt': 'False', 'swarm_strategy': 'spread', 'availability_zone': 'az_1'}, 'coe_version': 'fake-version' } # We need this due to volume_driver=rexray CONF.set_override('cluster_user_trust', True, group='trust') osc_patcher = mock.patch('magnum.common.clients.OpenStackClients') self.mock_osc_class = osc_patcher.start() self.addCleanup(osc_patcher.stop) self.mock_osc = mock.MagicMock() self.mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1' self.mock_osc.url_for.return_value = 'http://192.168.10.10:5000/v3' self.mock_keystone = mock.MagicMock() self.mock_keystone.trustee_domain_id = 'trustee_domain_id' self.mock_osc.keystone.return_value = self.mock_keystone self.mock_osc_class.return_value = self.mock_osc @patch('requests.get') @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.drivers.common.driver.Driver.get_driver') def test_extract_template_definition_all_values( self, mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get): cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_objects_cluster_template_get_by_uuid.return_value = \ cluster_template expected_result = str('{"action":"get","node":{"key":"test","value":' '"1","modifiedIndex":10,"createdIndex":10}}') mock_resp = mock.MagicMock() mock_resp.text = expected_result mock_get.return_value = mock_resp mock_driver.return_value = swarm_dr.Driver() cluster = objects.Cluster(self.context, 
**self.cluster_dict) (template_path, definition, env_files) = mock_driver()._extract_template_definition(self.context, cluster) expected = { 'ssh_key_name': 'keypair_id', 'external_network': 'external_network_id', 'fixed_network': 'fixed_network', 'fixed_subnet': 'fixed_subnet', 'dns_nameserver': 'dns_nameserver', 'server_image': 'image_id', 'master_flavor': 'master_flavor_id', 'node_flavor': 'flavor_id', 'number_of_masters': 1, 'number_of_nodes': 1, 'docker_volume_size': 20, 'docker_storage_driver': 'devicemapper', 'discovery_url': 'https://discovery.test.io/123456789', 'http_proxy': 'http_proxy', 'https_proxy': 'https_proxy', 'no_proxy': 'no_proxy', 'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', 'magnum_url': self.mock_osc.magnum_url.return_value, 'tls_disabled': False, 'registry_enabled': False, 'network_driver': 'network_driver', 'flannel_network_cidr': '10.101.0.0/16', 'flannel_network_subnetlen': '26', 'flannel_backend': 'vxlan', 'trustee_domain_id': self.mock_keystone.trustee_domain_id, 'trustee_username': 'fake_trustee', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 'trust_id': 'bd11efc5-d4e2-4dac-bbce-25e348ddf7de', 'auth_url': 'http://192.168.10.10:5000/v3', 'swarm_version': 'fake-version', 'swarm_strategy': u'spread', 'volume_driver': 'rexray', 'rexray_preempt': 'False', 'docker_volume_type': 'lvmdriver-1', 'verify_ca': True, 'openstack_ca': '', 'nodes_affinity_policy': 'soft-anti-affinity' } self.assertEqual(expected, definition) self.assertEqual( ['../../common/templates/environments/no_private_network.yaml', '../../common/templates/environments/with_volume.yaml', '../../common/templates/environments/no_master_lb.yaml'], env_files) @patch('requests.get') @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.drivers.common.driver.Driver.get_driver') def test_extract_template_definition_with_registry( self, mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get): 
self.cluster_template_dict['registry_enabled'] = True cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_objects_cluster_template_get_by_uuid.return_value = \ cluster_template expected_result = str('{"action":"get","node":{"key":"test","value":' '"1","modifiedIndex":10,"createdIndex":10}}') mock_resp = mock.MagicMock() mock_resp.text = expected_result mock_get.return_value = mock_resp mock_driver.return_value = swarm_dr.Driver() cluster = objects.Cluster(self.context, **self.cluster_dict) CONF.set_override('swift_region', 'RegionOne', group='docker_registry') (template_path, definition, env_files) = mock_driver()._extract_template_definition(self.context, cluster) expected = { 'ssh_key_name': 'keypair_id', 'external_network': 'external_network_id', 'fixed_network': 'fixed_network', 'fixed_subnet': 'fixed_subnet', 'dns_nameserver': 'dns_nameserver', 'server_image': 'image_id', 'master_flavor': 'master_flavor_id', 'node_flavor': 'flavor_id', 'number_of_masters': 1, 'number_of_nodes': 1, 'docker_volume_size': 20, 'discovery_url': 'https://discovery.test.io/123456789', 'http_proxy': 'http_proxy', 'https_proxy': 'https_proxy', 'no_proxy': 'no_proxy', 'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', 'magnum_url': self.mock_osc.magnum_url.return_value, 'tls_disabled': False, 'registry_enabled': True, 'registry_container': 'docker_registry', 'swift_region': 'RegionOne', 'network_driver': 'network_driver', 'flannel_network_cidr': '10.101.0.0/16', 'flannel_network_subnetlen': '26', 'flannel_backend': 'vxlan', 'trustee_domain_id': self.mock_keystone.trustee_domain_id, 'trustee_username': 'fake_trustee', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 'trust_id': 'bd11efc5-d4e2-4dac-bbce-25e348ddf7de', 'auth_url': 'http://192.168.10.10:5000/v3', 'docker_storage_driver': 'devicemapper', 'swarm_version': 'fake-version', 'swarm_strategy': u'spread', 'volume_driver': 'rexray', 
'rexray_preempt': 'False', 'docker_volume_type': 'lvmdriver-1', 'verify_ca': True, 'openstack_ca': '', 'nodes_affinity_policy': 'soft-anti-affinity' } self.assertEqual(expected, definition) self.assertEqual( ['../../common/templates/environments/no_private_network.yaml', '../../common/templates/environments/with_volume.yaml', '../../common/templates/environments/no_master_lb.yaml'], env_files) @patch('requests.get') @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.drivers.common.driver.Driver.get_driver') def test_extract_template_definition_only_required( self, mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get): not_required = ['image_id', 'flavor_id', 'dns_nameserver', 'docker_volume_size', 'fixed_network', 'http_proxy', 'https_proxy', 'no_proxy', 'network_driver', 'master_flavor_id', 'docker_storage_driver', 'volume_driver', 'rexray_preempt', 'fixed_subnet', 'docker_volume_type', 'availablity_zone'] for key in not_required: self.cluster_template_dict[key] = None self.cluster_dict['discovery_url'] = 'https://discovery.etcd.io/test' cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_objects_cluster_template_get_by_uuid.return_value = \ cluster_template expected_result = str('{"action":"get","node":{"key":"test","value":' '"1","modifiedIndex":10,"createdIndex":10}}') mock_resp = mock.MagicMock() mock_resp.text = expected_result mock_get.return_value = mock_resp mock_driver.return_value = swarm_dr.Driver() cluster = objects.Cluster(self.context, **self.cluster_dict) (template_path, definition, env_files) = mock_driver()._extract_template_definition(self.context, cluster) expected = { 'ssh_key_name': 'keypair_id', 'external_network': 'external_network_id', 'number_of_masters': 1, 'number_of_nodes': 1, 'discovery_url': 'https://discovery.etcd.io/test', 'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', 'magnum_url': self.mock_osc.magnum_url.return_value, 'tls_disabled': False, 
'registry_enabled': False, 'flannel_network_cidr': u'10.101.0.0/16', 'flannel_network_subnetlen': u'26', 'flannel_backend': u'vxlan', 'trustee_domain_id': self.mock_keystone.trustee_domain_id, 'trustee_username': 'fake_trustee', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 'trust_id': 'bd11efc5-d4e2-4dac-bbce-25e348ddf7de', 'auth_url': 'http://192.168.10.10:5000/v3', 'swarm_version': 'fake-version', 'swarm_strategy': u'spread', 'rexray_preempt': 'False', 'docker_volume_type': 'lvmdriver-1', 'docker_volume_size': 20, 'master_flavor': 'master_flavor_id', 'verify_ca': True, 'node_flavor': 'flavor_id', 'openstack_ca': '', 'nodes_affinity_policy': 'soft-anti-affinity' } self.assertEqual(expected, definition) self.assertEqual( ['../../common/templates/environments/with_private_network.yaml', '../../common/templates/environments/with_volume.yaml', '../../common/templates/environments/no_master_lb.yaml'], env_files) @patch('requests.get') @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.common.keystone.KeystoneClientV3') def test_extract_template_definition_with_lb_neutron( self, mock_kc, mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get): self.cluster_template_dict['master_lb_enabled'] = True cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_objects_cluster_template_get_by_uuid.return_value = \ cluster_template expected_result = str('{"action":"get","node":{"key":"test","value":' '"1","modifiedIndex":10,"createdIndex":10}}') mock_resp = mock.MagicMock() mock_resp.text = expected_result mock_get.return_value = mock_resp mock_driver.return_value = swarm_dr.Driver() cluster = objects.Cluster(self.context, **self.cluster_dict) mock_kc.return_value.client.services.list.return_value = [] (template_path, definition, env_files) = mock_driver()._extract_template_definition(self.context, 
cluster) expected = { 'ssh_key_name': 'keypair_id', 'external_network': 'external_network_id', 'fixed_network': 'fixed_network', 'fixed_subnet': 'fixed_subnet', 'dns_nameserver': 'dns_nameserver', 'server_image': 'image_id', 'master_flavor': 'master_flavor_id', 'node_flavor': 'flavor_id', 'number_of_masters': 1, 'number_of_nodes': 1, 'docker_volume_size': 20, 'docker_storage_driver': 'devicemapper', 'discovery_url': 'https://discovery.test.io/123456789', 'http_proxy': 'http_proxy', 'https_proxy': 'https_proxy', 'no_proxy': 'no_proxy', 'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', 'magnum_url': self.mock_osc.magnum_url.return_value, 'tls_disabled': False, 'registry_enabled': False, 'network_driver': 'network_driver', 'flannel_network_cidr': '10.101.0.0/16', 'flannel_network_subnetlen': '26', 'flannel_backend': 'vxlan', 'trustee_domain_id': self.mock_keystone.trustee_domain_id, 'trustee_username': 'fake_trustee', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 'trust_id': 'bd11efc5-d4e2-4dac-bbce-25e348ddf7de', 'auth_url': 'http://192.168.10.10:5000/v3', 'swarm_version': 'fake-version', 'swarm_strategy': u'spread', 'volume_driver': 'rexray', 'rexray_preempt': 'False', 'docker_volume_type': 'lvmdriver-1', 'verify_ca': True, 'openstack_ca': '', 'nodes_affinity_policy': 'soft-anti-affinity' } self.assertEqual(expected, definition) self.assertEqual( ['../../common/templates/environments/no_private_network.yaml', '../../common/templates/environments/with_volume.yaml', '../../common/templates/environments/with_master_lb.yaml'], env_files) @patch('requests.get') @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.common.keystone.KeystoneClientV3') def test_extract_template_definition_with_lb_octavia( self, mock_kc, mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get): self.cluster_template_dict['master_lb_enabled'] = True 
cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_objects_cluster_template_get_by_uuid.return_value = \ cluster_template expected_result = str('{"action":"get","node":{"key":"test","value":' '"1","modifiedIndex":10,"createdIndex":10}}') mock_resp = mock.MagicMock() mock_resp.text = expected_result mock_get.return_value = mock_resp mock_driver.return_value = swarm_dr.Driver() cluster = objects.Cluster(self.context, **self.cluster_dict) class Service(object): def __init__(self): self.enabled = True mock_kc.return_value.client.services.list.return_value = [Service()] (template_path, definition, env_files) = mock_driver()._extract_template_definition(self.context, cluster) expected = { 'ssh_key_name': 'keypair_id', 'external_network': 'external_network_id', 'fixed_network': 'fixed_network', 'fixed_subnet': 'fixed_subnet', 'dns_nameserver': 'dns_nameserver', 'server_image': 'image_id', 'master_flavor': 'master_flavor_id', 'node_flavor': 'flavor_id', 'number_of_masters': 1, 'number_of_nodes': 1, 'docker_volume_size': 20, 'docker_storage_driver': 'devicemapper', 'discovery_url': 'https://discovery.test.io/123456789', 'http_proxy': 'http_proxy', 'https_proxy': 'https_proxy', 'no_proxy': 'no_proxy', 'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', 'magnum_url': self.mock_osc.magnum_url.return_value, 'tls_disabled': False, 'registry_enabled': False, 'network_driver': 'network_driver', 'flannel_network_cidr': '10.101.0.0/16', 'flannel_network_subnetlen': '26', 'flannel_backend': 'vxlan', 'trustee_domain_id': self.mock_keystone.trustee_domain_id, 'trustee_username': 'fake_trustee', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 'trust_id': 'bd11efc5-d4e2-4dac-bbce-25e348ddf7de', 'auth_url': 'http://192.168.10.10:5000/v3', 'swarm_version': 'fake-version', 'swarm_strategy': u'spread', 'volume_driver': 'rexray', 'rexray_preempt': 'False', 'docker_volume_type': 'lvmdriver-1', 
'verify_ca': True, 'openstack_ca': '', 'nodes_affinity_policy': 'soft-anti-affinity' } self.assertEqual(expected, definition) self.assertEqual( ['../../common/templates/environments/no_private_network.yaml', '../../common/templates/environments/with_volume.yaml', '../../common/templates/environments/with_master_lb_octavia.yaml' ], env_files) @patch('requests.get') @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.common.keystone.KeystoneClientV3') def test_extract_template_definition_multi_master( self, mock_kc, mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get): self.cluster_template_dict['master_lb_enabled'] = True self.cluster_dict['master_count'] = 2 cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_objects_cluster_template_get_by_uuid.return_value = \ cluster_template expected_result = str('{"action":"get","node":{"key":"test","value":' '"2","modifiedIndex":10,"createdIndex":10}}') mock_resp = mock.MagicMock() mock_resp.text = expected_result mock_get.return_value = mock_resp mock_driver.return_value = swarm_dr.Driver() cluster = objects.Cluster(self.context, **self.cluster_dict) mock_kc.return_value.client.services.list.return_value = [] (template_path, definition, env_files) = mock_driver()._extract_template_definition(self.context, cluster) expected = { 'ssh_key_name': 'keypair_id', 'external_network': 'external_network_id', 'fixed_network': 'fixed_network', 'fixed_subnet': 'fixed_subnet', 'dns_nameserver': 'dns_nameserver', 'server_image': 'image_id', 'master_flavor': 'master_flavor_id', 'node_flavor': 'flavor_id', 'number_of_masters': 2, 'number_of_nodes': 1, 'docker_volume_size': 20, 'docker_storage_driver': 'devicemapper', 'discovery_url': 'https://discovery.test.io/123456789', 'http_proxy': 'http_proxy', 'https_proxy': 'https_proxy', 'no_proxy': 'no_proxy', 'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', 
'magnum_url': self.mock_osc.magnum_url.return_value, 'tls_disabled': False, 'registry_enabled': False, 'network_driver': 'network_driver', 'flannel_network_cidr': '10.101.0.0/16', 'flannel_network_subnetlen': '26', 'flannel_backend': 'vxlan', 'trustee_domain_id': self.mock_keystone.trustee_domain_id, 'trustee_username': 'fake_trustee', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 'trust_id': 'bd11efc5-d4e2-4dac-bbce-25e348ddf7de', 'auth_url': 'http://192.168.10.10:5000/v3', 'swarm_version': 'fake-version', 'swarm_strategy': u'spread', 'volume_driver': 'rexray', 'rexray_preempt': 'False', 'docker_volume_type': 'lvmdriver-1', 'verify_ca': True, 'openstack_ca': '', 'nodes_affinity_policy': 'soft-anti-affinity' } self.assertEqual(expected, definition) self.assertEqual( ['../../common/templates/environments/no_private_network.yaml', '../../common/templates/environments/with_volume.yaml', '../../common/templates/environments/with_master_lb.yaml'], env_files) @patch('magnum.conductor.utils.retrieve_cluster_template') @patch('magnum.conf.CONF') @patch('magnum.common.clients.OpenStackClients') @patch('magnum.drivers.common.driver.Driver.get_driver') def setup_poll_test(self, mock_driver, mock_openstack_client, mock_conf, mock_retrieve_cluster_template): mock_conf.cluster_heat.max_attempts = 10 cluster = mock.MagicMock() mock_heat_stack = mock.MagicMock() mock_heat_client = mock.MagicMock() mock_heat_client.stacks.get.return_value = mock_heat_stack mock_openstack_client.heat.return_value = mock_heat_client cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_retrieve_cluster_template.return_value = \ cluster_template mock_driver.return_value = swarm_dr.Driver() poller = heat_driver.HeatPoller(mock_openstack_client, mock.MagicMock(), cluster, swarm_dr.Driver()) poller.get_version_info = mock.MagicMock() return (mock_heat_stack, cluster, poller) def test_poll_node_count(self): 
mock_heat_stack, cluster, poller = self.setup_poll_test() mock_heat_stack.parameters = {'number_of_nodes': 1} mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS poller.poll_and_check() self.assertEqual(1, cluster.node_count) def test_poll_node_count_by_update(self): mock_heat_stack, cluster, poller = self.setup_poll_test() mock_heat_stack.parameters = {'number_of_nodes': 2} mock_heat_stack.stack_status = cluster_status.UPDATE_COMPLETE poller.poll_and_check() self.assertEqual(2, cluster.node_count) magnum-6.1.0/magnum/tests/unit/conductor/handlers/test_indirection_api.py0000666000175100017510000000645713244017334027023 0ustar zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import oslo_messaging as messaging from oslo_versionedobjects import fields from magnum.conductor.handlers import indirection_api from magnum.objects import base as obj_base from magnum.tests import base class TestIndirectionApiConductor(base.TestCase): def setUp(self): super(TestIndirectionApiConductor, self).setUp() self.conductor = indirection_api.Handler() def _test_object_action(self, is_classmethod, raise_exception): @obj_base.MagnumObjectRegistry.register class TestObject(obj_base.MagnumObject): def foo(self, context, raise_exception=False): if raise_exception: raise Exception('test') else: return 'test' @classmethod def bar(cls, context, raise_exception=False): if raise_exception: raise Exception('test') else: return 'test' obj = TestObject() if is_classmethod: result = self.conductor.object_class_action( self.context, TestObject.obj_name(), 'bar', '1.0', tuple(), {'raise_exception': raise_exception}) else: updates, result = self.conductor.object_action( self.context, obj, 'foo', tuple(), {'raise_exception': raise_exception}) self.assertEqual('test', result) def test_object_action(self): self._test_object_action(False, False) def test_object_action_on_raise(self): self.assertRaises(messaging.ExpectedException, self._test_object_action, False, True) def test_object_class_action(self): self._test_object_action(True, False) def test_object_class_action_on_raise(self): self.assertRaises(messaging.ExpectedException, self._test_object_action, True, True) def test_object_action_copies_object(self): @obj_base.MagnumObjectRegistry.register class TestObject(obj_base.MagnumObject): fields = {'dict': fields.DictOfStringsField()} def touch_dict(self, context): self.dict['foo'] = 'bar' self.obj_reset_changes() obj = TestObject() obj.dict = {} obj.obj_reset_changes() updates, result = self.conductor.object_action( self.context, obj, 'touch_dict', tuple(), {}) # NOTE(danms): If conductor did not properly copy the object, then # the new and reference copies of the nested 
dict object will be # the same, and thus 'dict' will not be reported as changed self.assertIn('dict', updates) self.assertEqual({'foo': 'bar'}, updates['dict']) magnum-6.1.0/magnum/tests/unit/conductor/handlers/test_conductor_listener.py0000666000175100017510000000156113244017334027557 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.conductor.handlers import conductor_listener from magnum.tests import base class TestHandler(base.BaseTestCase): def setUp(self): super(TestHandler, self).setUp() self.handler = conductor_listener.Handler() def test_ping_conductor(self): self.assertTrue(self.handler.ping_conductor({})) magnum-6.1.0/magnum/tests/unit/conductor/handlers/test_mesos_cluster_conductor.py0000666000175100017510000004604013244017334030622 0ustar zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from mock import patch from magnum.drivers.heat import driver as heat_driver from magnum.drivers.mesos_ubuntu_v1 import driver as mesos_dr from magnum import objects from magnum.objects.fields import ClusterStatus as cluster_status from magnum.tests import base class TestClusterConductorWithMesos(base.TestCase): def setUp(self): super(TestClusterConductorWithMesos, self).setUp() self.cluster_template_dict = { 'image_id': 'image_id', 'flavor_id': 'flavor_id', 'master_flavor_id': 'master_flavor_id', 'keypair_id': 'keypair_id', 'dns_nameserver': 'dns_nameserver', 'external_network_id': 'external_network_id', 'cluster_distro': 'ubuntu', 'coe': 'mesos', 'http_proxy': 'http_proxy', 'https_proxy': 'https_proxy', 'no_proxy': 'no_proxy', 'registry_enabled': False, 'server_type': 'vm', 'volume_driver': 'volume_driver', 'labels': {'rexray_preempt': 'False', 'mesos_slave_isolation': 'docker/runtime,filesystem/linux', 'mesos_slave_image_providers': 'docker', 'mesos_slave_executor_env_variables': '{}', 'mesos_slave_work_dir': '/tmp/mesos/slave' }, 'master_lb_enabled': False, 'fixed_network': 'fixed_network', 'fixed_subnet': 'fixed_subnet', } self.cluster_dict = { 'id': 1, 'uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', 'cluster_template_id': 'xx-xx-xx-xx', 'keypair': 'keypair_id', 'master_flavor_id': 'master_flavor_id', 'flavor_id': 'flavor_id', 'name': 'cluster1', 'stack_id': 'xx-xx-xx-xx', 'api_address': '172.17.2.3', 'node_addresses': ['172.17.2.4'], 'node_count': 1, 'master_count': 1, 'trustee_username': 'fake_trustee', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 'trust_id': 'bd11efc5-d4e2-4dac-bbce-25e348ddf7de', 'labels': {'rexray_preempt': 'False', 'mesos_slave_isolation': 'docker/runtime,filesystem/linux', 'mesos_slave_image_providers': 'docker', 'mesos_slave_executor_env_variables': '{}', 'mesos_slave_work_dir': '/tmp/mesos/slave' }, } self.context.user_name = 'mesos_user' self.context.project_id = 
'admin' self.context.domain_name = 'domainname' osc_patcher = mock.patch('magnum.common.clients.OpenStackClients') self.mock_osc_class = osc_patcher.start() self.addCleanup(osc_patcher.stop) self.mock_osc = mock.MagicMock() self.mock_osc.cinder_region_name.return_value = 'RegionOne' self.mock_keystone = mock.MagicMock() self.mock_keystone.trustee_domain_id = 'trustee_domain_id' self.mock_osc.keystone.return_value = self.mock_keystone self.mock_osc_class.return_value = self.mock_osc self.mock_osc.url_for.return_value = 'http://192.168.10.10:5000/v3' @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.drivers.common.driver.Driver.get_driver') def test_extract_template_definition_all_values( self, mock_driver, mock_objects_cluster_template_get_by_uuid): cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_objects_cluster_template_get_by_uuid.return_value = \ cluster_template cluster = objects.Cluster(self.context, **self.cluster_dict) mock_driver.return_value = mesos_dr.Driver() (template_path, definition, env_files) = mock_driver()._extract_template_definition(self.context, cluster) expected = { 'ssh_key_name': 'keypair_id', 'external_network': 'external_network_id', 'fixed_network': 'fixed_network', 'fixed_subnet': 'fixed_subnet', 'dns_nameserver': 'dns_nameserver', 'server_image': 'image_id', 'master_flavor': 'master_flavor_id', 'slave_flavor': 'flavor_id', 'number_of_slaves': 1, 'number_of_masters': 1, 'http_proxy': 'http_proxy', 'https_proxy': 'https_proxy', 'no_proxy': 'no_proxy', 'cluster_name': 'cluster1', 'trustee_domain_id': self.mock_keystone.trustee_domain_id, 'trustee_username': 'fake_trustee', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 'trust_id': '', 'volume_driver': 'volume_driver', 'auth_url': 'http://192.168.10.10:5000/v3', 'region_name': self.mock_osc.cinder_region_name.return_value, 'username': 'mesos_user', 'tenant_name': 'admin', 
'domain_name': 'domainname', 'rexray_preempt': 'False', 'mesos_slave_executor_env_variables': '{}', 'mesos_slave_isolation': 'docker/runtime,filesystem/linux', 'mesos_slave_work_dir': '/tmp/mesos/slave', 'mesos_slave_image_providers': 'docker', 'verify_ca': True, 'openstack_ca': '', } self.assertEqual(expected, definition) self.assertEqual( ['../../common/templates/environments/no_private_network.yaml', '../../common/templates/environments/no_master_lb.yaml'], env_files) @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.drivers.common.driver.Driver.get_driver') def test_extract_template_definition_only_required( self, mock_driver, mock_objects_cluster_template_get_by_uuid): not_required = ['image_id', 'master_flavor_id', 'flavor_id', 'dns_nameserver', 'fixed_network', 'http_proxy', 'https_proxy', 'no_proxy', 'volume_driver', 'fixed_subnet'] for key in not_required: self.cluster_template_dict[key] = None cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_objects_cluster_template_get_by_uuid.return_value = \ cluster_template cluster = objects.Cluster(self.context, **self.cluster_dict) mock_driver.return_value = mesos_dr.Driver() (template_path, definition, env_files) = mock_driver()._extract_template_definition(self.context, cluster) expected = { 'ssh_key_name': 'keypair_id', 'external_network': 'external_network_id', 'number_of_slaves': 1, 'number_of_masters': 1, 'cluster_name': 'cluster1', 'trustee_domain_id': self.mock_keystone.trustee_domain_id, 'trustee_username': 'fake_trustee', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 'trust_id': '', 'auth_url': 'http://192.168.10.10:5000/v3', 'region_name': self.mock_osc.cinder_region_name.return_value, 'username': 'mesos_user', 'tenant_name': 'admin', 'domain_name': 'domainname', 'rexray_preempt': 'False', 'mesos_slave_isolation': 'docker/runtime,filesystem/linux', 'mesos_slave_executor_env_variables': 
'{}', 'mesos_slave_work_dir': '/tmp/mesos/slave', 'mesos_slave_image_providers': 'docker', 'master_flavor': 'master_flavor_id', 'verify_ca': True, 'slave_flavor': 'flavor_id', 'openstack_ca': '', } self.assertEqual(expected, definition) self.assertEqual( ['../../common/templates/environments/with_private_network.yaml', '../../common/templates/environments/no_master_lb.yaml'], env_files) @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.common.keystone.KeystoneClientV3') def test_extract_template_definition_with_lb_neutron( self, mock_kc, mock_driver, mock_objects_cluster_template_get_by_uuid): self.cluster_template_dict['master_lb_enabled'] = True cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_objects_cluster_template_get_by_uuid.return_value = \ cluster_template cluster = objects.Cluster(self.context, **self.cluster_dict) mock_driver.return_value = mesos_dr.Driver() mock_kc.return_value.client.services.list.return_value = [] (template_path, definition, env_files) = mock_driver()._extract_template_definition(self.context, cluster) expected = { 'ssh_key_name': 'keypair_id', 'external_network': 'external_network_id', 'fixed_network': 'fixed_network', 'fixed_subnet': 'fixed_subnet', 'dns_nameserver': 'dns_nameserver', 'server_image': 'image_id', 'master_flavor': 'master_flavor_id', 'slave_flavor': 'flavor_id', 'number_of_slaves': 1, 'number_of_masters': 1, 'http_proxy': 'http_proxy', 'https_proxy': 'https_proxy', 'no_proxy': 'no_proxy', 'cluster_name': 'cluster1', 'trustee_domain_id': self.mock_keystone.trustee_domain_id, 'trustee_username': 'fake_trustee', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 'trust_id': '', 'volume_driver': 'volume_driver', 'auth_url': 'http://192.168.10.10:5000/v3', 'region_name': self.mock_osc.cinder_region_name.return_value, 'username': 'mesos_user', 
'tenant_name': 'admin', 'domain_name': 'domainname', 'rexray_preempt': 'False', 'mesos_slave_executor_env_variables': '{}', 'mesos_slave_isolation': 'docker/runtime,filesystem/linux', 'mesos_slave_work_dir': '/tmp/mesos/slave', 'mesos_slave_image_providers': 'docker', 'verify_ca': True, 'openstack_ca': '', } self.assertEqual(expected, definition) self.assertEqual( ['../../common/templates/environments/no_private_network.yaml', '../../common/templates/environments/with_master_lb.yaml'], env_files) @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.common.keystone.KeystoneClientV3') def test_extract_template_definition_with_lb_octavia( self, mock_kc, mock_driver, mock_objects_cluster_template_get_by_uuid): self.cluster_template_dict['master_lb_enabled'] = True cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_objects_cluster_template_get_by_uuid.return_value = \ cluster_template cluster = objects.Cluster(self.context, **self.cluster_dict) mock_driver.return_value = mesos_dr.Driver() class Service(object): def __init__(self): self.enabled = True mock_kc.return_value.client.services.list.return_value = [Service()] (template_path, definition, env_files) = mock_driver()._extract_template_definition(self.context, cluster) expected = { 'ssh_key_name': 'keypair_id', 'external_network': 'external_network_id', 'fixed_network': 'fixed_network', 'fixed_subnet': 'fixed_subnet', 'dns_nameserver': 'dns_nameserver', 'server_image': 'image_id', 'master_flavor': 'master_flavor_id', 'slave_flavor': 'flavor_id', 'number_of_slaves': 1, 'number_of_masters': 1, 'http_proxy': 'http_proxy', 'https_proxy': 'https_proxy', 'no_proxy': 'no_proxy', 'cluster_name': 'cluster1', 'trustee_domain_id': self.mock_keystone.trustee_domain_id, 'trustee_username': 'fake_trustee', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 
'trust_id': '', 'volume_driver': 'volume_driver', 'auth_url': 'http://192.168.10.10:5000/v3', 'region_name': self.mock_osc.cinder_region_name.return_value, 'username': 'mesos_user', 'tenant_name': 'admin', 'domain_name': 'domainname', 'rexray_preempt': 'False', 'mesos_slave_executor_env_variables': '{}', 'mesos_slave_isolation': 'docker/runtime,filesystem/linux', 'mesos_slave_work_dir': '/tmp/mesos/slave', 'mesos_slave_image_providers': 'docker', 'verify_ca': True, 'openstack_ca': '', } self.assertEqual(expected, definition) self.assertEqual( ['../../common/templates/environments/no_private_network.yaml', '../../common/templates/environments/with_master_lb_octavia.yaml' ], env_files) @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.common.keystone.KeystoneClientV3') def test_extract_template_definition_multi_master( self, mock_kc, mock_driver, mock_objects_cluster_template_get_by_uuid): self.cluster_template_dict['master_lb_enabled'] = True self.cluster_dict['master_count'] = 2 cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_objects_cluster_template_get_by_uuid.return_value = \ cluster_template cluster = objects.Cluster(self.context, **self.cluster_dict) mock_driver.return_value = mesos_dr.Driver() mock_kc.return_value.client.services.list.return_value = [] (template_path, definition, env_files) = mock_driver()._extract_template_definition(self.context, cluster) expected = { 'ssh_key_name': 'keypair_id', 'external_network': 'external_network_id', 'fixed_network': 'fixed_network', 'fixed_subnet': 'fixed_subnet', 'dns_nameserver': 'dns_nameserver', 'server_image': 'image_id', 'master_flavor': 'master_flavor_id', 'slave_flavor': 'flavor_id', 'number_of_slaves': 1, 'number_of_masters': 2, 'http_proxy': 'http_proxy', 'https_proxy': 'https_proxy', 'no_proxy': 'no_proxy', 'cluster_name': 'cluster1', 'trustee_domain_id': 
self.mock_keystone.trustee_domain_id, 'trustee_username': 'fake_trustee', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 'trust_id': '', 'volume_driver': 'volume_driver', 'auth_url': 'http://192.168.10.10:5000/v3', 'region_name': self.mock_osc.cinder_region_name.return_value, 'username': 'mesos_user', 'tenant_name': 'admin', 'domain_name': 'domainname', 'rexray_preempt': 'False', 'mesos_slave_executor_env_variables': '{}', 'mesos_slave_isolation': 'docker/runtime,filesystem/linux', 'mesos_slave_work_dir': '/tmp/mesos/slave', 'mesos_slave_image_providers': 'docker', 'verify_ca': True, 'openstack_ca': '', } self.assertEqual(expected, definition) self.assertEqual( ['../../common/templates/environments/no_private_network.yaml', '../../common/templates/environments/with_master_lb.yaml'], env_files) @patch('magnum.conductor.utils.retrieve_cluster_template') @patch('magnum.conf.CONF') @patch('magnum.common.clients.OpenStackClients') @patch('magnum.drivers.common.driver.Driver.get_driver') def setup_poll_test(self, mock_driver, mock_openstack_client, mock_conf, mock_retrieve_cluster_template): mock_conf.cluster_heat.max_attempts = 10 cluster = mock.MagicMock() mock_heat_stack = mock.MagicMock() mock_heat_client = mock.MagicMock() mock_heat_client.stacks.get.return_value = mock_heat_stack mock_openstack_client.heat.return_value = mock_heat_client mock_driver.return_value = mesos_dr.Driver() cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_retrieve_cluster_template.return_value = cluster_template poller = heat_driver.HeatPoller(mock_openstack_client, mock.MagicMock(), cluster, mesos_dr.Driver()) poller.get_version_info = mock.MagicMock() return (mock_heat_stack, cluster, poller) def test_poll_node_count(self): mock_heat_stack, cluster, poller = self.setup_poll_test() mock_heat_stack.parameters = {'number_of_slaves': 1} mock_heat_stack.stack_status = 
cluster_status.CREATE_IN_PROGRESS poller.poll_and_check() self.assertEqual(1, cluster.node_count) def test_poll_node_count_by_update(self): mock_heat_stack, cluster, poller = self.setup_poll_test() mock_heat_stack.parameters = {'number_of_slaves': 2} mock_heat_stack.stack_status = cluster_status.UPDATE_COMPLETE poller.poll_and_check() self.assertEqual(2, cluster.node_count) magnum-6.1.0/magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py0000666000175100017510000012577313244017334030214 0ustar zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from mock import patch import magnum.conf from magnum.drivers.k8s_coreos_v1 import driver as k8s_coreos_dr from magnum.drivers.k8s_fedora_atomic_v1 import driver as k8s_dr from magnum import objects from magnum.tests import base CONF = magnum.conf.CONF class TestClusterConductorWithK8s(base.TestCase): def setUp(self): super(TestClusterConductorWithK8s, self).setUp() self.cluster_template_dict = { 'image_id': 'image_id', 'flavor_id': 'flavor_id', 'master_flavor_id': 'master_flavor_id', 'keypair_id': 'keypair_id', 'dns_nameserver': 'dns_nameserver', 'external_network_id': 'external_network_id', 'fixed_network': 'fixed_network', 'fixed_subnet': 'fixed_subnet', 'network_driver': 'network_driver', 'volume_driver': 'volume_driver', 'docker_volume_size': 20, 'master_flavor_id': 'flavor_id', 'docker_storage_driver': 'devicemapper', 'cluster_distro': 'fedora-atomic', 'coe': 'kubernetes', 'token': None, 'http_proxy': 'http_proxy', 'https_proxy': 'https_proxy', 'no_proxy': 'no_proxy', 'labels': {'flannel_network_cidr': '10.101.0.0/16', 'flannel_network_subnetlen': '26', 'flannel_backend': 'vxlan', 'system_pods_initial_delay': '15', 'system_pods_timeout': '1', 'admission_control_list': 'fake_list', 'prometheus_monitoring': 'False', 'grafana_admin_passwd': 'fake_pwd', 'kube_dashboard_enabled': 'True', 'influx_grafana_dashboard_enabled': 'True', 'docker_volume_type': 'lvmdriver-1', 'etcd_volume_size': 0, 'availability_zone': 'az_1'}, 'tls_disabled': False, 'server_type': 'vm', 'registry_enabled': False, 'insecure_registry': '10.0.0.1:5000', 'master_lb_enabled': False, 'floating_ip_enabled': False, } self.cluster_dict = { 'uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', 'cluster_template_id': 'xx-xx-xx-xx', 'keypair': 'keypair_id', 'name': 'cluster1', 'stack_id': 'xx-xx-xx-xx', 'api_address': '172.17.2.3', 'node_addresses': ['172.17.2.4'], 'node_count': 1, 'master_count': 1, 'discovery_url': 'https://discovery.etcd.io/test', 'docker_volume_size': 20, 'flavor_id': 
'flavor_id', 'master_addresses': ['172.17.2.18'], 'ca_cert_ref': 'http://barbican/v1/containers/xx-xx-xx-xx', 'magnum_cert_ref': 'http://barbican/v1/containers/xx-xx-xx-xx', 'trustee_username': 'fake_trustee', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 'trust_id': 'bd11efc5-d4e2-4dac-bbce-25e348ddf7de', 'coe_version': 'fake-version', 'labels': {'flannel_network_cidr': '10.101.0.0/16', 'flannel_network_subnetlen': '26', 'flannel_backend': 'vxlan', 'system_pods_initial_delay': '15', 'system_pods_timeout': '1', 'admission_control_list': 'fake_list', 'prometheus_monitoring': 'False', 'grafana_admin_passwd': 'fake_pwd', 'kube_dashboard_enabled': 'True', 'docker_volume_type': 'lvmdriver-1', 'availability_zone': 'az_1', 'cert_manager_api': 'False', 'ingress_controller': 'i-controller', 'ingress_controller_role': 'i-controller-role', 'kubelet_options': '--kubelet', 'kubeapi_options': '--kubeapi', 'kubecontroller_options': '--kubecontroller', 'kubescheduler_options': '--kubescheduler', 'kubeproxy_options': '--kubeproxy', 'influx_grafana_dashboard_enabled': 'True'}, 'master_flavor_id': 'master_flavor_id', 'flavor_id': 'flavor_id', } self.context.user_name = 'fake_user' self.context.project_id = 'fake_tenant' osc_patcher = mock.patch('magnum.common.clients.OpenStackClients') self.mock_osc_class = osc_patcher.start() self.addCleanup(osc_patcher.stop) self.mock_osc = mock.MagicMock() self.mock_osc.url_for.return_value = 'http://192.168.10.10:5000/v3' self.mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1' self.mock_osc.cinder_region_name.return_value = 'RegionOne' self.mock_keystone = mock.MagicMock() self.mock_keystone.trustee_domain_id = 'trustee_domain_id' self.mock_osc.keystone.return_value = self.mock_keystone self.mock_osc_class.return_value = self.mock_osc @patch('requests.get') @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.drivers.common.driver.Driver.get_driver') def 
test_extract_template_definition( self, mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get): self._test_extract_template_definition( mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get) def _test_extract_template_definition( self, mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get, missing_attr=None): if missing_attr in self.cluster_template_dict: self.cluster_template_dict[missing_attr] = None elif missing_attr in self.cluster_dict: self.cluster_dict[missing_attr] = None cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_objects_cluster_template_get_by_uuid.return_value = \ cluster_template expected_result = str('{"action":"get","node":{"key":"test","value":' '"1","modifiedIndex":10,"createdIndex":10}}') mock_resp = mock.MagicMock() mock_resp.text = expected_result mock_get.return_value = mock_resp cluster = objects.Cluster(self.context, **self.cluster_dict) mock_driver.return_value = k8s_dr.Driver() (template_path, definition, env_files) = mock_driver()._extract_template_definition(self.context, cluster) mapping = { 'dns_nameserver': 'dns_nameserver', 'image_id': 'server_image', 'flavor_id': 'minion_flavor', 'docker_volume_size': 'docker_volume_size', 'docker_storage_driver': 'docker_storage_driver', 'network_driver': 'network_driver', 'volume_driver': 'volume_driver', 'master_flavor_id': 'master_flavor', 'apiserver_port': '', 'node_count': 'number_of_minions', 'master_count': 'number_of_masters', 'discovery_url': 'discovery_url', 'labels': {'flannel_network_cidr': '10.101.0.0/16', 'flannel_network_subnetlen': '26', 'flannel_backend': 'vxlan', 'system_pods_initial_delay': '15', 'system_pods_timeout': '1', 'admission_control_list': 'fake_list', 'prometheus_monitoring': 'False', 'grafana_admin_passwd': 'fake_pwd', 'kube_dashboard_enabled': 'True', 'influx_grafana_dashboard_enabled': 'True', 'docker_volume_type': 'lvmdriver-1', 'etcd_volume_size': None, 'availability_zone': 'az_1', 
'cert_manager_api': 'False', 'ingress_controller': 'i-controller', 'ingress_controller_role': 'i-controller-role', 'kubelet_options': '--kubelet', 'kubeapi_options': '--kubeapi', 'kubecontroller_options': '--kubecontroller', 'kubescheduler_options': '--kubescheduler', 'kubeproxy_options': '--kubeproxy', }, 'http_proxy': 'http_proxy', 'https_proxy': 'https_proxy', 'no_proxy': 'no_proxy', 'cluster_uuid': self.cluster_dict['uuid'], 'magnum_url': self.mock_osc.magnum_url.return_value, 'tls_disabled': False, 'insecure_registry': '10.0.0.1:5000', } expected = { 'ssh_key_name': 'keypair_id', 'external_network': 'external_network_id', 'fixed_network': 'fixed_network', 'fixed_subnet': 'fixed_subnet', 'network_driver': 'network_driver', 'volume_driver': 'volume_driver', 'dns_nameserver': 'dns_nameserver', 'server_image': 'image_id', 'minion_flavor': 'flavor_id', 'master_flavor': 'master_flavor_id', 'number_of_minions': 1, 'number_of_masters': 1, 'docker_volume_size': 20, 'docker_volume_type': 'lvmdriver-1', 'docker_storage_driver': 'devicemapper', 'discovery_url': 'https://discovery.etcd.io/test', 'etcd_volume_size': None, 'flannel_network_cidr': '10.101.0.0/16', 'flannel_network_subnetlen': '26', 'flannel_backend': 'vxlan', 'system_pods_initial_delay': '15', 'system_pods_timeout': '1', 'admission_control_list': 'fake_list', 'prometheus_monitoring': 'False', 'grafana_admin_passwd': 'fake_pwd', 'kube_dashboard_enabled': 'True', 'influx_grafana_dashboard_enabled': 'True', 'http_proxy': 'http_proxy', 'https_proxy': 'https_proxy', 'no_proxy': 'no_proxy', 'username': 'fake_user', 'cluster_uuid': self.cluster_dict['uuid'], 'magnum_url': self.mock_osc.magnum_url.return_value, 'region_name': self.mock_osc.cinder_region_name.return_value, 'tls_disabled': False, 'registry_enabled': False, 'trustee_domain_id': self.mock_keystone.trustee_domain_id, 'trustee_username': 'fake_trustee', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 
'trust_id': '', 'auth_url': 'http://192.168.10.10:5000/v3', 'insecure_registry_url': '10.0.0.1:5000', 'kube_version': 'fake-version', 'verify_ca': True, 'openstack_ca': '', "nodes_affinity_policy": "soft-anti-affinity", 'availability_zone': 'az_1', 'cert_manager_api': 'False', 'ingress_controller': 'i-controller', 'ingress_controller_role': 'i-controller-role', 'kubelet_options': '--kubelet', 'kubeapi_options': '--kubeapi', 'kubecontroller_options': '--kubecontroller', 'kubescheduler_options': '--kubescheduler', 'kubeproxy_options': '--kubeproxy', } if missing_attr is not None: expected.pop(mapping[missing_attr], None) self.assertEqual(expected, definition) self.assertEqual( ['../../common/templates/environments/no_private_network.yaml', '../../common/templates/environments/no_etcd_volume.yaml', '../../common/templates/environments/with_volume.yaml', '../../common/templates/environments/no_master_lb.yaml', '../../common/templates/environments/disable_floating_ip.yaml', ], env_files) @patch('requests.get') @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.drivers.common.driver.Driver.get_driver') def test_extract_template_definition_with_registry( self, mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get): self.cluster_template_dict['registry_enabled'] = True cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_objects_cluster_template_get_by_uuid.return_value = \ cluster_template expected_result = str('{"action":"get","node":{"key":"test","value":' '"1","modifiedIndex":10,"createdIndex":10}}') mock_resp = mock.MagicMock() mock_resp.text = expected_result mock_get.return_value = mock_resp cluster = objects.Cluster(self.context, **self.cluster_dict) mock_driver.return_value = k8s_dr.Driver() CONF.set_override('swift_region', 'RegionOne', group='docker_registry') CONF.set_override('cluster_user_trust', True, group='trust') (template_path, definition, env_files) = 
mock_driver()._extract_template_definition(self.context, cluster) expected = { 'auth_url': 'http://192.168.10.10:5000/v3', 'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', 'discovery_url': 'https://discovery.etcd.io/test', 'dns_nameserver': 'dns_nameserver', 'docker_storage_driver': 'devicemapper', 'docker_volume_size': 20, 'docker_volume_type': 'lvmdriver-1', 'etcd_volume_size': None, 'external_network': 'external_network_id', 'fixed_network': 'fixed_network', 'fixed_subnet': 'fixed_subnet', 'flannel_backend': 'vxlan', 'flannel_network_cidr': '10.101.0.0/16', 'flannel_network_subnetlen': '26', 'system_pods_initial_delay': '15', 'system_pods_timeout': '1', 'admission_control_list': 'fake_list', 'prometheus_monitoring': 'False', 'grafana_admin_passwd': 'fake_pwd', 'kube_dashboard_enabled': 'True', 'influx_grafana_dashboard_enabled': 'True', 'http_proxy': 'http_proxy', 'https_proxy': 'https_proxy', 'magnum_url': 'http://127.0.0.1:9511/v1', 'master_flavor': 'master_flavor_id', 'minion_flavor': 'flavor_id', 'network_driver': 'network_driver', 'no_proxy': 'no_proxy', 'number_of_masters': 1, 'number_of_minions': 1, 'region_name': 'RegionOne', 'registry_container': 'docker_registry', 'registry_enabled': True, 'server_image': 'image_id', 'ssh_key_name': 'keypair_id', 'swift_region': 'RegionOne', 'tls_disabled': False, 'trust_id': 'bd11efc5-d4e2-4dac-bbce-25e348ddf7de', 'trustee_domain_id': self.mock_keystone.trustee_domain_id, 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 'trustee_username': 'fake_trustee', 'username': 'fake_user', 'volume_driver': 'volume_driver', 'insecure_registry_url': '10.0.0.1:5000', 'kube_version': 'fake-version', 'verify_ca': True, 'openstack_ca': '', "nodes_affinity_policy": "soft-anti-affinity", 'availability_zone': 'az_1', 'cert_manager_api': 'False', 'ingress_controller': 'i-controller', 'ingress_controller_role': 'i-controller-role', 'kubelet_options': '--kubelet', 
'kubeapi_options': '--kubeapi', 'kubecontroller_options': '--kubecontroller', 'kubescheduler_options': '--kubescheduler', 'kubeproxy_options': '--kubeproxy', } self.assertEqual(expected, definition) self.assertEqual( ['../../common/templates/environments/no_private_network.yaml', '../../common/templates/environments/no_etcd_volume.yaml', '../../common/templates/environments/with_volume.yaml', '../../common/templates/environments/no_master_lb.yaml', '../../common/templates/environments/disable_floating_ip.yaml', ], env_files) @patch('requests.get') @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.drivers.common.driver.Driver.get_driver') def test_extract_template_definition_only_required( self, mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get): not_required = ['image_id', 'flavor_id', 'dns_nameserver', 'docker_volume_size', 'fixed_network', 'http_proxy', 'https_proxy', 'no_proxy', 'network_driver', 'master_flavor_id', 'docker_storage_driver', 'volume_driver', 'fixed_subnet'] for key in not_required: self.cluster_template_dict[key] = None self.cluster_dict['discovery_url'] = 'https://discovery.etcd.io/test' cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_objects_cluster_template_get_by_uuid.return_value = \ cluster_template expected_result = str('{"action":"get","node":{"key":"test","value":' '"1","modifiedIndex":10,"createdIndex":10}}') mock_resp = mock.MagicMock() mock_resp.text = expected_result mock_get.return_value = mock_resp mock_driver.return_value = k8s_dr.Driver() cluster = objects.Cluster(self.context, **self.cluster_dict) (template_path, definition, env_files) = mock_driver()._extract_template_definition(self.context, cluster) expected = { 'auth_url': 'http://192.168.10.10:5000/v3', 'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', 'discovery_url': 'https://discovery.etcd.io/test', 'docker_volume_size': 20, 'master_flavor': 'master_flavor_id', 'minion_flavor': 
'flavor_id', 'external_network': 'external_network_id', 'flannel_backend': 'vxlan', 'flannel_network_cidr': '10.101.0.0/16', 'flannel_network_subnetlen': '26', 'system_pods_initial_delay': '15', 'system_pods_timeout': '1', 'admission_control_list': 'fake_list', 'prometheus_monitoring': 'False', 'grafana_admin_passwd': 'fake_pwd', 'kube_dashboard_enabled': 'True', 'influx_grafana_dashboard_enabled': 'True', 'docker_volume_type': 'lvmdriver-1', 'etcd_volume_size': None, 'insecure_registry_url': '10.0.0.1:5000', 'kube_version': 'fake-version', 'magnum_url': 'http://127.0.0.1:9511/v1', 'number_of_masters': 1, 'number_of_minions': 1, 'region_name': 'RegionOne', 'registry_enabled': False, 'ssh_key_name': 'keypair_id', 'tls_disabled': False, 'trust_id': '', 'trustee_domain_id': 'trustee_domain_id', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 'trustee_username': 'fake_trustee', 'username': 'fake_user', 'verify_ca': True, 'openstack_ca': '', "nodes_affinity_policy": "soft-anti-affinity", 'availability_zone': 'az_1', 'cert_manager_api': 'False', 'ingress_controller': 'i-controller', 'ingress_controller_role': 'i-controller-role', 'kubelet_options': '--kubelet', 'kubeapi_options': '--kubeapi', 'kubecontroller_options': '--kubecontroller', 'kubescheduler_options': '--kubescheduler', 'kubeproxy_options': '--kubeproxy', } self.assertEqual(expected, definition) self.assertEqual( ['../../common/templates/environments/with_private_network.yaml', '../../common/templates/environments/no_etcd_volume.yaml', '../../common/templates/environments/with_volume.yaml', '../../common/templates/environments/no_master_lb.yaml', '../../common/templates/environments/disable_floating_ip.yaml', ], env_files) @patch('requests.get') @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.drivers.common.driver.Driver.get_driver') def test_extract_template_definition_coreos_with_disovery( self, mock_driver, 
mock_objects_cluster_template_get_by_uuid, mock_get): self.cluster_template_dict['cluster_distro'] = 'coreos' cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_objects_cluster_template_get_by_uuid.return_value = \ cluster_template expected_result = str('{"action":"get","node":{"key":"test","value":' '"1","modifiedIndex":10,"createdIndex":10}}') mock_resp = mock.MagicMock() mock_resp.text = expected_result mock_get.return_value = mock_resp cluster = objects.Cluster(self.context, **self.cluster_dict) mock_driver.return_value = k8s_coreos_dr.Driver() (template_path, definition, env_files) = mock_driver()._extract_template_definition(self.context, cluster) expected = { 'ssh_key_name': 'keypair_id', 'external_network': 'external_network_id', 'fixed_network': 'fixed_network', 'fixed_subnet': 'fixed_subnet', 'dns_nameserver': 'dns_nameserver', 'server_image': 'image_id', 'minion_flavor': 'flavor_id', 'master_flavor': 'master_flavor_id', 'number_of_minions': 1, 'number_of_masters': 1, 'network_driver': 'network_driver', 'volume_driver': 'volume_driver', 'discovery_url': 'https://discovery.etcd.io/test', 'etcd_volume_size': None, 'http_proxy': 'http_proxy', 'https_proxy': 'https_proxy', 'no_proxy': 'no_proxy', 'flannel_network_cidr': '10.101.0.0/16', 'flannel_network_subnetlen': '26', 'flannel_backend': 'vxlan', 'system_pods_initial_delay': '15', 'system_pods_timeout': '1', 'admission_control_list': 'fake_list', 'prometheus_monitoring': 'False', 'grafana_admin_passwd': 'fake_pwd', 'kube_dashboard_enabled': 'True', 'influx_grafana_dashboard_enabled': 'True', 'tls_disabled': False, 'registry_enabled': False, 'trustee_domain_id': self.mock_keystone.trustee_domain_id, 'trustee_username': 'fake_trustee', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 'trust_id': '', 'auth_url': 'http://192.168.10.10:5000/v3', 'cluster_uuid': self.cluster_dict['uuid'], 'magnum_url': 
self.mock_osc.magnum_url.return_value, 'insecure_registry_url': '10.0.0.1:5000', 'kube_version': 'fake-version', 'verify_ca': True, 'openstack_ca': '', 'cert_manager_api': 'False', 'ingress_controller': 'i-controller', 'ingress_controller_role': 'i-controller-role', 'kubelet_options': '--kubelet', 'kubeapi_options': '--kubeapi', 'kubecontroller_options': '--kubecontroller', 'kubescheduler_options': '--kubescheduler', 'kubeproxy_options': '--kubeproxy', } self.assertEqual(expected, definition) self.assertEqual( ['../../common/templates/environments/no_private_network.yaml', '../../common/templates/environments/no_master_lb.yaml', '../../common/templates/environments/disable_floating_ip.yaml'], env_files) @patch('requests.get') @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.drivers.common.driver.Driver.get_driver') def test_extract_template_definition_coreos_no_discoveryurl( self, mock_driver, mock_objects_cluster_template_get_by_uuid, reqget): self.cluster_template_dict['cluster_distro'] = 'coreos' self.cluster_dict['discovery_url'] = None mock_req = mock.MagicMock(text='http://tokentest/h1/h2/h3') reqget.return_value = mock_req cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_objects_cluster_template_get_by_uuid.return_value = \ cluster_template cluster = objects.Cluster(self.context, **self.cluster_dict) mock_driver.return_value = k8s_coreos_dr.Driver() (template_path, definition, env_files) = mock_driver()._extract_template_definition(self.context, cluster) expected = { 'ssh_key_name': 'keypair_id', 'external_network': 'external_network_id', 'fixed_network': 'fixed_network', 'fixed_subnet': 'fixed_subnet', 'dns_nameserver': 'dns_nameserver', 'server_image': 'image_id', 'minion_flavor': 'flavor_id', 'master_flavor': 'master_flavor_id', 'number_of_minions': 1, 'number_of_masters': 1, 'network_driver': 'network_driver', 'volume_driver': 'volume_driver', 'discovery_url': 'http://tokentest/h1/h2/h3', 
'etcd_volume_size': None, 'http_proxy': 'http_proxy', 'https_proxy': 'https_proxy', 'no_proxy': 'no_proxy', 'flannel_network_cidr': '10.101.0.0/16', 'flannel_network_subnetlen': '26', 'flannel_backend': 'vxlan', 'system_pods_initial_delay': '15', 'system_pods_timeout': '1', 'admission_control_list': 'fake_list', 'prometheus_monitoring': 'False', 'grafana_admin_passwd': 'fake_pwd', 'kube_dashboard_enabled': 'True', 'influx_grafana_dashboard_enabled': 'True', 'tls_disabled': False, 'registry_enabled': False, 'trustee_domain_id': self.mock_keystone.trustee_domain_id, 'trustee_username': 'fake_trustee', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 'trust_id': '', 'auth_url': 'http://192.168.10.10:5000/v3', 'cluster_uuid': self.cluster_dict['uuid'], 'magnum_url': self.mock_osc.magnum_url.return_value, 'insecure_registry_url': '10.0.0.1:5000', 'kube_version': 'fake-version', 'verify_ca': True, 'openstack_ca': '', 'cert_manager_api': 'False', 'ingress_controller': 'i-controller', 'ingress_controller_role': 'i-controller-role', 'kubelet_options': '--kubelet', 'kubeapi_options': '--kubeapi', 'kubecontroller_options': '--kubecontroller', 'kubescheduler_options': '--kubescheduler', 'kubeproxy_options': '--kubeproxy', } self.assertEqual(expected, definition) self.assertEqual( ['../../common/templates/environments/no_private_network.yaml', '../../common/templates/environments/no_master_lb.yaml', '../../common/templates/environments/disable_floating_ip.yaml'], env_files) @patch('requests.get') @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.drivers.common.driver.Driver.get_driver') def test_extract_template_definition_without_dns( self, mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get): mock_driver.return_value = k8s_dr.Driver() self._test_extract_template_definition( mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get, missing_attr='dns_nameserver') @patch('requests.get') 
@patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.drivers.common.driver.Driver.get_driver') def test_extract_template_definition_without_server_image( self, mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get): mock_driver.return_value = k8s_dr.Driver() self._test_extract_template_definition( mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get, missing_attr='image_id') @patch('requests.get') @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.drivers.common.driver.Driver.get_driver') def test_extract_template_definition_without_docker_storage_driver( self, mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get): mock_driver.return_value = k8s_dr.Driver() self._test_extract_template_definition( mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get, missing_attr='docker_storage_driver') @patch('requests.get') @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.drivers.common.driver.Driver.get_driver') def test_extract_template_definition_without_apiserver_port( self, mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get): mock_driver.return_value = k8s_dr.Driver() self._test_extract_template_definition( mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get, missing_attr='apiserver_port') @patch('requests.get') @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.drivers.common.driver.Driver.get_driver') def test_extract_template_definition_without_node_count( self, mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get): mock_driver.return_value = k8s_dr.Driver() self._test_extract_template_definition( mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get, missing_attr='node_count') @patch('requests.get') @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.drivers.common.driver.Driver.get_driver') def test_extract_template_definition_without_master_count( self, mock_driver, 
mock_objects_cluster_template_get_by_uuid, mock_get): mock_driver.return_value = k8s_dr.Driver() self._test_extract_template_definition( mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get, missing_attr='master_count') @patch('requests.get') @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.drivers.common.driver.Driver.get_driver') def test_extract_template_definition_without_discovery_url( self, mock_driver, mock_objects_cluster_template_get_by_uuid, reqget): cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_objects_cluster_template_get_by_uuid.return_value = \ cluster_template cluster_dict = self.cluster_dict cluster_dict['discovery_url'] = None cluster = objects.Cluster(self.context, **cluster_dict) mock_driver.return_value = k8s_dr.Driver() CONF.set_override('etcd_discovery_service_endpoint_format', 'http://etcd/test?size=%(size)d', group='cluster') mock_req = mock.MagicMock(text='https://address/token') reqget.return_value = mock_req (template_path, definition, env_files) = mock_driver()._extract_template_definition(self.context, cluster) expected = { 'ssh_key_name': 'keypair_id', 'external_network': 'external_network_id', 'fixed_network': 'fixed_network', 'fixed_subnet': 'fixed_subnet', 'dns_nameserver': 'dns_nameserver', 'server_image': 'image_id', 'master_flavor': 'master_flavor_id', 'minion_flavor': 'flavor_id', 'number_of_minions': 1, 'number_of_masters': 1, 'network_driver': 'network_driver', 'volume_driver': 'volume_driver', 'docker_volume_size': 20, 'docker_volume_type': 'lvmdriver-1', 'docker_storage_driver': 'devicemapper', 'discovery_url': 'https://address/token', 'etcd_volume_size': None, 'http_proxy': 'http_proxy', 'https_proxy': 'https_proxy', 'no_proxy': 'no_proxy', 'flannel_network_cidr': '10.101.0.0/16', 'flannel_network_subnetlen': '26', 'flannel_backend': 'vxlan', 'system_pods_initial_delay': '15', 'system_pods_timeout': '1', 'admission_control_list': 'fake_list', 
'prometheus_monitoring': 'False', 'grafana_admin_passwd': 'fake_pwd', 'kube_dashboard_enabled': 'True', 'influx_grafana_dashboard_enabled': 'True', 'username': 'fake_user', 'cluster_uuid': self.cluster_dict['uuid'], 'magnum_url': self.mock_osc.magnum_url.return_value, 'region_name': self.mock_osc.cinder_region_name.return_value, 'tls_disabled': False, 'registry_enabled': False, 'trustee_domain_id': self.mock_keystone.trustee_domain_id, 'trustee_username': 'fake_trustee', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 'trust_id': '', 'auth_url': 'http://192.168.10.10:5000/v3', 'insecure_registry_url': '10.0.0.1:5000', 'kube_version': 'fake-version', 'verify_ca': True, 'openstack_ca': '', "nodes_affinity_policy": "soft-anti-affinity", 'availability_zone': 'az_1', 'cert_manager_api': 'False', 'ingress_controller': 'i-controller', 'ingress_controller_role': 'i-controller-role', 'kubelet_options': '--kubelet', 'kubeapi_options': '--kubeapi', 'kubecontroller_options': '--kubecontroller', 'kubescheduler_options': '--kubescheduler', 'kubeproxy_options': '--kubeproxy', } self.assertEqual(expected, definition) self.assertEqual( ['../../common/templates/environments/no_private_network.yaml', '../../common/templates/environments/no_etcd_volume.yaml', '../../common/templates/environments/with_volume.yaml', '../../common/templates/environments/no_master_lb.yaml', '../../common/templates/environments/disable_floating_ip.yaml', ], env_files) reqget.assert_called_once_with('http://etcd/test?size=1') @patch('magnum.common.short_id.generate_id') @patch('heatclient.common.template_utils.get_template_contents') @patch('magnum.drivers.k8s_fedora_atomic_v1.driver.Driver.' 
'_extract_template_definition') @patch('magnum.common.clients.OpenStackClients') def test_create_stack(self, mock_osc, mock_extract_template_definition, mock_get_template_contents, mock_generate_id): mock_generate_id.return_value = 'xx-xx-xx-xx' expected_stack_name = 'expected-stack-name-xx-xx-xx-xx' expected_template_contents = 'template_contents' dummy_cluster_name = 'expected_stack_name' expected_timeout = 15 mock_tpl_files = {} mock_get_template_contents.return_value = [ mock_tpl_files, expected_template_contents] mock_extract_template_definition.return_value = ('template/path', {}, []) mock_heat_client = mock.MagicMock() mock_osc.return_value.heat.return_value = mock_heat_client mock_cluster = mock.MagicMock() mock_cluster.name = dummy_cluster_name k8s_dr.Driver().create_cluster(self.context, mock_cluster, expected_timeout) expected_args = { 'stack_name': expected_stack_name, 'parameters': {}, 'template': expected_template_contents, 'files': {}, 'environment_files': [], 'timeout_mins': expected_timeout } mock_heat_client.stacks.create.assert_called_once_with(**expected_args) @patch('magnum.common.short_id.generate_id') @patch('heatclient.common.template_utils.get_template_contents') @patch('magnum.drivers.k8s_fedora_atomic_v1.driver.Driver.' 
'_extract_template_definition') @patch('magnum.common.clients.OpenStackClients') def test_create_stack_no_timeout_specified( self, mock_osc, mock_extract_template_definition, mock_get_template_contents, mock_generate_id): mock_generate_id.return_value = 'xx-xx-xx-xx' expected_stack_name = 'expected-stack-name-xx-xx-xx-xx' expected_template_contents = 'template_contents' dummy_cluster_name = 'expected_stack_name' expected_timeout = CONF.cluster_heat.create_timeout mock_tpl_files = {} mock_get_template_contents.return_value = [ mock_tpl_files, expected_template_contents] mock_extract_template_definition.return_value = ('template/path', {}, []) mock_heat_client = mock.MagicMock() mock_osc.return_value.heat.return_value = mock_heat_client mock_cluster = mock.MagicMock() mock_cluster.name = dummy_cluster_name k8s_dr.Driver().create_cluster(self.context, mock_cluster, None) expected_args = { 'stack_name': expected_stack_name, 'parameters': {}, 'template': expected_template_contents, 'files': {}, 'environment_files': [], 'timeout_mins': expected_timeout } mock_heat_client.stacks.create.assert_called_once_with(**expected_args) @patch('magnum.common.short_id.generate_id') @patch('heatclient.common.template_utils.get_template_contents') @patch('magnum.drivers.k8s_fedora_atomic_v1.driver.Driver.' 
'_extract_template_definition') @patch('magnum.common.clients.OpenStackClients') def test_create_stack_timeout_is_zero( self, mock_osc, mock_extract_template_definition, mock_get_template_contents, mock_generate_id): mock_generate_id.return_value = 'xx-xx-xx-xx' expected_stack_name = 'expected-stack-name-xx-xx-xx-xx' expected_template_contents = 'template_contents' dummy_cluster_name = 'expected_stack_name' cluster_timeout = 0 expected_timeout = CONF.cluster_heat.create_timeout mock_tpl_files = {} mock_get_template_contents.return_value = [ mock_tpl_files, expected_template_contents] mock_extract_template_definition.return_value = ('template/path', {}, []) mock_heat_client = mock.MagicMock() mock_osc.return_value.heat.return_value = mock_heat_client mock_cluster = mock.MagicMock() mock_cluster.name = dummy_cluster_name k8s_dr.Driver().create_cluster(self.context, mock_cluster, cluster_timeout) expected_args = { 'stack_name': expected_stack_name, 'parameters': {}, 'template': expected_template_contents, 'files': {}, 'environment_files': [], 'timeout_mins': expected_timeout } mock_heat_client.stacks.create.assert_called_once_with(**expected_args) @patch('heatclient.common.template_utils.get_template_contents') @patch('magnum.drivers.k8s_fedora_atomic_v1.driver.Driver.' 
'_extract_template_definition') @patch('magnum.common.clients.OpenStackClients') def test_update_stack(self, mock_osc, mock_extract_template_definition, mock_get_template_contents): mock_stack_id = 'xx-xx-xx-xx' expected_template_contents = 'template_contents' mock_tpl_files = {} mock_get_template_contents.return_value = [ mock_tpl_files, expected_template_contents] mock_extract_template_definition.return_value = ('template/path', {}, []) mock_heat_client = mock.MagicMock() mock_osc.return_value.heat.return_value = mock_heat_client mock_cluster = mock.MagicMock() mock_cluster.stack_id = mock_stack_id k8s_dr.Driver().update_cluster({}, mock_cluster) expected_args = { 'parameters': {}, 'template': expected_template_contents, 'files': {}, 'environment_files': [], 'disable_rollback': True } mock_heat_client.stacks.update.assert_called_once_with(mock_stack_id, **expected_args) magnum-6.1.0/magnum/tests/unit/conductor/handlers/common/0000775000175100017510000000000013244017675023534 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/conductor/handlers/common/test_cert_manager.py0000666000175100017510000003021613244017334027570 0ustar zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from magnum.common import exception from magnum.conductor.handlers.common import cert_manager from magnum.tests import base class CertManagerTestCase(base.BaseTestCase): def setUp(self): super(CertManagerTestCase, self).setUp() cert_manager_patcher = mock.patch.object(cert_manager, 'cert_manager') self.cert_manager = cert_manager_patcher.start() self.addCleanup(cert_manager_patcher.stop) self.cert_manager_backend = mock.MagicMock() self.cert_manager.get_backend.return_value = self.cert_manager_backend self.cert_manager_backend.CertManager = mock.MagicMock() self.CertManager = self.cert_manager_backend.CertManager @mock.patch('magnum.common.x509.operations.generate_ca_certificate') @mock.patch('magnum.common.short_id.generate_id') def test_generate_ca_cert(self, mock_generate_id, mock_generate_ca_cert): expected_ca_name = 'ca-name' expected_ca_password = 'password' expected_ca_cert = { 'private_key': 'private_key', 'certificate': 'certificate'} expected_ca_cert_ref = 'ca_cert_ref' mock_generate_id.return_value = expected_ca_password mock_generate_ca_cert.return_value = expected_ca_cert self.CertManager.store_cert.return_value = expected_ca_cert_ref self.assertEqual((expected_ca_cert_ref, expected_ca_cert, expected_ca_password), cert_manager._generate_ca_cert(expected_ca_name)) mock_generate_ca_cert.assert_called_once_with( expected_ca_name, encryption_password=expected_ca_password) self.CertManager.store_cert.assert_called_once_with( certificate=expected_ca_cert['certificate'], private_key=expected_ca_cert['private_key'], private_key_passphrase=expected_ca_password, name=expected_ca_name, context=None ) @mock.patch('magnum.common.x509.operations.generate_client_certificate') @mock.patch('magnum.common.short_id.generate_id') def test_generate_client_cert(self, mock_generate_id, mock_generate_cert): expected_name = 'admin' expected_organization_name = 'system:masters' expected_ca_name = 'ca-name' expected_password = 'password' expected_ca_password = 
'ca-password' expected_cert = { 'private_key': 'private_key', 'certificate': 'certificate'} expected_ca_cert = { 'private_key': 'ca_private_key', 'certificate': 'ca_certificate'} expected_cert_ref = 'cert_ref' mock_generate_id.return_value = expected_password mock_generate_cert.return_value = expected_cert self.CertManager.store_cert.return_value = expected_cert_ref self.assertEqual( expected_cert_ref, cert_manager._generate_client_cert( expected_ca_name, expected_ca_cert, expected_ca_password)) mock_generate_cert.assert_called_once_with( expected_ca_name, expected_name, expected_organization_name, expected_ca_cert['private_key'], encryption_password=expected_password, ca_key_password=expected_ca_password, ) self.CertManager.store_cert.assert_called_once_with( certificate=expected_cert['certificate'], private_key=expected_cert['private_key'], private_key_passphrase=expected_password, name=cert_manager.CONDUCTOR_CLIENT_NAME, context=None ) def _test_generate_certificates(self, expected_ca_name, mock_cluster, mock_generate_ca_cert, mock_generate_client_cert): expected_ca_password = 'ca-password' expected_ca_cert = { 'private_key': 'ca_private_key', 'certificate': 'ca_certificate'} expected_cert_ref = 'cert_ref' expected_ca_cert_ref = 'ca-cert-ref' mock_generate_ca_cert.return_value = (expected_ca_cert_ref, expected_ca_cert, expected_ca_password) mock_generate_client_cert.return_value = expected_cert_ref cert_manager.generate_certificates_to_cluster(mock_cluster) self.assertEqual(expected_ca_cert_ref, mock_cluster.ca_cert_ref) self.assertEqual(expected_cert_ref, mock_cluster.magnum_cert_ref) mock_generate_ca_cert.assert_called_once_with(expected_ca_name, context=None) mock_generate_client_cert.assert_called_once_with( expected_ca_name, expected_ca_cert, expected_ca_password, context=None) @mock.patch('magnum.conductor.handlers.common.cert_manager.' '_generate_client_cert') @mock.patch('magnum.conductor.handlers.common.cert_manager.' 
'_generate_ca_cert') def test_generate_certificates(self, mock_generate_ca_cert, mock_generate_client_cert): expected_ca_name = 'ca-name' mock_cluster = mock.MagicMock() mock_cluster.name = expected_ca_name self._test_generate_certificates(expected_ca_name, mock_cluster, mock_generate_ca_cert, mock_generate_client_cert) @mock.patch('magnum.conductor.handlers.common.cert_manager.' '_generate_client_cert') @mock.patch('magnum.conductor.handlers.common.cert_manager.' '_generate_ca_cert') def test_generate_certificates_without_name(self, mock_generate_ca_cert, mock_generate_client_cert): expected_ca_name = 'ca-uuid' mock_cluster = mock.MagicMock() mock_cluster.name = None mock_cluster.uuid = expected_ca_name self._test_generate_certificates(expected_ca_name, mock_cluster, mock_generate_ca_cert, mock_generate_client_cert) @mock.patch('magnum.conductor.handlers.common.cert_manager.' '_get_issuer_name') def test_generate_certificates_with_error(self, mock_get_issuer_name): mock_cluster = mock.MagicMock() mock_get_issuer_name.side_effect = exception.MagnumException() self.assertRaises(exception.CertificatesToClusterFailed, cert_manager.generate_certificates_to_cluster, mock_cluster) @mock.patch('magnum.common.x509.operations.sign') def test_sign_node_certificate(self, mock_x509_sign): mock_cluster = mock.MagicMock() mock_cluster.uuid = "mock_cluster_uuid" mock_ca_cert = mock.MagicMock() mock_ca_cert.get_private_key.return_value = mock.sentinel.priv_key passphrase = mock.sentinel.passphrase mock_ca_cert.get_private_key_passphrase.return_value = passphrase self.CertManager.get_cert.return_value = mock_ca_cert mock_csr = mock.MagicMock() mock_x509_sign.return_value = mock.sentinel.signed_cert cluster_ca_cert = cert_manager.sign_node_certificate(mock_cluster, mock_csr) self.CertManager.get_cert.assert_called_once_with( mock_cluster.ca_cert_ref, resource_ref=mock_cluster.uuid, context=None) mock_x509_sign.assert_called_once_with(mock_csr, mock_cluster.name, 
mock.sentinel.priv_key, passphrase) self.assertEqual(mock.sentinel.signed_cert, cluster_ca_cert) @mock.patch('magnum.common.x509.operations.sign') def test_sign_node_certificate_without_cluster_name(self, mock_x509_sign): mock_cluster = mock.MagicMock() mock_cluster.name = None mock_cluster.uuid = "mock_cluster_uuid" mock_ca_cert = mock.MagicMock() mock_ca_cert.get_private_key.return_value = mock.sentinel.priv_key passphrase = mock.sentinel.passphrase mock_ca_cert.get_private_key_passphrase.return_value = passphrase self.CertManager.get_cert.return_value = mock_ca_cert mock_csr = mock.MagicMock() mock_x509_sign.return_value = mock.sentinel.signed_cert cluster_ca_cert = cert_manager.sign_node_certificate(mock_cluster, mock_csr) self.CertManager.get_cert.assert_called_once_with( mock_cluster.ca_cert_ref, resource_ref=mock_cluster.uuid, context=None) mock_x509_sign.assert_called_once_with(mock_csr, mock_cluster.uuid, mock.sentinel.priv_key, passphrase) self.assertEqual(mock.sentinel.signed_cert, cluster_ca_cert) def test_get_cluster_ca_certificate(self): mock_cluster = mock.MagicMock() mock_cluster.uuid = "mock_cluster_uuid" mock_ca_cert = mock.MagicMock() self.CertManager.get_cert.return_value = mock_ca_cert cluster_ca_cert = cert_manager.get_cluster_ca_certificate(mock_cluster) self.CertManager.get_cert.assert_called_once_with( mock_cluster.ca_cert_ref, resource_ref=mock_cluster.uuid, context=None) self.assertEqual(mock_ca_cert, cluster_ca_cert) def test_delete_certtificate(self): mock_delete_cert = self.CertManager.delete_cert expected_cert_ref = 'cert_ref' expected_ca_cert_ref = 'ca_cert_ref' mock_cluster = mock.MagicMock() mock_cluster.uuid = "mock_cluster_uuid" mock_cluster.ca_cert_ref = expected_ca_cert_ref mock_cluster.magnum_cert_ref = expected_cert_ref cert_manager.delete_certificates_from_cluster(mock_cluster) mock_delete_cert.assert_any_call(expected_ca_cert_ref, resource_ref=mock_cluster.uuid, context=None) 
mock_delete_cert.assert_any_call(expected_cert_ref, resource_ref=mock_cluster.uuid, context=None) def test_delete_certtificate_if_raise_error(self): mock_delete_cert = self.CertManager.delete_cert expected_cert_ref = 'cert_ref' expected_ca_cert_ref = 'ca_cert_ref' mock_cluster = mock.MagicMock() mock_cluster.ca_cert_ref = expected_ca_cert_ref mock_cluster.magnum_cert_ref = expected_cert_ref mock_delete_cert.side_effect = ValueError cert_manager.delete_certificates_from_cluster(mock_cluster) mock_delete_cert.assert_any_call(expected_ca_cert_ref, resource_ref=mock_cluster.uuid, context=None) mock_delete_cert.assert_any_call(expected_cert_ref, resource_ref=mock_cluster.uuid, context=None) def test_delete_certtificate_without_cert_ref(self): mock_delete_cert = self.CertManager.delete_cert mock_cluster = mock.MagicMock() mock_cluster.ca_cert_ref = None mock_cluster.magnum_cert_ref = None cert_manager.delete_certificates_from_cluster(mock_cluster) self.assertFalse(mock_delete_cert.called) magnum-6.1.0/magnum/tests/unit/conductor/handlers/common/test_trust_manager.py0000666000175100017510000001145013244017334030013 0ustar zuulzuul00000000000000# Copyright 2016 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from mock import patch from magnum.common import exception from magnum.conductor.handlers.common import trust_manager from magnum.tests import base class TrustManagerTestCase(base.BaseTestCase): def setUp(self): super(TrustManagerTestCase, self).setUp() osc_class_patcher = patch('magnum.common.clients.OpenStackClients') osc_class = osc_class_patcher.start() self.addCleanup(osc_class_patcher.stop) self.osc = mock.MagicMock() osc_class.return_value = self.osc @patch('magnum.common.utils.generate_password') def test_create_trustee_and_trust(self, mock_generate_password): mock_password = "password_mock" mock_generate_password.return_value = mock_password mock_cluster = mock.MagicMock() mock_cluster.uuid = 'mock_cluster_uuid' mock_cluster.project_id = 'mock_cluster_project_id' mock_keystone = mock.MagicMock() mock_trustee = mock.MagicMock() mock_trustee.id = 'mock_trustee_id' mock_trustee.name = 'mock_trustee_username' mock_trust = mock.MagicMock() mock_trust.id = 'mock_trust_id' self.osc.keystone.return_value = mock_keystone mock_keystone.create_trustee.return_value = mock_trustee mock_keystone.create_trust.return_value = mock_trust trust_manager.create_trustee_and_trust(self.osc, mock_cluster) mock_keystone.create_trustee.assert_called_once_with( '%s_%s' % (mock_cluster.uuid, mock_cluster.project_id), mock_password, ) mock_keystone.create_trust.assert_called_once_with( mock_trustee.id, ) self.assertEqual(mock_trustee.name, mock_cluster.trustee_username) self.assertEqual(mock_trustee.id, mock_cluster.trustee_user_id) self.assertEqual(mock_password, mock_cluster.trustee_password) self.assertEqual(mock_trust.id, mock_cluster.trust_id) @patch('magnum.common.utils.generate_password') def test_create_trustee_and_trust_with_error(self, mock_generate_password): mock_cluster = mock.MagicMock() mock_generate_password.side_effect = exception.MagnumException() self.assertRaises(exception.TrusteeOrTrustToClusterFailed, trust_manager.create_trustee_and_trust, self.osc, 
mock_cluster) def test_delete_trustee_and_trust(self): mock_cluster = mock.MagicMock() mock_cluster.trust_id = 'trust_id' mock_cluster.trustee_user_id = 'trustee_user_id' mock_keystone = mock.MagicMock() self.osc.keystone.return_value = mock_keystone context = mock.MagicMock() trust_manager.delete_trustee_and_trust(self.osc, context, mock_cluster) mock_keystone.delete_trust.assert_called_once_with( context, mock_cluster ) mock_keystone.delete_trustee.assert_called_once_with( mock_cluster.trustee_user_id, ) def test_delete_trustee_and_trust_without_trust_id(self): mock_cluster = mock.MagicMock() mock_cluster.trust_id = None mock_cluster.trustee_user_id = 'trustee_user_id' mock_keystone = mock.MagicMock() self.osc.keystone.return_value = mock_keystone context = mock.MagicMock() trust_manager.delete_trustee_and_trust(self.osc, context, mock_cluster) self.assertEqual(0, mock_keystone.delete_trust.call_count) mock_keystone.delete_trustee.assert_called_once_with( mock_cluster.trustee_user_id, ) def test_delete_trustee_and_trust_without_trustee_user_id(self): mock_cluster = mock.MagicMock() mock_cluster.trust_id = 'trust_id' mock_cluster.trustee_user_id = None mock_keystone = mock.MagicMock() self.osc.keystone.return_value = mock_keystone context = mock.MagicMock() trust_manager.delete_trustee_and_trust(self.osc, context, mock_cluster) mock_keystone.delete_trust.assert_called_once_with( context, mock_cluster ) self.assertEqual(0, mock_keystone.delete_trustee.call_count) magnum-6.1.0/magnum/tests/unit/conductor/handlers/common/__init__.py0000666000175100017510000000000013244017334025625 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/conductor/handlers/test_ca_conductor.py0000666000175100017510000000461413244017334026317 0ustar zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from magnum.conductor.handlers import ca_conductor from magnum.tests import base class TestSignConductor(base.TestCase): def setUp(self): super(TestSignConductor, self).setUp() self.ca_handler = ca_conductor.Handler() @mock.patch.object(ca_conductor, 'cert_manager') def test_sign_certificate(self, mock_cert_manager): mock_cluster = mock.MagicMock() mock_certificate = mock.MagicMock() mock_certificate.csr = 'fake-csr' mock_cert_manager.sign_node_certificate.return_value = 'fake-pem' actual_cert = self.ca_handler.sign_certificate(self.context, mock_cluster, mock_certificate) mock_cert_manager.sign_node_certificate.assert_called_once_with( mock_cluster, 'fake-csr', context=self.context ) self.assertEqual('fake-pem', actual_cert.pem) @mock.patch.object(ca_conductor, 'cert_manager') def test_get_ca_certificate(self, mock_cert_manager): mock_cluster = mock.MagicMock() mock_cluster.uuid = 'cluster-uuid' mock_cluster.user_id = 'user-id' mock_cluster.project_id = 'project-id' mock_cert = mock.MagicMock() mock_cert.get_certificate.return_value = 'fake-pem' mock_cert_manager.get_cluster_ca_certificate.return_value = mock_cert actual_cert = self.ca_handler.get_ca_certificate(self.context, mock_cluster) self.assertEqual(mock_cluster.uuid, actual_cert.cluster_uuid) self.assertEqual(mock_cluster.user_id, actual_cert.user_id) self.assertEqual(mock_cluster.project_id, actual_cert.project_id) self.assertEqual('fake-pem', actual_cert.pem) magnum-6.1.0/magnum/tests/unit/conductor/handlers/test_cluster_conductor.py0000666000175100017510000005464213244017343027423 0ustar 
zuulzuul00000000000000# -*- coding: utf-8 -*- # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import six from heatclient import exc import mock from mock import patch from oslo_service import loopingcall from pycadf import cadftaxonomy as taxonomy from magnum.common import exception from magnum.conductor.handlers import cluster_conductor import magnum.conf from magnum.drivers.k8s_fedora_atomic_v1 import driver as k8s_atomic_dr from magnum import objects from magnum.objects.fields import ClusterStatus as cluster_status from magnum.tests import fake_notifier from magnum.tests.unit.db import base as db_base from magnum.tests.unit.db import utils CONF = magnum.conf.CONF class TestHandler(db_base.DbTestCase): def setUp(self): super(TestHandler, self).setUp() self.handler = cluster_conductor.Handler() cluster_template_dict = utils.get_test_cluster_template() self.cluster_template = objects.ClusterTemplate( self.context, **cluster_template_dict) self.cluster_template.create() cluster_dict = utils.get_test_cluster(node_count=1) self.cluster = objects.Cluster(self.context, **cluster_dict) self.cluster.create() @patch('magnum.conductor.scale_manager.get_scale_manager') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.common.clients.OpenStackClients') def test_update_node_count_success( self, mock_openstack_client_class, mock_driver, mock_scale_manager): mock_heat_stack = mock.MagicMock() 
mock_heat_stack.stack_status = cluster_status.CREATE_COMPLETE mock_heat_client = mock.MagicMock() mock_heat_client.stacks.get.return_value = mock_heat_stack mock_openstack_client = mock_openstack_client_class.return_value mock_openstack_client.heat.return_value = mock_heat_client mock_dr = mock.MagicMock() mock_driver.return_value = mock_dr self.cluster.node_count = 2 self.cluster.status = cluster_status.CREATE_COMPLETE self.handler.cluster_update(self.context, self.cluster) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(1, len(notifications)) self.assertEqual( 'magnum.cluster.update', notifications[0].event_type) self.assertEqual( taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome']) mock_dr.update_cluster.assert_called_once_with( self.context, self.cluster, mock_scale_manager.return_value, False) cluster = objects.Cluster.get(self.context, self.cluster.uuid) self.assertEqual(2, cluster.node_count) @patch('magnum.common.clients.OpenStackClients') def test_update_node_count_failure( self, mock_openstack_client_class): mock_heat_stack = mock.MagicMock() mock_heat_stack.stack_status = cluster_status.CREATE_FAILED mock_heat_client = mock.MagicMock() mock_heat_client.stacks.get.return_value = mock_heat_stack mock_openstack_client = mock_openstack_client_class.return_value mock_openstack_client.heat.return_value = mock_heat_client self.cluster.node_count = 2 self.cluster.status = cluster_status.CREATE_FAILED self.assertRaises(exception.NotSupported, self.handler.cluster_update, self.context, self.cluster) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(1, len(notifications)) self.assertEqual( 'magnum.cluster.update', notifications[0].event_type) self.assertEqual( taxonomy.OUTCOME_FAILURE, notifications[0].payload['outcome']) cluster = objects.Cluster.get(self.context, self.cluster.uuid) self.assertEqual(1, cluster.node_count) @patch('magnum.conductor.scale_manager.get_scale_manager') 
@patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.common.clients.OpenStackClients') def _test_update_cluster_status_complete( self, expect_status, mock_openstack_client_class, mock_driver, mock_scale_manager): mock_heat_stack = mock.MagicMock() mock_heat_stack.stack_status = expect_status mock_heat_client = mock.MagicMock() mock_heat_client.stacks.get.return_value = mock_heat_stack mock_openstack_client = mock_openstack_client_class.return_value mock_openstack_client.heat.return_value = mock_heat_client mock_dr = mock.MagicMock() mock_driver.return_value = mock_dr self.cluster.node_count = 2 self.cluster.status = cluster_status.CREATE_COMPLETE self.handler.cluster_update(self.context, self.cluster) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(1, len(notifications)) self.assertEqual( 'magnum.cluster.update', notifications[0].event_type) self.assertEqual( taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome']) mock_dr.update_cluster.assert_called_once_with( self.context, self.cluster, mock_scale_manager.return_value, False) cluster = objects.Cluster.get(self.context, self.cluster.uuid) self.assertEqual(2, cluster.node_count) def test_update_cluster_status_update_complete(self): self._test_update_cluster_status_complete( cluster_status.UPDATE_COMPLETE) def test_update_cluster_status_resume_complete(self): self._test_update_cluster_status_complete( cluster_status.RESUME_COMPLETE) def test_update_cluster_status_restore_complete(self): self._test_update_cluster_status_complete( cluster_status.RESTORE_COMPLETE) def test_update_cluster_status_rollback_complete(self): self._test_update_cluster_status_complete( cluster_status.ROLLBACK_COMPLETE) def test_update_cluster_status_snapshot_complete(self): self._test_update_cluster_status_complete( cluster_status.SNAPSHOT_COMPLETE) def test_update_cluster_status_check_complete(self): self._test_update_cluster_status_complete( cluster_status.CHECK_COMPLETE) def 
test_update_cluster_status_adopt_complete(self): self._test_update_cluster_status_complete( cluster_status.ADOPT_COMPLETE) @patch('magnum.drivers.heat.driver.HeatPoller') @patch('magnum.conductor.handlers.cluster_conductor.trust_manager') @patch('magnum.conductor.handlers.cluster_conductor.cert_manager') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.common.clients.OpenStackClients') def test_create(self, mock_openstack_client_class, mock_driver, mock_cm, mock_trust_manager, mock_heat_poller_class): timeout = 15 mock_poller = mock.MagicMock() mock_poller.poll_and_check.return_value = loopingcall.LoopingCallDone() mock_heat_poller_class.return_value = mock_poller osc = mock.sentinel.osc def return_keystone(): return self.keystone_client osc.keystone = return_keystone mock_openstack_client_class.return_value = osc mock_dr = mock.MagicMock() mock_driver.return_value = mock_dr def create_stack_side_effect(context, osc, cluster, timeout): return {'stack': {'id': 'stack-id'}} mock_dr.create_stack.side_effect = create_stack_side_effect # FixMe(eliqiao): cluster_create will call cluster.create() # again, this so bad because we have already called it in setUp # since other test case will share the codes in setUp() # But in self.handler.cluster_create, we update cluster.uuid and # cluster.stack_id so cluster.create will create a new recored with # clustermodel_id None, this is bad because we load clusterModel # object in cluster object by clustermodel_id. Here update # self.cluster.clustermodel_id so cluster.obj_get_changes will get # notice that clustermodel_id is updated and will update it # in db. 
self.cluster.cluster_template_id = self.cluster_template.uuid cluster = self.handler.cluster_create(self.context, self.cluster, timeout) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(1, len(notifications)) self.assertEqual( 'magnum.cluster.create', notifications[0].event_type) self.assertEqual( taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome']) mock_dr.create_cluster.assert_called_once_with(self.context, self.cluster, timeout) mock_cm.generate_certificates_to_cluster.assert_called_once_with( self.cluster, context=self.context) self.assertEqual(cluster_status.CREATE_IN_PROGRESS, cluster.status) mock_trust_manager.create_trustee_and_trust.assert_called_once_with( osc, self.cluster) def _test_create_failed(self, mock_openstack_client_class, mock_cert_manager, mock_trust_manager, mock_cluster_create, expected_exception, is_create_cert_called=True, is_create_trust_called=True): osc = mock.MagicMock() mock_openstack_client_class.return_value = osc timeout = 15 self.assertRaises( expected_exception, self.handler.cluster_create, self.context, self.cluster, timeout ) gctb = mock_cert_manager.generate_certificates_to_cluster if is_create_cert_called: gctb.assert_called_once_with(self.cluster, context=self.context) else: gctb.assert_not_called() ctat = mock_trust_manager.create_trustee_and_trust if is_create_trust_called: ctat.assert_called_once_with(osc, self.cluster) else: ctat.assert_not_called() mock_cluster_create.assert_called_once_with() @patch('magnum.objects.Cluster.create') @patch('magnum.conductor.handlers.cluster_conductor.trust_manager') @patch('magnum.conductor.handlers.cluster_conductor.cert_manager') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.common.clients.OpenStackClients') def test_create_handles_bad_request(self, mock_openstack_client_class, mock_driver, mock_cert_manager, mock_trust_manager, mock_cluster_create): mock_dr = mock.MagicMock() mock_driver.return_value = mock_dr 
mock_dr.create_cluster.side_effect = exc.HTTPBadRequest self._test_create_failed( mock_openstack_client_class, mock_cert_manager, mock_trust_manager, mock_cluster_create, exception.InvalidParameterValue ) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(2, len(notifications)) self.assertEqual( 'magnum.cluster.create', notifications[0].event_type) self.assertEqual( taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome']) self.assertEqual( 'magnum.cluster.create', notifications[1].event_type) self.assertEqual( taxonomy.OUTCOME_FAILURE, notifications[1].payload['outcome']) @patch('magnum.objects.Cluster.create') @patch('magnum.conductor.handlers.cluster_conductor.trust_manager') @patch('magnum.conductor.handlers.cluster_conductor.cert_manager') @patch('magnum.common.clients.OpenStackClients') def test_create_with_cert_failed(self, mock_openstack_client_class, mock_cert_manager, mock_trust_manager, mock_cluster_create): e = exception.CertificatesToClusterFailed(cluster_uuid='uuid') mock_cert_manager.generate_certificates_to_cluster.side_effect = e self._test_create_failed( mock_openstack_client_class, mock_cert_manager, mock_trust_manager, mock_cluster_create, exception.CertificatesToClusterFailed ) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(1, len(notifications)) self.assertEqual( 'magnum.cluster.create', notifications[0].event_type) self.assertEqual( taxonomy.OUTCOME_FAILURE, notifications[0].payload['outcome']) @patch('magnum.objects.Cluster.create') @patch('magnum.conductor.handlers.cluster_conductor.trust_manager') @patch('magnum.conductor.handlers.cluster_conductor.cert_manager') @patch('magnum.common.clients.OpenStackClients') def test_create_with_trust_failed(self, mock_openstack_client_class, mock_cert_manager, mock_trust_manager, mock_cluster_create): e = exception.TrusteeOrTrustToClusterFailed(cluster_uuid='uuid') mock_trust_manager.create_trustee_and_trust.side_effect = e self._test_create_failed( 
mock_openstack_client_class, mock_cert_manager, mock_trust_manager, mock_cluster_create, exception.TrusteeOrTrustToClusterFailed, False ) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(1, len(notifications)) self.assertEqual( 'magnum.cluster.create', notifications[0].event_type) self.assertEqual( taxonomy.OUTCOME_FAILURE, notifications[0].payload['outcome']) @patch('magnum.objects.Cluster.create') @patch('magnum.conductor.handlers.cluster_conductor.trust_manager') @patch('magnum.conductor.handlers.cluster_conductor.cert_manager') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.common.clients.OpenStackClients') def test_create_with_invalid_unicode_name(self, mock_openstack_client_class, mock_driver, mock_cert_manager, mock_trust_manager, mock_cluster_create): error_message = six.u("""Invalid stack name 测试集群-zoyh253geukk must contain only alphanumeric or "_-." characters, must start with alpha""") mock_dr = mock.MagicMock() mock_driver.return_value = mock_dr mock_dr.create_cluster.side_effect = exc.HTTPBadRequest(error_message) self._test_create_failed( mock_openstack_client_class, mock_cert_manager, mock_trust_manager, mock_cluster_create, exception.InvalidParameterValue ) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(2, len(notifications)) self.assertEqual( 'magnum.cluster.create', notifications[0].event_type) self.assertEqual( taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome']) self.assertEqual( 'magnum.cluster.create', notifications[1].event_type) self.assertEqual( taxonomy.OUTCOME_FAILURE, notifications[1].payload['outcome']) @patch('magnum.drivers.heat.driver.HeatPoller') @patch('heatclient.common.template_utils' '.process_multiple_environments_and_files') @patch('heatclient.common.template_utils.get_template_contents') @patch('magnum.conductor.handlers.cluster_conductor.trust_manager') @patch('magnum.conductor.handlers.cluster_conductor.cert_manager') 
@patch('magnum.drivers.k8s_fedora_atomic_v1.driver.Driver.' '_extract_template_definition') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.common.clients.OpenStackClients') @patch('magnum.common.short_id.generate_id') def test_create_with_environment(self, mock_short_id, mock_openstack_client_class, mock_driver, mock_extract_tmpl_def, mock_cert_manager, mock_trust_manager, mock_get_template_contents, mock_process_mult, mock_heat_poller_class): timeout = 15 self.cluster.cluster_template_id = self.cluster_template.uuid self.cluster.name = 'cluster1' cluster_name = self.cluster.name mock_poller = mock.MagicMock() mock_poller.poll_and_check.return_value = loopingcall.LoopingCallDone() mock_heat_poller_class.return_value = mock_poller mock_driver.return_value = k8s_atomic_dr.Driver() mock_short_id.return_value = 'short_id' mock_extract_tmpl_def.return_value = ( 'the/template/path.yaml', {'heat_param_1': 'foo', 'heat_param_2': 'bar'}, ['env_file_1', 'env_file_2']) mock_get_template_contents.return_value = ( {'tmpl_file_1': 'some content', 'tmpl_file_2': 'some more content'}, 'some template yaml') def do_mock_process_mult(env_paths=None, env_list_tracker=None): self.assertEqual(env_list_tracker, []) for f in env_paths: env_list_tracker.append('file:///' + f) env_map = {path: 'content of ' + path for path in env_list_tracker} return (env_map, None) mock_process_mult.side_effect = do_mock_process_mult mock_hc = mock.Mock() mock_hc.stacks.create.return_value = {'stack': {'id': 'stack-id'}} osc = mock.Mock() osc.heat.return_value = mock_hc mock_openstack_client_class.return_value = osc self.handler.cluster_create(self.context, self.cluster, timeout) mock_extract_tmpl_def.assert_called_once_with(self.context, self.cluster) mock_get_template_contents.assert_called_once_with( 'the/template/path.yaml') mock_process_mult.assert_called_once_with( env_paths=['the/template/env_file_1', 'the/template/env_file_2'], env_list_tracker=mock.ANY) 
mock_hc.stacks.create.assert_called_once_with( environment_files=['file:///the/template/env_file_1', 'file:///the/template/env_file_2'], files={ 'tmpl_file_1': 'some content', 'tmpl_file_2': 'some more content', 'file:///the/template/env_file_1': 'content of file:///the/template/env_file_1', 'file:///the/template/env_file_2': 'content of file:///the/template/env_file_2' }, parameters={'heat_param_1': 'foo', 'heat_param_2': 'bar'}, stack_name=('%s-short_id' % cluster_name), template='some template yaml', timeout_mins=timeout) @patch('magnum.conductor.handlers.cluster_conductor.cert_manager') @patch('magnum.common.clients.OpenStackClients') @patch('magnum.drivers.common.driver.Driver.get_driver') def test_cluster_delete(self, mock_driver, mock_openstack_client_class, cert_manager): mock_driver.return_value = k8s_atomic_dr.Driver() osc = mock.MagicMock() mock_openstack_client_class.return_value = osc osc.heat.side_effect = exc.HTTPNotFound self.handler.cluster_delete(self.context, self.cluster.uuid) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(2, len(notifications)) self.assertEqual( 'magnum.cluster.delete', notifications[0].event_type) self.assertEqual( taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome']) self.assertEqual( 'magnum.cluster.delete', notifications[1].event_type) self.assertEqual( taxonomy.OUTCOME_SUCCESS, notifications[1].payload['outcome']) self.assertEqual( 1, cert_manager.delete_certificates_from_cluster.call_count) # The cluster has been destroyed self.assertRaises(exception.ClusterNotFound, objects.Cluster.get, self.context, self.cluster.uuid) @patch('magnum.conductor.handlers.cluster_conductor.cert_manager') @patch('magnum.common.clients.OpenStackClients') @patch('magnum.drivers.common.driver.Driver.get_driver') def test_cluster_delete_conflict(self, mock_driver, mock_openstack_client_class, cert_manager): mock_driver.return_value = k8s_atomic_dr.Driver() osc = mock.MagicMock() mock_openstack_client_class.return_value = 
osc osc.heat.side_effect = exc.HTTPConflict self.assertRaises(exception.OperationInProgress, self.handler.cluster_delete, self.context, self.cluster.uuid) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(2, len(notifications)) self.assertEqual( 'magnum.cluster.delete', notifications[0].event_type) self.assertEqual( taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome']) self.assertEqual( 'magnum.cluster.delete', notifications[1].event_type) self.assertEqual( taxonomy.OUTCOME_FAILURE, notifications[1].payload['outcome']) self.assertEqual( 0, cert_manager.delete_certificates_from_cluster.call_count) magnum-6.1.0/magnum/tests/unit/conductor/handlers/__init__.py0000666000175100017510000000000013244017334024335 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/conductor/handlers/test_federation_conductor.py0000666000175100017510000000311013244017334030042 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from magnum.conductor.handlers import federation_conductor from magnum import objects from magnum.tests.unit.db import base as db_base from magnum.tests.unit.db import utils class TestHandler(db_base.DbTestCase): def setUp(self): super(TestHandler, self).setUp() self.handler = federation_conductor.Handler() federation_dict = utils.get_test_federation() self.federation = objects.Federation(self.context, **federation_dict) self.federation.create() def test_create_federation(self): self.assertRaises(NotImplementedError, self.handler.federation_create, self.context, self.federation, create_timeout=15) def test_update_federation(self): self.assertRaises(NotImplementedError, self.handler.federation_update, self.context, self.federation, rollback=False) def test_delete_federation(self): self.assertRaises(NotImplementedError, self.handler.federation_delete, self.context, self.federation.uuid) magnum-6.1.0/magnum/tests/unit/conductor/test_utils.py0000666000175100017510000002274713244017334023223 0ustar zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from mock import patch from magnum.conductor import utils from magnum import objects from magnum.tests import base class TestConductorUtils(base.TestCase): def _test_retrieve_cluster(self, expected_cluster_uuid, mock_cluster_get_by_uuid): expected_context = 'context' utils.retrieve_cluster(expected_context, expected_cluster_uuid) mock_cluster_get_by_uuid.assert_called_once_with( expected_context, expected_cluster_uuid) def get_fake_id(self): return '5d12f6fd-a196-4bf0-ae4c-1f639a523a52' def _get_type_uri(self): return 'service/security/account/user' @patch('magnum.objects.ClusterTemplate.get_by_uuid') def test_retrieve_cluster_template(self, mock_cluster_template_get_by_uuid): expected_context = 'context' expected_cluster_template_uuid = 'ClusterTemplate_uuid' cluster = objects.Cluster({}) cluster.cluster_template_id = expected_cluster_template_uuid utils.retrieve_cluster_template(expected_context, cluster) mock_cluster_template_get_by_uuid.assert_called_once_with( expected_context, expected_cluster_template_uuid) @patch('oslo_utils.uuidutils.is_uuid_like') @patch('magnum.objects.Cluster.get_by_name') def test_retrieve_cluster_uuid_from_name(self, mock_cluster_get_by_name, mock_uuid_like): cluster = objects.Cluster(uuid='5d12f6fd-a196-4bf0-ae4c-1f639a523a52') mock_uuid_like.return_value = False mock_cluster_get_by_name.return_value = cluster cluster_uuid = utils.retrieve_cluster_uuid('context', 'fake_name') self.assertEqual('5d12f6fd-a196-4bf0-ae4c-1f639a523a52', cluster_uuid) mock_uuid_like.assert_called_once_with('fake_name') mock_cluster_get_by_name.assert_called_once_with('context', 'fake_name') @patch('oslo_utils.uuidutils.is_uuid_like') @patch('magnum.objects.Cluster.get_by_name') def test_retrieve_cluster_uuid_from_uuid(self, mock_cluster_get_by_name, mock_uuid_like): cluster_uuid = utils.retrieve_cluster_uuid( 'context', '5d12f6fd-a196-4bf0-ae4c-1f639a523a52') self.assertEqual('5d12f6fd-a196-4bf0-ae4c-1f639a523a52', cluster_uuid) 
mock_uuid_like.return_value = True mock_cluster_get_by_name.assert_not_called() def _get_heat_stacks_get_mock_obj(self, status): mock_stack = mock.MagicMock() mock_osc = mock.MagicMock() mock_stack_obj = mock.MagicMock() mock_stack_obj.stack_status = status stack_get = mock.MagicMock() stack_get.get.return_value = mock_stack_obj mock_stack.stacks = stack_get mock_osc.heat.return_value = mock_stack return mock_osc @patch('magnum.conductor.utils.retrieve_cluster') @patch('magnum.conductor.utils.clients.OpenStackClients') def test_object_has_stack_invalid_status(self, mock_oscs, mock_retrieve_cluster): mock_osc = self._get_heat_stacks_get_mock_obj("INVALID_STATUS") mock_oscs.return_value = mock_osc self.assertTrue(utils.object_has_stack('context', self.get_fake_id())) mock_retrieve_cluster.assert_called_with('context', self.get_fake_id()) @patch('magnum.conductor.utils.retrieve_cluster') @patch('magnum.conductor.utils.clients.OpenStackClients') def test_object_has_stack_delete_in_progress(self, mock_oscs, mock_retrieve_cluster): mock_osc = self._get_heat_stacks_get_mock_obj("DELETE_IN_PROGRESS") mock_oscs.return_value = mock_osc self.assertFalse(utils.object_has_stack('context', self.get_fake_id())) mock_retrieve_cluster.assert_called_with('context', self.get_fake_id()) @patch('magnum.conductor.utils.retrieve_cluster') @patch('magnum.conductor.utils.clients.OpenStackClients') def test_object_has_stack_delete_complete_status(self, mock_oscs, mock_retrieve_cluster): mock_osc = self._get_heat_stacks_get_mock_obj("DELETE_COMPLETE") mock_oscs.return_value = mock_osc self.assertFalse(utils.object_has_stack('context', self.get_fake_id())) mock_retrieve_cluster.assert_called_with('context', self.get_fake_id()) @patch('magnum.objects.Cluster.get_by_uuid') def test_retrieve_cluster_uuid(self, mock_get_by_uuid): mock_get_by_uuid.return_value = True utils.retrieve_cluster('context', '5d12f6fd-a196-4bf0-ae4c-1f639a523a52') self.assertTrue(mock_get_by_uuid.called) 
@patch('magnum.objects.Cluster.get_by_name') def test_retrieve_cluster_name(self, mock_get_by_name): mock_get_by_name.return_value = mock.MagicMock() utils.retrieve_cluster('context', '1') self.assertTrue(mock_get_by_name.called) @patch('magnum.conductor.utils.resource.Resource') def test_get_request_audit_info_with_none_context(self, mock_resource): mock_resource.return_value = 'resource' result = utils._get_request_audit_info(context=None) self.assertTrue(mock_resource.called) self.assertEqual(result, 'resource') def _assert_for_user_project_domain_resource(self, result, ctxt, mock_res): mock_res.assert_called_once_with(typeURI=self._get_type_uri()) self.assertEqual(result.user_id, ctxt.user_id) self.assertEqual(result.project_id, ctxt.project_id) self.assertEqual(result.domain_id, ctxt.domain_id) def _get_context(self, user_id=None, project_id=None, domain_id=None): context = self.mock_make_context() context.user_id = user_id context.project_id = project_id context.domain_id = domain_id return context @patch('magnum.conductor.utils.resource.Resource') def test_get_request_audit_info_with_none_userid(self, mock_resource): context = self._get_context(project_id='test_project_id', domain_id='test_domain_id') mock_resource.return_value = context result = utils._get_request_audit_info(context) self._assert_for_user_project_domain_resource(result, context, mock_resource) @patch('magnum.conductor.utils.resource.Resource') def test_get_request_audit_info_with_none_projectid(self, mock_resource): context = self._get_context(user_id='test_user_id', domain_id='test_domain_id') mock_resource.return_value = context result = utils._get_request_audit_info(context) self._assert_for_user_project_domain_resource(result, context, mock_resource) @patch('magnum.conductor.utils.resource.Resource') def test_get_request_audit_info_with_none_domainid(self, mock_resource): context = self._get_context(user_id='test_user_id', project_id='test_project_id') mock_resource.return_value = 
context result = utils._get_request_audit_info(context) self._assert_for_user_project_domain_resource(result, context, mock_resource) @patch('magnum.conductor.utils.resource.Resource') def test_get_request_audit_info_with_none_domainid_userid(self, mock_resource): context = self._get_context(project_id='test_project_id') mock_resource.return_value = context result = utils._get_request_audit_info(context) self._assert_for_user_project_domain_resource(result, context, mock_resource) @patch('magnum.conductor.utils.resource.Resource') def test_get_request_audit_info_with_none_userid_projectid(self, mock_resource): context = self._get_context(domain_id='test_domain_id') mock_resource.return_value = context result = utils._get_request_audit_info(context) self._assert_for_user_project_domain_resource(result, context, mock_resource) @patch('magnum.conductor.utils.resource.Resource') def test_get_request_audit_info_with_none_domain_project_id(self, mock_resource): context = self._get_context(user_id='test_user_id') mock_resource.return_value = context result = utils._get_request_audit_info(context) self._assert_for_user_project_domain_resource(result, context, mock_resource) magnum-6.1.0/magnum/tests/unit/conductor/test_scale_manager.py0000666000175100017510000002244513244017334024637 0ustar zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from magnum.common import exception from magnum.conductor import scale_manager from magnum.drivers.common.k8s_scale_manager import K8sScaleManager from magnum.drivers.mesos_ubuntu_v1.scale_manager import MesosScaleManager from magnum.tests import base class TestScaleManager(base.TestCase): def _test_get_removal_nodes( self, mock_get_hosts, mock_get_num_of_removal, mock_is_scale_down, mock_get_by_uuid, is_scale_down, num_of_removal, all_hosts, container_hosts, expected_removal_hosts): mock_is_scale_down.return_value = is_scale_down mock_get_num_of_removal.return_value = num_of_removal mock_get_hosts.return_value = container_hosts mock_heat_output = mock.MagicMock() mock_heat_output.get_output_value.return_value = all_hosts mock_stack = mock.MagicMock() mock_heat_client = mock.MagicMock() mock_osc = mock.MagicMock() mock_heat_client.stacks.get.return_value = mock_stack mock_osc.heat.return_value = mock_heat_client mock_context = mock.MagicMock() mock_cluster = mock.MagicMock() scale_mgr = scale_manager.ScaleManager(mock_context, mock_osc, mock_cluster) if expected_removal_hosts is None: self.assertRaises(exception.MagnumException, scale_mgr.get_removal_nodes, mock_heat_output) else: removal_hosts = scale_mgr.get_removal_nodes(mock_heat_output) self.assertEqual(expected_removal_hosts, removal_hosts) if num_of_removal > 0: mock_get_hosts.assert_called_once_with(mock_context, mock_cluster) @mock.patch('magnum.objects.Cluster.get_by_uuid') @mock.patch('magnum.conductor.scale_manager.ScaleManager._is_scale_down') @mock.patch('magnum.conductor.scale_manager.ScaleManager.' '_get_num_of_removal') @mock.patch('magnum.conductor.scale_manager.ScaleManager.' 
'_get_hosts_with_container') def test_get_removal_nodes_no_container_host( self, mock_get_hosts, mock_get_num_of_removal, mock_is_scale_down, mock_get_by_uuid): is_scale_down = True num_of_removal = 1 all_hosts = ['10.0.0.3'] container_hosts = set() expected_removal_hosts = ['10.0.0.3'] self._test_get_removal_nodes( mock_get_hosts, mock_get_num_of_removal, mock_is_scale_down, mock_get_by_uuid, is_scale_down, num_of_removal, all_hosts, container_hosts, expected_removal_hosts) @mock.patch('magnum.objects.Cluster.get_by_uuid') @mock.patch('magnum.conductor.scale_manager.ScaleManager._is_scale_down') @mock.patch('magnum.conductor.scale_manager.ScaleManager.' '_get_num_of_removal') @mock.patch('magnum.conductor.scale_manager.ScaleManager.' '_get_hosts_with_container') def test_get_removal_nodes_one_container_host( self, mock_get_hosts, mock_get_num_of_removal, mock_is_scale_down, mock_get_by_uuid): is_scale_down = True num_of_removal = 1 all_hosts = ['10.0.0.3', '10.0.0.4'] container_hosts = set(['10.0.0.3']) expected_removal_hosts = ['10.0.0.4'] self._test_get_removal_nodes( mock_get_hosts, mock_get_num_of_removal, mock_is_scale_down, mock_get_by_uuid, is_scale_down, num_of_removal, all_hosts, container_hosts, expected_removal_hosts) @mock.patch('magnum.objects.Cluster.get_by_uuid') @mock.patch('magnum.conductor.scale_manager.ScaleManager._is_scale_down') @mock.patch('magnum.conductor.scale_manager.ScaleManager.' '_get_num_of_removal') @mock.patch('magnum.conductor.scale_manager.ScaleManager.' 
'_get_hosts_with_container') def test_get_removal_nodes_two_container_hosts( self, mock_get_hosts, mock_get_num_of_removal, mock_is_scale_down, mock_get_by_uuid): is_scale_down = True num_of_removal = 1 all_hosts = ['10.0.0.3', '10.0.0.4'] container_hosts = set(['10.0.0.3', '10.0.0.4']) expected_removal_hosts = [] self._test_get_removal_nodes( mock_get_hosts, mock_get_num_of_removal, mock_is_scale_down, mock_get_by_uuid, is_scale_down, num_of_removal, all_hosts, container_hosts, expected_removal_hosts) @mock.patch('magnum.objects.Cluster.get_by_uuid') @mock.patch('magnum.conductor.scale_manager.ScaleManager._is_scale_down') @mock.patch('magnum.conductor.scale_manager.ScaleManager.' '_get_num_of_removal') @mock.patch('magnum.conductor.scale_manager.ScaleManager.' '_get_hosts_with_container') def test_get_removal_nodes_three_container_hosts( self, mock_get_hosts, mock_get_num_of_removal, mock_is_scale_down, mock_get_by_uuid): is_scale_down = True num_of_removal = 1 all_hosts = ['10.0.0.3', '10.0.0.4'] container_hosts = set(['10.0.0.3', '10.0.0.4', '10.0.0.5']) expected_removal_hosts = [] self._test_get_removal_nodes( mock_get_hosts, mock_get_num_of_removal, mock_is_scale_down, mock_get_by_uuid, is_scale_down, num_of_removal, all_hosts, container_hosts, expected_removal_hosts) @mock.patch('magnum.objects.Cluster.get_by_uuid') @mock.patch('magnum.conductor.scale_manager.ScaleManager._is_scale_down') @mock.patch('magnum.conductor.scale_manager.ScaleManager.' '_get_num_of_removal') @mock.patch('magnum.conductor.scale_manager.ScaleManager.' 
'_get_hosts_with_container') def test_get_removal_nodes_scale_up( self, mock_get_hosts, mock_get_num_of_removal, mock_is_scale_down, mock_get_by_uuid): is_scale_down = False num_of_removal = -1 all_hosts = ['10.0.0.3', '10.0.0.4'] container_hosts = set() expected_removal_hosts = [] self._test_get_removal_nodes( mock_get_hosts, mock_get_num_of_removal, mock_is_scale_down, mock_get_by_uuid, is_scale_down, num_of_removal, all_hosts, container_hosts, expected_removal_hosts) @mock.patch('magnum.objects.Cluster.get_by_uuid') @mock.patch('magnum.conductor.scale_manager.ScaleManager._is_scale_down') @mock.patch('magnum.conductor.scale_manager.ScaleManager.' '_get_num_of_removal') @mock.patch('magnum.conductor.scale_manager.ScaleManager.' '_get_hosts_with_container') def test_get_removal_nodes_with_none_hosts( self, mock_get_hosts, mock_get_num_of_removal, mock_is_scale_down, mock_get_by_uuid): is_scale_down = True num_of_removal = 1 all_hosts = None container_hosts = set() expected_removal_hosts = None self._test_get_removal_nodes( mock_get_hosts, mock_get_num_of_removal, mock_is_scale_down, mock_get_by_uuid, is_scale_down, num_of_removal, all_hosts, container_hosts, expected_removal_hosts) class TestK8sScaleManager(base.TestCase): @mock.patch('magnum.objects.Cluster.get_by_uuid') @mock.patch('magnum.conductor.k8s_api.create_k8s_api') def test_get_hosts_with_container(self, mock_create_api, mock_get): pods = mock.MagicMock() pod_1 = mock.MagicMock() pod_1.spec.node_name = 'node1' pod_2 = mock.MagicMock() pod_2.spec.node_name = 'node2' pods.items = [pod_1, pod_2] mock_api = mock.MagicMock() mock_api.list_namespaced_pod.return_value = pods mock_create_api.return_value = mock_api mgr = K8sScaleManager( mock.MagicMock(), mock.MagicMock(), mock.MagicMock()) hosts = mgr._get_hosts_with_container( mock.MagicMock(), mock.MagicMock()) self.assertEqual(hosts, {'node1', 'node2'}) class TestMesosScaleManager(base.TestCase): @mock.patch('magnum.objects.Cluster.get_by_uuid') 
@mock.patch('marathon.MarathonClient') @mock.patch('marathon.MarathonClient.list_tasks') def test_get_hosts_with_container(self, mock_list_tasks, mock_client, mock_get): task_1 = mock.MagicMock() task_1.host = 'node1' task_2 = mock.MagicMock() task_2.host = 'node2' tasks = [task_1, task_2] mock_list_tasks.return_value = tasks mgr = MesosScaleManager( mock.MagicMock(), mock.MagicMock(), mock.MagicMock()) hosts = mgr._get_hosts_with_container( mock.MagicMock(), mock.MagicMock()) self.assertEqual(hosts, {'node1', 'node2'}) magnum-6.1.0/magnum/tests/unit/conductor/test_rpcapi.py0000666000175100017510000001026213244017334023326 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit Tests for :py:class:`magnum.conductor.rpcapi.API`. 
""" import copy import mock from magnum.conductor import api as conductor_rpcapi from magnum import objects from magnum.tests.unit.db import base from magnum.tests.unit.db import utils as dbutils class RPCAPITestCase(base.DbTestCase): def setUp(self): super(RPCAPITestCase, self).setUp() self.fake_cluster = dbutils.get_test_cluster(driver='fake-driver') self.fake_certificate = objects.Certificate.from_db_cluster( self.fake_cluster) self.fake_certificate.csr = 'fake-csr' def _test_rpcapi(self, method, rpc_method, **kwargs): rpcapi_cls = kwargs.pop('rpcapi_cls', conductor_rpcapi.API) rpcapi = rpcapi_cls(topic='fake-topic') expected_retval = 'hello world' if rpc_method == 'call' else None expected_topic = 'fake-topic' if 'host' in kwargs: expected_topic += ".%s" % kwargs['host'] target = { "topic": expected_topic, "version": kwargs.pop('version', 1.0) } expected_msg = copy.deepcopy(kwargs) self.fake_args = None self.fake_kwargs = None def _fake_prepare_method(*args, **kwargs): for kwd in kwargs: self.assertEqual(target[kwd], kwargs[kwd]) return rpcapi._client def _fake_rpc_method(*args, **kwargs): self.fake_args = args self.fake_kwargs = kwargs if expected_retval: return expected_retval with mock.patch.object(rpcapi._client, "prepare") as mock_prepared: mock_prepared.side_effect = _fake_prepare_method with mock.patch.object(rpcapi._client, rpc_method) as mock_method: mock_method.side_effect = _fake_rpc_method retval = getattr(rpcapi, method)(**kwargs) self.assertEqual(expected_retval, retval) expected_args = [None, method, expected_msg] for arg, expected_arg in zip(self.fake_args, expected_args): self.assertEqual(expected_arg, arg) def test_cluster_create(self): self._test_rpcapi('cluster_create', 'call', version='1.0', cluster=self.fake_cluster, create_timeout=15) def test_cluster_delete(self): self._test_rpcapi('cluster_delete', 'call', version='1.0', uuid=self.fake_cluster['uuid']) self._test_rpcapi('cluster_delete', 'call', version='1.1', 
uuid=self.fake_cluster['name']) def test_cluster_update(self): self._test_rpcapi('cluster_update', 'call', version='1.1', cluster=self.fake_cluster['name']) def test_ping_conductor(self): self._test_rpcapi('ping_conductor', 'call', rpcapi_cls=conductor_rpcapi.ListenerAPI, version='1.0') def test_sign_certificate(self): self._test_rpcapi('sign_certificate', 'call', version='1.0', cluster=self.fake_cluster, certificate=self.fake_certificate) def test_get_ca_certificate(self): self._test_rpcapi('get_ca_certificate', 'call', version='1.0', cluster=self.fake_cluster) magnum-6.1.0/magnum/tests/unit/conductor/__init__.py0000666000175100017510000000000013244017334022535 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/conductor/test_k8s_api.py0000666000175100017510000000355113244017334023411 0ustar zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from magnum.tests import base class TestK8sAPI(base.TestCase): content_dict = { 'fake-magnum-cert-ref': { 'certificate': 'certificate-content', 'private_key': 'private-key-content', 'decrypted_private_key': 'private-key-content', }, 'fake-ca-cert-ref': { 'certificate': 'ca-cert-content', 'private_key': None, 'decrypted_private_key': None, } } file_dict = { 'ca-cert-content': mock.MagicMock(), 'certificate-content': mock.MagicMock(), 'private-key-content': mock.MagicMock() } file_name = { 'ca-cert-content': 'ca-cert-temp-file-name', 'certificate-content': 'cert-temp-file-name', 'private-key-content': 'priv-key-temp-file-name' } def _mock_cert_mgr_get_cert(self, cert_ref, **kwargs): cert_obj = mock.MagicMock() cert_obj.get_certificate.return_value = ( TestK8sAPI.content_dict[cert_ref]['certificate']) cert_obj.get_private_key.return_value = ( TestK8sAPI.content_dict[cert_ref]['private_key']) cert_obj.get_decrypted_private_key.return_value = ( TestK8sAPI.content_dict[cert_ref]['decrypted_private_key']) return cert_obj magnum-6.1.0/magnum/tests/unit/conductor/test_monitors.py0000666000175100017510000003100313244017334023716 0ustar zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from oslo_serialization import jsonutils from magnum.drivers.common import k8s_monitor from magnum.drivers.mesos_ubuntu_v1 import monitor as mesos_monitor from magnum.drivers.swarm_fedora_atomic_v1 import monitor as swarm_monitor from magnum import objects from magnum.tests import base from magnum.tests.unit.db import utils class MonitorsTestCase(base.TestCase): test_metrics_spec = { 'metric1': { 'unit': 'metric1_unit', 'func': 'metric1_func', }, 'metric2': { 'unit': 'metric2_unit', 'func': 'metric2_func', }, } def setUp(self): super(MonitorsTestCase, self).setUp() cluster = utils.get_test_cluster(node_addresses=['1.2.3.4'], api_address='https://5.6.7.8:2376', master_addresses=['10.0.0.6']) self.cluster = objects.Cluster(self.context, **cluster) self.monitor = swarm_monitor.SwarmMonitor(self.context, self.cluster) self.k8s_monitor = k8s_monitor.K8sMonitor(self.context, self.cluster) self.mesos_monitor = mesos_monitor.MesosMonitor(self.context, self.cluster) p = mock.patch('magnum.drivers.swarm_fedora_atomic_v1.monitor.' 
'SwarmMonitor.metrics_spec', new_callable=mock.PropertyMock) self.mock_metrics_spec = p.start() self.mock_metrics_spec.return_value = self.test_metrics_spec self.addCleanup(p.stop) @mock.patch('magnum.common.docker_utils.docker_for_cluster') def test_swarm_monitor_pull_data_success(self, mock_docker_cluster): mock_docker = mock.MagicMock() mock_docker.info.return_value = {'DriverStatus': [[ u' \u2514 Reserved Memory', u'0 B / 1 GiB']]} mock_docker.containers.return_value = [mock.MagicMock()] mock_docker.inspect_container.return_value = 'test_container' mock_docker_cluster.return_value.__enter__.return_value = mock_docker self.monitor.pull_data() self.assertEqual([{'MemTotal': 1073741824.0}], self.monitor.data['nodes']) self.assertEqual(['test_container'], self.monitor.data['containers']) @mock.patch('magnum.common.docker_utils.docker_for_cluster') def test_swarm_monitor_pull_data_raise(self, mock_docker_cluster): mock_container = mock.MagicMock() mock_docker = mock.MagicMock() mock_docker.info.return_value = {'DriverStatus': [[ u' \u2514 Reserved Memory', u'0 B / 1 GiB']]} mock_docker.containers.return_value = [mock_container] mock_docker.inspect_container.side_effect = Exception("inspect error") mock_docker_cluster.return_value.__enter__.return_value = mock_docker self.monitor.pull_data() self.assertEqual([{'MemTotal': 1073741824.0}], self.monitor.data['nodes']) self.assertEqual([mock_container], self.monitor.data['containers']) def test_swarm_monitor_get_metric_names(self): names = self.monitor.get_metric_names() self.assertEqual(sorted(['metric1', 'metric2']), sorted(names)) def test_swarm_monitor_get_metric_unit(self): unit = self.monitor.get_metric_unit('metric1') self.assertEqual('metric1_unit', unit) def test_swarm_monitor_compute_metric_value(self): mock_func = mock.MagicMock() mock_func.return_value = 'metric1_value' self.monitor.metric1_func = mock_func value = self.monitor.compute_metric_value('metric1') self.assertEqual('metric1_value', value) def 
test_swarm_monitor_compute_memory_util(self): test_data = { 'nodes': [ { 'Name': 'node', 'MemTotal': 20, }, ], 'containers': [ { 'Name': 'container', 'HostConfig': { 'Memory': 10, }, }, ], } self.monitor.data = test_data mem_util = self.monitor.compute_memory_util() self.assertEqual(50, mem_util) test_data = { 'nodes': [], 'containers': [], } self.monitor.data = test_data mem_util = self.monitor.compute_memory_util() self.assertEqual(0, mem_util) @mock.patch('magnum.conductor.k8s_api.create_k8s_api') def test_k8s_monitor_pull_data_success(self, mock_k8s_api): mock_nodes = mock.MagicMock() mock_node = mock.MagicMock() mock_node.status = mock.MagicMock() mock_node.status.capacity = {'memory': '2000Ki', 'cpu': '1'} mock_nodes.items = [mock_node] mock_k8s_api.return_value.list_node.return_value = ( mock_nodes) mock_pods = mock.MagicMock() mock_pod = mock.MagicMock() mock_pod.spec = mock.MagicMock() mock_container = mock.MagicMock() mock_container.resources = mock.MagicMock() mock_container.resources.limits = "{'memory': '100Mi', 'cpu': '500m'}" mock_pod.spec.containers = [mock_container] mock_pods.items = [mock_pod] mock_k8s_api.return_value.list_namespaced_pod.return_value = mock_pods self.k8s_monitor.pull_data() self.assertEqual(self.k8s_monitor.data['nodes'], [{'Memory': 2048000.0, 'Cpu': 1}]) self.assertEqual(self.k8s_monitor.data['pods'], [{'Memory': 104857600.0, 'Cpu': 0.5}]) def test_k8s_monitor_get_metric_names(self): k8s_metric_spec = 'magnum.drivers.common.k8s_monitor.K8sMonitor.'\ 'metrics_spec' with mock.patch(k8s_metric_spec, new_callable=mock.PropertyMock) as mock_k8s_metric: mock_k8s_metric.return_value = self.test_metrics_spec names = self.k8s_monitor.get_metric_names() self.assertEqual(sorted(['metric1', 'metric2']), sorted(names)) def test_k8s_monitor_get_metric_unit(self): k8s_metric_spec = 'magnum.drivers.common.k8s_monitor.K8sMonitor.'\ 'metrics_spec' with mock.patch(k8s_metric_spec, new_callable=mock.PropertyMock) as mock_k8s_metric: 
mock_k8s_metric.return_value = self.test_metrics_spec unit = self.k8s_monitor.get_metric_unit('metric1') self.assertEqual('metric1_unit', unit) def test_k8s_monitor_compute_memory_util(self): test_data = { 'nodes': [ { 'Memory': 20, }, ], 'pods': [ { 'Memory': 10, }, ], } self.k8s_monitor.data = test_data mem_util = self.k8s_monitor.compute_memory_util() self.assertEqual(50, mem_util) test_data = { 'nodes': [], 'pods': [], } self.k8s_monitor.data = test_data mem_util = self.k8s_monitor.compute_memory_util() self.assertEqual(0, mem_util) def test_k8s_monitor_compute_cpu_util(self): test_data = { 'nodes': [ { 'Cpu': 1, }, ], 'pods': [ { 'Cpu': 0.5, }, ], } self.k8s_monitor.data = test_data cpu_util = self.k8s_monitor.compute_cpu_util() self.assertEqual(50, cpu_util) test_data = { 'nodes': [], 'pods': [], } self.k8s_monitor.data = test_data cpu_util = self.k8s_monitor.compute_cpu_util() self.assertEqual(0, cpu_util) def _test_mesos_monitor_pull_data( self, mock_url_get, state_json, expected_mem_total, expected_mem_used, expected_cpu_total, expected_cpu_used): state_json = jsonutils.dumps(state_json) mock_url_get.return_value = state_json self.mesos_monitor.pull_data() self.assertEqual(self.mesos_monitor.data['mem_total'], expected_mem_total) self.assertEqual(self.mesos_monitor.data['mem_used'], expected_mem_used) self.assertEqual(self.mesos_monitor.data['cpu_total'], expected_cpu_total) self.assertEqual(self.mesos_monitor.data['cpu_used'], expected_cpu_used) @mock.patch('magnum.common.urlfetch.get') def test_mesos_monitor_pull_data_success(self, mock_url_get): state_json = { 'leader': 'master@10.0.0.6:5050', 'pid': 'master@10.0.0.6:5050', 'slaves': [{ 'resources': { 'mem': 100, 'cpus': 1, }, 'used_resources': { 'mem': 50, 'cpus': 0.2, } }] } self._test_mesos_monitor_pull_data(mock_url_get, state_json, 100, 50, 1, 0.2) @mock.patch('magnum.common.urlfetch.get') def test_mesos_monitor_pull_data_success_not_leader(self, mock_url_get): state_json = { 'leader': 
'master@10.0.0.6:5050', 'pid': 'master@1.1.1.1:5050', 'slaves': [] } self._test_mesos_monitor_pull_data(mock_url_get, state_json, 0, 0, 0, 0) @mock.patch('magnum.common.urlfetch.get') def test_mesos_monitor_pull_data_success_no_master(self, mock_url_get): self.cluster.master_addresses = [] self._test_mesos_monitor_pull_data(mock_url_get, {}, 0, 0, 0, 0) def test_mesos_monitor_get_metric_names(self): mesos_metric_spec = ('magnum.drivers.mesos_ubuntu_v1.monitor.' 'MesosMonitor.metrics_spec') with mock.patch(mesos_metric_spec, new_callable=mock.PropertyMock) as mock_mesos_metric: mock_mesos_metric.return_value = self.test_metrics_spec names = self.mesos_monitor.get_metric_names() self.assertEqual(sorted(['metric1', 'metric2']), sorted(names)) def test_mesos_monitor_get_metric_unit(self): mesos_metric_spec = ('magnum.drivers.mesos_ubuntu_v1.monitor.' 'MesosMonitor.metrics_spec') with mock.patch(mesos_metric_spec, new_callable=mock.PropertyMock) as mock_mesos_metric: mock_mesos_metric.return_value = self.test_metrics_spec unit = self.mesos_monitor.get_metric_unit('metric1') self.assertEqual('metric1_unit', unit) def test_mesos_monitor_compute_memory_util(self): test_data = { 'mem_total': 100, 'mem_used': 50 } self.mesos_monitor.data = test_data mem_util = self.mesos_monitor.compute_memory_util() self.assertEqual(50, mem_util) test_data = { 'mem_total': 0, 'pods': 0, } self.mesos_monitor.data = test_data mem_util = self.mesos_monitor.compute_memory_util() self.assertEqual(0, mem_util) test_data = { 'mem_total': 100, 'mem_used': 0, 'pods': 0, } self.mesos_monitor.data = test_data mem_util = self.mesos_monitor.compute_memory_util() self.assertEqual(0, mem_util) def test_mesos_monitor_compute_cpu_util(self): test_data = { 'cpu_total': 1, 'cpu_used': 0.2, } self.mesos_monitor.data = test_data cpu_util = self.mesos_monitor.compute_cpu_util() self.assertEqual(20, cpu_util) test_data = { 'cpu_total': 100, 'cpu_used': 0, } self.mesos_monitor.data = test_data cpu_util = 
self.mesos_monitor.compute_cpu_util() self.assertEqual(0, cpu_util) magnum-6.1.0/magnum/tests/unit/test_hacking.py0000666000175100017510000002307513244017334021462 0ustar zuulzuul00000000000000# Copyright 2015 Intel, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import textwrap import mock import pep8 from magnum.hacking import checks from magnum.tests import base class HackingTestCase(base.TestCase): """Hacking test class. This class tests the hacking checks in magnum.hacking.checks by passing strings to the check methods like the pep8/flake8 parser would. The parser loops over each line in the file and then passes the parameters to the check method. The parameter names in the check method dictate what type of object is passed to the check method. The parameter types are:: logical_line: A processed line with the following modifications: - Multi-line statements converted to a single line. - Stripped left and right. - Contents of strings replaced with "xxx" of same length. - Comments removed. physical_line: Raw line of text from the input file. 
lines: a list of the raw lines from the input file tokens: the tokens that contribute to this logical line line_number: line number in the input file total_lines: number of lines in the input file blank_lines: blank lines before this one indent_char: indentation character in this file (" " or "\t") indent_level: indentation (with tabs expanded to multiples of 8) previous_indent_level: indentation on previous line previous_logical: previous logical line filename: Path of the file being run through pep8 When running a test on a check method the return will be False/None if there is no violation in the sample input. If there is an error a tuple is returned with a position in the line, and a message. So to check the result just assertTrue if the check is expected to fail and assertFalse if it should pass. """ # We are patching pep8 so that only the check under test is actually # installed. @mock.patch('pep8._checks', {'physical_line': {}, 'logical_line': {}, 'tree': {}}) def _run_check(self, code, checker, filename=None): pep8.register_check(checker) lines = textwrap.dedent(code).strip().splitlines(True) checker = pep8.Checker(filename=filename, lines=lines) checker.check_all() checker.report._deferred_print.sort() return checker.report._deferred_print def _assert_has_errors(self, code, checker, expected_errors=None, filename=None): actual_errors = [e[:3] for e in self._run_check(code, checker, filename)] self.assertEqual(expected_errors or [], actual_errors) def _assert_has_no_errors(self, code, checker, filename=None): self._assert_has_errors(code, checker, filename=filename) def test_assert_equal_in(self): errors = [(1, 0, "M338")] check = checks.assert_equal_in code = "self.assertEqual(a in b, True)" self._assert_has_errors(code, check, errors) code = "self.assertEqual('str' in 'string', True)" self._assert_has_errors(code, check, errors) code = "self.assertEqual(any(a==1 for a in b), True)" self._assert_has_no_errors(code, check) code = "self.assertEqual(True, a 
in b)" self._assert_has_errors(code, check, errors) code = "self.assertEqual(True, 'str' in 'string')" self._assert_has_errors(code, check, errors) code = "self.assertEqual(True, any(a==1 for a in b))" self._assert_has_no_errors(code, check) code = "self.assertEqual(a in b, False)" self._assert_has_errors(code, check, errors) code = "self.assertEqual('str' in 'string', False)" self._assert_has_errors(code, check, errors) code = "self.assertEqual(any(a==1 for a in b), False)" self._assert_has_no_errors(code, check) code = "self.assertEqual(False, a in b)" self._assert_has_errors(code, check, errors) code = "self.assertEqual(False, 'str' in 'string')" self._assert_has_errors(code, check, errors) code = "self.assertEqual(False, any(a==1 for a in b))" self._assert_has_no_errors(code, check) def test_no_mutable_default_args(self): errors = [(1, 0, "M322")] check = checks.no_mutable_default_args code = "def get_info_from_bdm(virt_type, bdm, mapping=[])" self._assert_has_errors(code, check, errors) code = "defined = []" self._assert_has_no_errors(code, check) code = "defined, undefined = [], {}" self._assert_has_no_errors(code, check) def test_assert_is_not_none(self): errors = [(1, 0, "M302")] check = checks.assert_equal_not_none code = "self.assertEqual(A is not None)" self._assert_has_errors(code, check, errors) code = "self.assertIsNotNone()" self._assert_has_no_errors(code, check) def test_assert_true_isinstance(self): errors = [(1, 0, "M316")] check = checks.assert_true_isinstance code = "self.assertTrue(isinstance(e, exception.BuilAbortException))" self._assert_has_errors(code, check, errors) code = "self.assertTrue()" self._assert_has_no_errors(code, check) def test_no_xrange(self): errors = [(1, 0, "M339")] check = checks.no_xrange code = "xrange(45)" self._assert_has_errors(code, check, errors) code = "range(45)" self._assert_has_no_errors(code, check) def test_no_log_warn(self): errors = [(1, 0, "M352")] check = checks.no_log_warn code = """ LOG.warn("LOG.warn 
is deprecated") """ self._assert_has_errors(code, check, errors) code = """ LOG.warning("LOG.warn is deprecated") """ self._assert_has_no_errors(code, check) def test_use_timeunitls_utcow(self): errors = [(1, 0, "M310")] check = checks.use_timeutils_utcnow code = "datetime.now" self._assert_has_errors(code, check, errors) code = "datetime.utcnow" self._assert_has_errors(code, check, errors) code = "datetime.aa" self._assert_has_no_errors(code, check) code = "aaa" self._assert_has_no_errors(code, check) def test_dict_constructor_with_list_copy(self): self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict([(i, connect_info[i])")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " attrs = dict([(k, _from_json(v))")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " type_names = dict((value, key) for key, value in")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict((value, key) for key, value in")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( "foo(param=dict((k, v) for k, v in bar.items()))")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict([[i,i] for i in range(3)])")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dd = dict([i,i] for i in range(3))")))) self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( " create_kwargs = dict(snapshot=snapshot,")))) self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( " self._render_dict(xml, data_el, data.__dict__)")))) def test_check_explicit_underscore_import(self): self.assertEqual(len(list(checks.check_explicit_underscore_import( "LOG.info(_('My info message'))", "magnum/tests/other_files.py"))), 1) self.assertEqual(len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "magnum/tests/other_files.py"))), 1) self.assertEqual(len(list(checks.check_explicit_underscore_import( "from magnum.i18n import _", 
"magnum/tests/other_files.py"))), 0) self.assertEqual(len(list(checks.check_explicit_underscore_import( "LOG.info(_('My info message'))", "magnum/tests/other_files.py"))), 0) self.assertEqual(len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "magnum/tests/other_files.py"))), 0) self.assertEqual(len(list(checks.check_explicit_underscore_import( "from magnum.i18n import _, _LW", "magnum/tests/other_files2.py"))), 0) self.assertEqual(len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "magnum/tests/other_files2.py"))), 0) self.assertEqual(len(list(checks.check_explicit_underscore_import( "_ = translations.ugettext", "magnum/tests/other_files3.py"))), 0) self.assertEqual(len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "magnum/tests/other_files3.py"))), 0) magnum-6.1.0/magnum/tests/unit/objects/0000775000175100017510000000000013244017675020075 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/objects/test_fields.py0000666000175100017510000001245113244017334022751 0ustar zuulzuul00000000000000# Copyright 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_versionedobjects.tests import test_fields from magnum.objects import fields class TestClusterStatus(test_fields.TestField): def setUp(self): super(TestClusterStatus, self).setUp() self.field = fields.ClusterStatusField() self.coerce_good_values = [('CREATE_IN_PROGRESS', 'CREATE_IN_PROGRESS'), ('CREATE_FAILED', 'CREATE_FAILED'), ('CREATE_COMPLETE', 'CREATE_COMPLETE'), ('UPDATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS'), ('UPDATE_FAILED', 'UPDATE_FAILED'), ('UPDATE_COMPLETE', 'UPDATE_COMPLETE'), ('DELETE_IN_PROGRESS', 'DELETE_IN_PROGRESS'), ('DELETE_FAILED', 'DELETE_FAILED'), ('RESUME_COMPLETE', 'RESUME_COMPLETE'), ('RESTORE_COMPLETE', 'RESTORE_COMPLETE'), ('ROLLBACK_COMPLETE', 'ROLLBACK_COMPLETE'), ('SNAPSHOT_COMPLETE', 'SNAPSHOT_COMPLETE'), ('CHECK_COMPLETE', 'CHECK_COMPLETE'), ('ADOPT_COMPLETE', 'ADOPT_COMPLETE')] self.coerce_bad_values = ['DELETE_STOPPED'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'UPDATE_FAILED'", self.field.stringify('UPDATE_FAILED')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'DELETE_STOPPED') class TestContainerStatus(test_fields.TestField): def setUp(self): super(TestContainerStatus, self).setUp() self.field = fields.ContainerStatusField() self.coerce_good_values = [('Error', 'Error'), ('Running', 'Running'), ('Stopped', 'Stopped'), ('Paused', 'Paused'), ('Unknown', 'Unknown'), ] self.coerce_bad_values = ['DELETED'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'Stopped'", self.field.stringify('Stopped')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'DELETED') class TestClusterType(test_fields.TestField): def setUp(self): super(TestClusterType, self).setUp() self.field = fields.ClusterTypeField() self.coerce_good_values = 
[('kubernetes', 'kubernetes'), ('swarm', 'swarm'), ('mesos', 'mesos'), ] self.coerce_bad_values = ['invalid'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'kubernetes'", self.field.stringify('kubernetes')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'invalid') class TestMagnumServiceBinary(test_fields.TestField): def setUp(self): super(TestMagnumServiceBinary, self).setUp() self.field = fields.MagnumServiceBinaryField() self.coerce_good_values = [('magnum-conductor', 'magnum-conductor')] self.coerce_bad_values = ['invalid'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'magnum-conductor'", self.field.stringify('magnum-conductor')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'invalid') class TestServerType(test_fields.TestField): def setUp(self): super(TestServerType, self).setUp() self.field = fields.ServerTypeField() self.coerce_good_values = [('vm', 'vm'), ('bm', 'bm'), ] self.coerce_bad_values = ['invalid'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'vm'", self.field.stringify('vm')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'invalid') magnum-6.1.0/magnum/tests/unit/objects/test_x509keypair.py0000666000175100017510000001547013244017334023601 0ustar zuulzuul00000000000000# Copyright 2015 NEC Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_utils import uuidutils from testtools.matchers import HasLength from magnum.common import exception from magnum import objects from magnum.tests.unit.db import base from magnum.tests.unit.db import utils class TestX509KeyPairObject(base.DbTestCase): def setUp(self): super(TestX509KeyPairObject, self).setUp() self.fake_x509keypair = utils.get_test_x509keypair() def test_get_by_id(self): x509keypair_id = self.fake_x509keypair['id'] with mock.patch.object(self.dbapi, 'get_x509keypair_by_id', autospec=True) as mock_get_x509keypair: mock_get_x509keypair.return_value = self.fake_x509keypair x509keypair = objects.X509KeyPair.get(self.context, x509keypair_id) mock_get_x509keypair.assert_called_once_with(self.context, x509keypair_id) self.assertEqual(self.context, x509keypair._context) def test_get_by_uuid(self): uuid = self.fake_x509keypair['uuid'] with mock.patch.object(self.dbapi, 'get_x509keypair_by_uuid', autospec=True) as mock_get_x509keypair: mock_get_x509keypair.return_value = self.fake_x509keypair x509keypair = objects.X509KeyPair.get(self.context, uuid) mock_get_x509keypair.assert_called_once_with(self.context, uuid) self.assertEqual(self.context, x509keypair._context) def test_get_bad_id_and_uuid(self): self.assertRaises(exception.InvalidIdentity, objects.X509KeyPair.get, self.context, 'not-a-uuid') def test_list(self): with mock.patch.object(self.dbapi, 'get_x509keypair_list', autospec=True) as mock_get_list: mock_get_list.return_value = [self.fake_x509keypair] x509keypairs = objects.X509KeyPair.list(self.context) self.assertEqual(1, 
mock_get_list.call_count) self.assertThat(x509keypairs, HasLength(1)) self.assertIsInstance(x509keypairs[0], objects.X509KeyPair) self.assertEqual(self.context, x509keypairs[0]._context) def test_list_all(self): with mock.patch.object(self.dbapi, 'get_x509keypair_list', autospec=True) as mock_get_list: mock_get_list.return_value = [self.fake_x509keypair] self.context.all_tenants = True x509keypairs = objects.X509KeyPair.list(self.context) mock_get_list.assert_called_once_with( self.context, limit=None, marker=None, filters=None, sort_dir=None, sort_key=None) self.assertEqual(1, mock_get_list.call_count) self.assertThat(x509keypairs, HasLength(1)) self.assertIsInstance(x509keypairs[0], objects.X509KeyPair) self.assertEqual(self.context, x509keypairs[0]._context) def test_create(self): with mock.patch.object(self.dbapi, 'create_x509keypair', autospec=True) as mock_create_x509keypair: mock_create_x509keypair.return_value = self.fake_x509keypair x509keypair = objects.X509KeyPair(self.context, **self.fake_x509keypair) x509keypair.create() mock_create_x509keypair.assert_called_once_with( self.fake_x509keypair) self.assertEqual(self.context, x509keypair._context) def test_destroy(self): uuid = self.fake_x509keypair['uuid'] with mock.patch.object(self.dbapi, 'get_x509keypair_by_uuid', autospec=True) as mock_get_x509keypair: mock_get_x509keypair.return_value = self.fake_x509keypair with mock.patch.object(self.dbapi, 'destroy_x509keypair', autospec=True) as mock_destroy_x509keypair: x509keypair = objects.X509KeyPair.get_by_uuid(self.context, uuid) x509keypair.destroy() mock_get_x509keypair.assert_called_once_with(self.context, uuid) mock_destroy_x509keypair.assert_called_once_with(uuid) self.assertEqual(self.context, x509keypair._context) def test_save(self): uuid = self.fake_x509keypair['uuid'] with mock.patch.object(self.dbapi, 'get_x509keypair_by_uuid', autospec=True) as mock_get_x509keypair: mock_get_x509keypair.return_value = self.fake_x509keypair with 
mock.patch.object(self.dbapi, 'update_x509keypair', autospec=True) as mock_update_x509keypair: x509keypair = objects.X509KeyPair.get_by_uuid(self.context, uuid) x509keypair.certificate = 'new_certificate' x509keypair.save() mock_get_x509keypair.assert_called_once_with(self.context, uuid) mock_update_x509keypair.assert_called_once_with( uuid, {'certificate': 'new_certificate'}) self.assertEqual(self.context, x509keypair._context) def test_refresh(self): uuid = self.fake_x509keypair['uuid'] new_uuid = uuidutils.generate_uuid() returns = [dict(self.fake_x509keypair, uuid=uuid), dict(self.fake_x509keypair, uuid=new_uuid)] expected = [mock.call(self.context, uuid), mock.call(self.context, uuid)] with mock.patch.object(self.dbapi, 'get_x509keypair_by_uuid', side_effect=returns, autospec=True) as mock_get_x509keypair: x509keypair = objects.X509KeyPair.get_by_uuid(self.context, uuid) self.assertEqual(uuid, x509keypair.uuid) x509keypair.refresh() self.assertEqual(new_uuid, x509keypair.uuid) self.assertEqual(expected, mock_get_x509keypair.call_args_list) self.assertEqual(self.context, x509keypair._context) magnum-6.1.0/magnum/tests/unit/objects/test_cluster_template.py0000666000175100017510000001631713244017334025064 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_utils import uuidutils from testtools.matchers import HasLength from magnum.common import exception from magnum import objects from magnum.tests.unit.db import base from magnum.tests.unit.db import utils class TestClusterTemplateObject(base.DbTestCase): def setUp(self): super(TestClusterTemplateObject, self).setUp() self.fake_cluster_template = utils.get_test_cluster_template() def test_get_by_id(self): cluster_template_id = self.fake_cluster_template['id'] with mock.patch.object(self.dbapi, 'get_cluster_template_by_id', autospec=True) as mock_get_cluster_template: mock_get_cluster_template.return_value = self.fake_cluster_template cluster_template = objects.ClusterTemplate.get(self.context, cluster_template_id) mock_get_cluster_template.assert_called_once_with( self.context, cluster_template_id) self.assertEqual(self.context, cluster_template._context) def test_get_by_uuid(self): uuid = self.fake_cluster_template['uuid'] with mock.patch.object(self.dbapi, 'get_cluster_template_by_uuid', autospec=True) as mock_get_cluster_template: mock_get_cluster_template.return_value = self.fake_cluster_template cluster_template = objects.ClusterTemplate.get(self.context, uuid) mock_get_cluster_template.assert_called_once_with(self.context, uuid) self.assertEqual(self.context, cluster_template._context) def test_get_bad_id_and_uuid(self): self.assertRaises(exception.InvalidIdentity, objects.ClusterTemplate.get, self.context, 'not-a-uuid') def test_get_by_name(self): name = self.fake_cluster_template['name'] with mock.patch.object(self.dbapi, 'get_cluster_template_by_name', autospec=True) as mock_get_cluster_template: mock_get_cluster_template.return_value = self.fake_cluster_template cluster_template = objects.ClusterTemplate.get_by_name( self.context, name) mock_get_cluster_template.assert_called_once_with(self.context, name) self.assertEqual(self.context, cluster_template._context) def test_list(self): with mock.patch.object(self.dbapi, 
'get_cluster_template_list', autospec=True) as mock_get_list: mock_get_list.return_value = [self.fake_cluster_template] cluster_templates = objects.ClusterTemplate.list(self.context) self.assertEqual(1, mock_get_list.call_count) self.assertThat(cluster_templates, HasLength(1)) self.assertIsInstance(cluster_templates[0], objects.ClusterTemplate) self.assertEqual(self.context, cluster_templates[0]._context) def test_create(self): with mock.patch.object(self.dbapi, 'create_cluster_template', autospec=True) as mock_create_cluster_template: mock_create_cluster_template.return_value = \ self.fake_cluster_template cluster_template = objects.ClusterTemplate( self.context, **self.fake_cluster_template) cluster_template.create() mock_create_cluster_template.assert_called_once_with( self.fake_cluster_template) self.assertEqual(self.context, cluster_template._context) def test_destroy(self): uuid = self.fake_cluster_template['uuid'] with mock.patch.object(self.dbapi, 'get_cluster_template_by_uuid', autospec=True) as mock_get_cluster_template: mock_get_cluster_template.return_value = self.fake_cluster_template with mock.patch.object( self.dbapi, 'destroy_cluster_template', autospec=True)\ as mock_destroy_cluster_template: cluster_template = objects.ClusterTemplate.get_by_uuid( self.context, uuid) cluster_template.destroy() mock_get_cluster_template.assert_called_once_with(self.context, uuid) mock_destroy_cluster_template.assert_called_once_with(uuid) self.assertEqual(self.context, cluster_template._context) def test_save(self): uuid = self.fake_cluster_template['uuid'] with mock.patch.object(self.dbapi, 'get_cluster_template_by_uuid', autospec=True) as mock_get_cluster_template: mock_get_cluster_template.return_value = self.fake_cluster_template with mock.patch.object(self.dbapi, 'update_cluster_template', autospec=True) \ as mock_update_cluster_template: cluster_template = objects.ClusterTemplate.get_by_uuid( self.context, uuid) cluster_template.image_id = 'test-image' 
cluster_template.save() mock_get_cluster_template.assert_called_once_with(self.context, uuid) mock_update_cluster_template.assert_called_once_with( uuid, {'image_id': 'test-image'}) self.assertEqual(self.context, cluster_template._context) def test_refresh(self): uuid = self.fake_cluster_template['uuid'] new_uuid = uuidutils.generate_uuid() returns = [dict(self.fake_cluster_template, uuid=uuid), dict(self.fake_cluster_template, uuid=new_uuid)] expected = [mock.call(self.context, uuid), mock.call(self.context, uuid)] with mock.patch.object(self.dbapi, 'get_cluster_template_by_uuid', side_effect=returns, autospec=True) as mock_get_cluster_template: cluster_template = objects.ClusterTemplate.get_by_uuid( self.context, uuid) self.assertEqual(uuid, cluster_template.uuid) cluster_template.refresh() self.assertEqual(new_uuid, cluster_template.uuid) self.assertEqual(expected, mock_get_cluster_template.call_args_list) self.assertEqual(self.context, cluster_template._context) magnum-6.1.0/magnum/tests/unit/objects/test_cluster.py0000666000175100017510000002446113244017334023170 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_utils import uuidutils from testtools.matchers import HasLength from magnum.common import exception from magnum import objects from magnum.tests.unit.db import base from magnum.tests.unit.db import utils class TestClusterObject(base.DbTestCase): def setUp(self): super(TestClusterObject, self).setUp() self.fake_cluster = utils.get_test_cluster() self.fake_cluster['trust_id'] = 'trust_id' self.fake_cluster['trustee_username'] = 'trustee_user' self.fake_cluster['trustee_user_id'] = 'trustee_user_id' self.fake_cluster['trustee_password'] = 'password' self.fake_cluster['coe_version'] = 'fake-coe-version' self.fake_cluster['container_version'] = 'fake-container-version' cluster_template_id = self.fake_cluster['cluster_template_id'] self.fake_cluster_template = objects.ClusterTemplate( uuid=cluster_template_id) self.fake_cluster['keypair'] = 'keypair1' self.fake_cluster['docker_volume_size'] = 3 self.fake_cluster['labels'] = {} @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') def test_get_by_id(self, mock_cluster_template_get): cluster_id = self.fake_cluster['id'] with mock.patch.object(self.dbapi, 'get_cluster_by_id', autospec=True) as mock_get_cluster: mock_cluster_template_get.return_value = self.fake_cluster_template mock_get_cluster.return_value = self.fake_cluster cluster = objects.Cluster.get(self.context, cluster_id) mock_get_cluster.assert_called_once_with(self.context, cluster_id) self.assertEqual(self.context, cluster._context) self.assertEqual(cluster.cluster_template_id, cluster.cluster_template.uuid) @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') def test_get_by_uuid(self, mock_cluster_template_get): uuid = self.fake_cluster['uuid'] with mock.patch.object(self.dbapi, 'get_cluster_by_uuid', autospec=True) as mock_get_cluster: mock_cluster_template_get.return_value = self.fake_cluster_template mock_get_cluster.return_value = self.fake_cluster cluster = objects.Cluster.get(self.context, uuid) 
mock_get_cluster.assert_called_once_with(self.context, uuid) self.assertEqual(self.context, cluster._context) self.assertEqual(cluster.cluster_template_id, cluster.cluster_template.uuid) @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') def test_get_by_name(self, mock_cluster_template_get): name = self.fake_cluster['name'] with mock.patch.object(self.dbapi, 'get_cluster_by_name', autospec=True) as mock_get_cluster: mock_cluster_template_get.return_value = self.fake_cluster_template mock_get_cluster.return_value = self.fake_cluster cluster = objects.Cluster.get_by_name(self.context, name) mock_get_cluster.assert_called_once_with(self.context, name) self.assertEqual(self.context, cluster._context) self.assertEqual(cluster.cluster_template_id, cluster.cluster_template.uuid) def test_get_bad_id_and_uuid(self): self.assertRaises(exception.InvalidIdentity, objects.Cluster.get, self.context, 'not-a-uuid') @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') def test_list(self, mock_cluster_template_get): with mock.patch.object(self.dbapi, 'get_cluster_list', autospec=True) as mock_get_list: mock_get_list.return_value = [self.fake_cluster] mock_cluster_template_get.return_value = self.fake_cluster_template clusters = objects.Cluster.list(self.context) self.assertEqual(1, mock_get_list.call_count) self.assertThat(clusters, HasLength(1)) self.assertIsInstance(clusters[0], objects.Cluster) self.assertEqual(self.context, clusters[0]._context) self.assertEqual(clusters[0].cluster_template_id, clusters[0].cluster_template.uuid) @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') def test_list_all(self, mock_cluster_template_get): with mock.patch.object(self.dbapi, 'get_cluster_list', autospec=True) as mock_get_list: mock_get_list.return_value = [self.fake_cluster] mock_cluster_template_get.return_value = self.fake_cluster_template self.context.all_tenants = True clusters = objects.Cluster.list(self.context) mock_get_list.assert_called_once_with( self.context, 
limit=None, marker=None, filters=None, sort_dir=None, sort_key=None) self.assertEqual(1, mock_get_list.call_count) self.assertThat(clusters, HasLength(1)) self.assertIsInstance(clusters[0], objects.Cluster) self.assertEqual(self.context, clusters[0]._context) @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') def test_list_with_filters(self, mock_cluster_template_get): with mock.patch.object(self.dbapi, 'get_cluster_list', autospec=True) as mock_get_list: mock_get_list.return_value = [self.fake_cluster] mock_cluster_template_get.return_value = self.fake_cluster_template filters = {'name': 'cluster1'} clusters = objects.Cluster.list(self.context, filters=filters) mock_get_list.assert_called_once_with(self.context, sort_key=None, sort_dir=None, filters=filters, limit=None, marker=None) self.assertEqual(1, mock_get_list.call_count) self.assertThat(clusters, HasLength(1)) self.assertIsInstance(clusters[0], objects.Cluster) self.assertEqual(self.context, clusters[0]._context) @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') def test_create(self, mock_cluster_template_get): with mock.patch.object(self.dbapi, 'create_cluster', autospec=True) as mock_create_cluster: mock_cluster_template_get.return_value = self.fake_cluster_template mock_create_cluster.return_value = self.fake_cluster cluster = objects.Cluster(self.context, **self.fake_cluster) cluster.create() mock_create_cluster.assert_called_once_with(self.fake_cluster) self.assertEqual(self.context, cluster._context) @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') def test_destroy(self, mock_cluster_template_get): uuid = self.fake_cluster['uuid'] with mock.patch.object(self.dbapi, 'get_cluster_by_uuid', autospec=True) as mock_get_cluster: mock_get_cluster.return_value = self.fake_cluster mock_cluster_template_get.return_value = self.fake_cluster_template with mock.patch.object(self.dbapi, 'destroy_cluster', autospec=True) as mock_destroy_cluster: cluster = 
objects.Cluster.get_by_uuid(self.context, uuid) cluster.destroy() mock_get_cluster.assert_called_once_with(self.context, uuid) mock_destroy_cluster.assert_called_once_with(uuid) self.assertEqual(self.context, cluster._context) @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') def test_save(self, mock_cluster_template_get): uuid = self.fake_cluster['uuid'] with mock.patch.object(self.dbapi, 'get_cluster_by_uuid', autospec=True) as mock_get_cluster: mock_cluster_template_get.return_value = self.fake_cluster_template mock_get_cluster.return_value = self.fake_cluster with mock.patch.object(self.dbapi, 'update_cluster', autospec=True) as mock_update_cluster: cluster = objects.Cluster.get_by_uuid(self.context, uuid) cluster.node_count = 10 cluster.master_count = 5 cluster.save() mock_get_cluster.assert_called_once_with(self.context, uuid) mock_update_cluster.assert_called_once_with( uuid, {'node_count': 10, 'master_count': 5, 'cluster_template': self.fake_cluster_template}) self.assertEqual(self.context, cluster._context) @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') def test_refresh(self, mock_cluster_template_get): uuid = self.fake_cluster['uuid'] new_uuid = uuidutils.generate_uuid() returns = [dict(self.fake_cluster, uuid=uuid), dict(self.fake_cluster, uuid=new_uuid)] expected = [mock.call(self.context, uuid), mock.call(self.context, uuid)] with mock.patch.object(self.dbapi, 'get_cluster_by_uuid', side_effect=returns, autospec=True) as mock_get_cluster: mock_cluster_template_get.return_value = self.fake_cluster_template cluster = objects.Cluster.get_by_uuid(self.context, uuid) self.assertEqual(uuid, cluster.uuid) cluster.refresh() self.assertEqual(new_uuid, cluster.uuid) self.assertEqual(expected, mock_get_cluster.call_args_list) self.assertEqual(self.context, cluster._context) magnum-6.1.0/magnum/tests/unit/objects/test_federation.py0000666000175100017510000002004713244017334023623 0ustar zuulzuul00000000000000# Licensed under the Apache License, 
Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_utils import uuidutils from testtools.matchers import HasLength from magnum.common import exception from magnum import objects from magnum.tests.unit.db import base from magnum.tests.unit.db import utils class TestFederationObject(base.DbTestCase): def setUp(self): super(TestFederationObject, self).setUp() self.fake_federation = utils.get_test_federation( uuid=uuidutils.generate_uuid(), hostcluster_id=uuidutils.generate_uuid(), member_ids=[] ) def test_get_by_id(self): federation_id = self.fake_federation['id'] with mock.patch.object(self.dbapi, 'get_federation_by_id', autospec=True) as mock_get_federation: mock_get_federation.return_value = self.fake_federation federation = objects.Federation.get(self.context, federation_id) mock_get_federation.assert_called_once_with(self.context, federation_id) self.assertEqual(self.context, federation._context) def test_get_by_uuid(self): federation_uuid = self.fake_federation['uuid'] with mock.patch.object(self.dbapi, 'get_federation_by_uuid', autospec=True) as mock_get_federation: mock_get_federation.return_value = self.fake_federation federation = objects.Federation.get(self.context, federation_uuid) mock_get_federation.assert_called_once_with(self.context, federation_uuid) self.assertEqual(self.context, federation._context) def test_get_by_name(self): name = self.fake_federation['name'] with mock.patch.object(self.dbapi, 'get_federation_by_name', autospec=True) as mock_get_federation: 
mock_get_federation.return_value = self.fake_federation federation = objects.Federation.get_by_name(self.context, name) mock_get_federation.assert_called_once_with(self.context, name) self.assertEqual(self.context, federation._context) def test_get_bad_id_and_uuid(self): self.assertRaises(exception.InvalidIdentity, objects.Federation.get, self.context, 'not-a-uuid') def test_list(self): with mock.patch.object(self.dbapi, 'get_federation_list', autospec=True) as mock_get_list: mock_get_list.return_value = [self.fake_federation] federations = objects.Federation.list(self.context) self.assertEqual(1, mock_get_list.call_count) self.assertThat(federations, HasLength(1)) self.assertIsInstance(federations[0], objects.Federation) self.assertEqual(self.context, federations[0]._context) def test_list_all(self): with mock.patch.object(self.dbapi, 'get_federation_list', autospec=True) as mock_get_list: mock_get_list.return_value = [self.fake_federation] self.context.all_tenants = True federations = objects.Federation.list(self.context) mock_get_list.assert_called_once_with( self.context, limit=None, marker=None, filters=None, sort_dir=None, sort_key=None) self.assertEqual(1, mock_get_list.call_count) self.assertThat(federations, HasLength(1)) self.assertIsInstance(federations[0], objects.Federation) self.assertEqual(self.context, federations[0]._context) def test_list_with_filters(self): with mock.patch.object(self.dbapi, 'get_federation_list', autospec=True) as mock_get_list: mock_get_list.return_value = [self.fake_federation] filters = {'name': 'federation1'} federations = objects.Federation.list(self.context, filters=filters) mock_get_list.assert_called_once_with(self.context, sort_key=None, sort_dir=None, filters=filters, limit=None, marker=None) self.assertEqual(1, mock_get_list.call_count) self.assertThat(federations, HasLength(1)) self.assertIsInstance(federations[0], objects.Federation) self.assertEqual(self.context, federations[0]._context) def test_create(self): with 
mock.patch.object(self.dbapi, 'create_federation', autospec=True) as mock_create_federation: mock_create_federation.return_value = self.fake_federation federation = objects.Federation(self.context, **self.fake_federation) federation.create() mock_create_federation.assert_called_once_with( self.fake_federation) self.assertEqual(self.context, federation._context) def test_destroy(self): uuid = self.fake_federation['uuid'] with mock.patch.object(self.dbapi, 'get_federation_by_uuid', autospec=True) as mock_get_federation: mock_get_federation.return_value = self.fake_federation with mock.patch.object(self.dbapi, 'destroy_federation', autospec=True) as mock_destroy_federation: federation = objects.Federation.get_by_uuid(self.context, uuid) federation.destroy() mock_get_federation.assert_called_once_with(self.context, uuid) mock_destroy_federation.assert_called_once_with(uuid) self.assertEqual(self.context, federation._context) def test_save(self): uuid = self.fake_federation['uuid'] with mock.patch.object(self.dbapi, 'get_federation_by_uuid', autospec=True) as mock_get_federation: mock_get_federation.return_value = self.fake_federation with mock.patch.object(self.dbapi, 'update_federation', autospec=True) as mock_update_federation: federation = objects.Federation.get_by_uuid(self.context, uuid) federation.member_ids = ['new-member'] federation.save() mock_get_federation.assert_called_once_with(self.context, uuid) mock_update_federation.assert_called_once_with( uuid, {'member_ids': ['new-member']}) self.assertEqual(self.context, federation._context) def test_refresh(self): uuid = self.fake_federation['uuid'] new_uuid = uuidutils.generate_uuid() returns = [dict(self.fake_federation, uuid=uuid), dict(self.fake_federation, uuid=new_uuid)] expected = [mock.call(self.context, uuid), mock.call(self.context, uuid)] with mock.patch.object(self.dbapi, 'get_federation_by_uuid', side_effect=returns, autospec=True) as mock_get_federation: federation = 
objects.Federation.get_by_uuid(self.context, uuid) self.assertEqual(uuid, federation.uuid) federation.refresh() self.assertEqual(new_uuid, federation.uuid) self.assertEqual(expected, mock_get_federation.call_args_list) self.assertEqual(self.context, federation._context) magnum-6.1.0/magnum/tests/unit/objects/test_magnum_service.py0000666000175100017510000001263613244017334024514 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from magnum import objects from magnum.tests.unit.db import base from magnum.tests.unit.db import utils class TestMagnumServiceObject(base.DbTestCase): def setUp(self): super(TestMagnumServiceObject, self).setUp() self.fake_magnum_service = utils.get_test_magnum_service() def test_get_by_host_and_binary(self): with mock.patch.object(self.dbapi, 'get_magnum_service_by_host_and_binary', autospec=True) as mock_get_magnum_service: mock_get_magnum_service.return_value = self.fake_magnum_service ms = objects.MagnumService.get_by_host_and_binary(self.context, 'fake-host', 'fake-bin') mock_get_magnum_service.assert_called_once_with('fake-host', 'fake-bin') self.assertEqual(self.context, ms._context) def test_get_by_host_and_binary_no_service(self): with mock.patch.object(self.dbapi, 'create_magnum_service', autospec=True) as mock_get_magnum_service: mock_get_magnum_service.return_value = None ms = objects.MagnumService.get_by_host_and_binary(self.context, 'fake-host', 'fake-bin') self.assertIsNone(ms) def test_create(self): with 
mock.patch.object(self.dbapi, 'create_magnum_service', autospec=True) as mock_create_magnum_service: mock_create_magnum_service.return_value = self.fake_magnum_service ms_dict = {'host': 'fakehost', 'binary': 'fake-bin'} ms = objects.MagnumService(self.context, **ms_dict) ms.create(self.context) mock_create_magnum_service.assert_called_once_with(ms_dict) def test_destroy(self): with mock.patch.object(self.dbapi, 'get_magnum_service_by_host_and_binary', autospec=True) as mock_get_magnum_service: mock_get_magnum_service.return_value = self.fake_magnum_service with mock.patch.object(self.dbapi, 'destroy_magnum_service', autospec=True) as mock_destroy_ms: ms = objects.MagnumService.get_by_host_and_binary( self.context, 'fake-host', 'fake-bin') ms.destroy() mock_get_magnum_service.assert_called_once_with( 'fake-host', 'fake-bin') mock_destroy_ms.assert_called_once_with( self.fake_magnum_service['id']) self.assertEqual(self.context, ms._context) def test_save(self): with mock.patch.object(self.dbapi, 'get_magnum_service_by_host_and_binary', autospec=True) as mock_get_magnum_service: mock_get_magnum_service.return_value = self.fake_magnum_service with mock.patch.object(self.dbapi, 'update_magnum_service', autospec=True) as mock_update_ms: ms = objects.MagnumService.get_by_host_and_binary( self.context, 'fake-host', 'fake-bin') ms.disabled = True ms.save() mock_get_magnum_service.assert_called_once_with( 'fake-host', 'fake-bin') mock_update_ms.assert_called_once_with( self.fake_magnum_service['id'], {'disabled': True}) self.assertEqual(self.context, ms._context) def test_report_state_up(self): with mock.patch.object(self.dbapi, 'get_magnum_service_by_host_and_binary', autospec=True) as mock_get_magnum_service: mock_get_magnum_service.return_value = self.fake_magnum_service with mock.patch.object(self.dbapi, 'update_magnum_service', autospec=True) as mock_update_ms: ms = objects.MagnumService.get_by_host_and_binary( self.context, 'fake-host', 'fake-bin') last_report_count = 
self.fake_magnum_service['report_count'] ms.report_state_up() mock_get_magnum_service.assert_called_once_with( 'fake-host', 'fake-bin') self.assertEqual(self.context, ms._context) mock_update_ms.assert_called_once_with( self.fake_magnum_service['id'], {'report_count': last_report_count + 1}) magnum-6.1.0/magnum/tests/unit/objects/__init__.py0000666000175100017510000000000013244017334022166 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/objects/test_objects.py0000666000175100017510000004261513244017334023141 0ustar zuulzuul00000000000000# Copyright 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import gettext import mock from oslo_versionedobjects import exception as object_exception from oslo_versionedobjects import fields from oslo_versionedobjects import fixture from magnum.common import context as magnum_context from magnum.objects import base from magnum.tests import base as test_base gettext.install('magnum') @base.MagnumObjectRegistry.register class MyObj(base.MagnumPersistentObject, base.MagnumObject): VERSION = '1.0' fields = {'foo': fields.IntegerField(), 'bar': fields.StringField(), 'missing': fields.StringField(), } def obj_load_attr(self, attrname): setattr(self, attrname, 'loaded!') @base.remotable_classmethod def query(cls, context): obj = cls(context) obj.foo = 1 obj.bar = 'bar' obj.obj_reset_changes() return obj @base.remotable def marco(self, context): return 'polo' @base.remotable def update_test(self, context): if context.project_id == 'alternate': self.bar = 'alternate-context' else: self.bar = 'updated' @base.remotable def save(self, context): self.obj_reset_changes() @base.remotable def refresh(self, context): self.foo = 321 self.bar = 'refreshed' self.obj_reset_changes() @base.remotable def modify_save_modify(self, context): self.bar = 'meow' self.save(context) self.foo = 42 class MyObj2(object): @classmethod def obj_name(cls): return 'MyObj' @base.remotable_classmethod def get(cls, *args, **kwargs): pass @base.MagnumObjectRegistry.register_if(False) class TestSubclassedObject(MyObj): fields = {'new_field': fields.StringField()} class _TestObject(object): def test_hydration_type_error(self): primitive = {'magnum_object.name': 'MyObj', 'magnum_object.namespace': 'magnum', 'magnum_object.version': '1.0', 'magnum_object.data': {'foo': 'a'}} self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive) def test_hydration(self): primitive = {'magnum_object.name': 'MyObj', 'magnum_object.namespace': 'magnum', 'magnum_object.version': '1.0', 'magnum_object.data': {'foo': 1}} obj = MyObj.obj_from_primitive(primitive) 
self.assertEqual(1, obj.foo) def test_hydration_bad_ns(self): primitive = {'magnum_object.name': 'MyObj', 'magnum_object.namespace': 'foo', 'magnum_object.version': '1.0', 'magnum_object.data': {'foo': 1}} self.assertRaises(object_exception.UnsupportedObjectError, MyObj.obj_from_primitive, primitive) def test_dehydration(self): expected = {'magnum_object.name': 'MyObj', 'magnum_object.namespace': 'magnum', 'magnum_object.version': '1.0', 'magnum_object.data': {'foo': 1}} obj = MyObj(self.context) obj.foo = 1 obj.obj_reset_changes() self.assertEqual(expected, obj.obj_to_primitive()) def test_get_updates(self): obj = MyObj(self.context) self.assertEqual({}, obj.obj_get_changes()) obj.foo = 123 self.assertEqual({'foo': 123}, obj.obj_get_changes()) obj.bar = 'test' self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes()) obj.obj_reset_changes() self.assertEqual({}, obj.obj_get_changes()) def test_object_property(self): obj = MyObj(self.context, foo=1) self.assertEqual(1, obj.foo) def test_object_property_type_error(self): obj = MyObj(self.context) def fail(): obj.foo = 'a' self.assertRaises(ValueError, fail) def test_load(self): obj = MyObj(self.context) self.assertEqual('loaded!', obj.bar) def test_load_in_base(self): @base.MagnumObjectRegistry.register_if(False) class Foo(base.MagnumPersistentObject, base.MagnumObject): fields = {'foobar': fields.IntegerField()} obj = Foo(self.context) # NOTE(danms): Can't use assertRaisesRegexp() because of py26 raised = False ex = None try: obj.foobar except NotImplementedError as e: raised = True ex = e self.assertTrue(raised) self.assertIn('foobar', str(ex)) def test_loaded_in_primitive(self): obj = MyObj(self.context) obj.foo = 1 obj.obj_reset_changes() self.assertEqual('loaded!', obj.bar) expected = {'magnum_object.name': 'MyObj', 'magnum_object.namespace': 'magnum', 'magnum_object.version': '1.0', 'magnum_object.changes': ['bar'], 'magnum_object.data': {'foo': 1, 'bar': 'loaded!'}} self.assertEqual(expected, 
obj.obj_to_primitive()) def test_changes_in_primitive(self): obj = MyObj(self.context) obj.foo = 123 self.assertEqual(set(['foo']), obj.obj_what_changed()) primitive = obj.obj_to_primitive() self.assertIn('magnum_object.changes', primitive) obj2 = MyObj.obj_from_primitive(primitive) self.assertEqual(set(['foo']), obj2.obj_what_changed()) obj2.obj_reset_changes() self.assertEqual(set(), obj2.obj_what_changed()) def test_unknown_objtype(self): self.assertRaises(object_exception.UnsupportedObjectError, base.MagnumObject.obj_class_from_name, 'foo', '1.0') def test_with_alternate_context(self): context1 = magnum_context.RequestContext('foo', 'foo') context2 = magnum_context.RequestContext('bar', project_id='alternate') obj = MyObj.query(context1) obj.update_test(context2) self.assertEqual('alternate-context', obj.bar) def test_orphaned_object(self): obj = MyObj.query(self.context) obj._context = None self.assertRaises(object_exception.OrphanedObjectError, obj.update_test) def test_changed_1(self): obj = MyObj.query(self.context) obj.foo = 123 self.assertEqual(set(['foo']), obj.obj_what_changed()) obj.update_test(self.context) self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed()) self.assertEqual(123, obj.foo) def test_changed_2(self): obj = MyObj.query(self.context) obj.foo = 123 self.assertEqual(set(['foo']), obj.obj_what_changed()) obj.save(self.context) self.assertEqual(set([]), obj.obj_what_changed()) self.assertEqual(123, obj.foo) def test_changed_3(self): obj = MyObj.query(self.context) obj.foo = 123 self.assertEqual(set(['foo']), obj.obj_what_changed()) obj.refresh(self.context) self.assertEqual(set([]), obj.obj_what_changed()) self.assertEqual(321, obj.foo) self.assertEqual('refreshed', obj.bar) def test_changed_4(self): obj = MyObj.query(self.context) obj.bar = 'something' self.assertEqual(set(['bar']), obj.obj_what_changed()) obj.modify_save_modify(self.context) self.assertEqual(set(['foo']), obj.obj_what_changed()) self.assertEqual(42, obj.foo) 
self.assertEqual('meow', obj.bar) def test_static_result(self): obj = MyObj.query(self.context) self.assertEqual('bar', obj.bar) result = obj.marco(self.context) self.assertEqual('polo', result) def test_updates(self): obj = MyObj.query(self.context) self.assertEqual(1, obj.foo) obj.update_test(self.context) self.assertEqual('updated', obj.bar) def test_base_attributes(self): dt = datetime.datetime(1955, 11, 5) datatime = fields.DateTimeField() obj = MyObj(self.context) obj.created_at = dt obj.updated_at = dt expected = {'magnum_object.name': 'MyObj', 'magnum_object.namespace': 'magnum', 'magnum_object.version': '1.0', 'magnum_object.changes': ['created_at', 'updated_at'], 'magnum_object.data': {'created_at': datatime.stringify(dt), 'updated_at': datatime.stringify(dt)} } actual = obj.obj_to_primitive() # magnum_object.changes is built from a set and order is undefined self.assertEqual(sorted(expected['magnum_object.changes']), sorted(actual['magnum_object.changes'])) del expected['magnum_object.changes'], actual['magnum_object.changes'] self.assertEqual(expected, actual) def test_contains(self): obj = MyObj(self.context) self.assertNotIn('foo', obj) obj.foo = 1 self.assertIn('foo', obj) self.assertNotIn('does_not_exist', obj) def test_obj_attr_is_set(self): obj = MyObj(self.context, foo=1) self.assertTrue(obj.obj_attr_is_set('foo')) self.assertFalse(obj.obj_attr_is_set('bar')) self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang') def test_get(self): obj = MyObj(self.context, foo=1) # Foo has value, should not get the default self.assertEqual(1, getattr(obj, 'foo', 2)) # Foo has value, should return the value without error self.assertEqual(1, getattr(obj, 'foo')) # Bar without a default should lazy-load self.assertEqual('loaded!', getattr(obj, 'bar')) # Bar now has a default, but loaded value should be returned self.assertEqual('loaded!', getattr(obj, 'bar', 'not-loaded')) # Invalid attribute should raise AttributeError self.assertFalse(hasattr(obj, 
'nothing')) def test_object_inheritance(self): base_fields = list(base.MagnumPersistentObject.fields.keys()) myobj_fields = ['foo', 'bar', 'missing'] + base_fields myobj3_fields = ['new_field'] self.assertTrue(issubclass(TestSubclassedObject, MyObj)) self.assertEqual(len(MyObj.fields), len(myobj_fields)) self.assertEqual(set(MyObj.fields.keys()), set(myobj_fields)) self.assertEqual(len(TestSubclassedObject.fields), len(myobj_fields) + len(myobj3_fields)) self.assertEqual(set(TestSubclassedObject.fields.keys()), set(myobj_fields) | set(myobj3_fields)) def test_get_changes(self): obj = MyObj(self.context) self.assertEqual({}, obj.obj_get_changes()) obj.foo = 123 self.assertEqual({'foo': 123}, obj.obj_get_changes()) obj.bar = 'test' self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes()) obj.obj_reset_changes() self.assertEqual({}, obj.obj_get_changes()) def test_obj_fields(self): @base.MagnumObjectRegistry.register_if(False) class TestObj(base.MagnumPersistentObject, base.MagnumObject): fields = {'foo': fields.IntegerField()} obj_extra_fields = ['bar'] @property def bar(self): return 'this is bar' obj = TestObj(self.context) self.assertEqual(set(['created_at', 'updated_at', 'foo', 'bar']), set(obj.obj_fields)) def test_obj_constructor(self): obj = MyObj(self.context, foo=123, bar='abc') self.assertEqual(123, obj.foo) self.assertEqual('abc', obj.bar) self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed()) class TestObject(test_base.TestCase, _TestObject): pass # This is a static dictionary that holds all fingerprints of the versioned # objects registered with the MagnumRegistry. Each fingerprint contains # the version of the object and an md5 hash of RPC-critical parts of the # object (fields and remotable methods). If either the version or hash # change, the static tree needs to be updated. 
# For more information on object version testing, read # https://docs.openstack.org/magnum/latest/contributor/objects.html object_data = { 'Cluster': '1.17-c32c07425ab0042c7370bef2902b4d21', 'ClusterTemplate': '1.18-7fa94f4fdd027acfb4f022f202afdfb5', 'Certificate': '1.1-1924dc077daa844f0f9076332ef96815', 'MyObj': '1.0-34c4b1aadefd177b13f9a2f894cc23cd', 'X509KeyPair': '1.2-d81950af36c59a71365e33ce539d24f9', 'MagnumService': '1.0-2d397ec59b0046bd5ec35cd3e06efeca', 'Stats': '1.0-73a1cd6e3c0294c932a66547faba216c', 'Quota': '1.0-94e100aebfa88f7d8428e007f2049c18', 'Federation': '1.0-166da281432b083f0e4b851336e12e20' } class TestObjectVersions(test_base.TestCase): def test_versions(self): # Test the versions of current objects with the static tree above. # This ensures that any incompatible object changes require a version # bump. classes = base.MagnumObjectRegistry.obj_classes() checker = fixture.ObjectVersionChecker(obj_classes=classes) expected, actual = checker.test_hashes(object_data) self.assertEqual(expected, actual, "Fields or remotable methods in some objects have " "changed. Make sure the versions of the objects has " "been bumped, and update the hashes in the static " "fingerprints tree (object_data). 
For more " "information, read https://docs.openstack.org/" "magnum/latest/contributor/objects.html") class TestObjectSerializer(test_base.TestCase): def test_object_serialization(self): ser = base.MagnumObjectSerializer() obj = MyObj(self.context) primitive = ser.serialize_entity(self.context, obj) self.assertIn('magnum_object.name', primitive) obj2 = ser.deserialize_entity(self.context, primitive) self.assertIsInstance(obj2, MyObj) self.assertEqual(self.context, obj2._context) def test_object_serialization_iterables(self): ser = base.MagnumObjectSerializer() obj = MyObj(self.context) for iterable in (list, tuple, set): thing = iterable([obj]) primitive = ser.serialize_entity(self.context, thing) self.assertEqual(1, len(primitive)) for item in primitive: self.assertFalse(isinstance(item, base.MagnumObject)) thing2 = ser.deserialize_entity(self.context, primitive) self.assertEqual(1, len(thing2)) for item in thing2: self.assertIsInstance(item, MyObj) @mock.patch('magnum.objects.base.MagnumObject.indirection_api') def _test_deserialize_entity_newer(self, obj_version, backported_to, mock_indirection_api, my_version='1.6'): ser = base.MagnumObjectSerializer() mock_indirection_api.object_backport_versions.side_effect \ = NotImplementedError() mock_indirection_api.object_backport.return_value = 'backported' @base.MagnumObjectRegistry.register class MyTestObj(MyObj): VERSION = my_version obj = MyTestObj() obj.VERSION = obj_version primitive = obj.obj_to_primitive() result = ser.deserialize_entity(self.context, primitive) if backported_to is None: self.assertEqual( False, mock_indirection_api.object_backport.called) else: self.assertEqual('backported', result) mock_indirection_api.object_backport.assert_called_with( self.context, primitive, backported_to) def test_deserialize_entity_newer_version_backports_level1(self): "Test object with unsupported (newer) version" self._test_deserialize_entity_newer('11.5', '1.6') def 
test_deserialize_entity_newer_version_backports_level2(self): "Test object with unsupported (newer) version" self._test_deserialize_entity_newer('1.25', '1.6') def test_deserialize_entity_same_revision_does_not_backport(self): "Test object with supported revision" self._test_deserialize_entity_newer('1.6', None) def test_deserialize_entity_newer_revision_does_not_backport_zero(self): "Test object with supported revision" self._test_deserialize_entity_newer('1.6.0', None) def test_deserialize_entity_newer_revision_does_not_backport(self): "Test object with supported (newer) revision" self._test_deserialize_entity_newer('1.6.1', None) def test_deserialize_entity_newer_version_passes_revision(self): "Test object with unsupported (newer) version and revision" self._test_deserialize_entity_newer('1.7', '1.6.1', my_version='1.6.1') magnum-6.1.0/magnum/tests/unit/objects/utils.py0000666000175100017510000001716613244017334021614 0ustar zuulzuul00000000000000# Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Magnum object test utilities.""" import datetime import iso8601 import netaddr from oslo_utils import timeutils import six from magnum.common import exception from magnum.i18n import _ from magnum import objects from magnum.tests.unit.db import utils as db_utils def get_test_cluster_template(context, **kw): """Return a ClusterTemplate object with appropriate attributes. 
NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ db_cluster_template = db_utils.get_test_cluster_template(**kw) cluster_template = objects.ClusterTemplate(context) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del db_cluster_template['id'] for key in db_cluster_template: setattr(cluster_template, key, db_cluster_template[key]) return cluster_template def create_test_cluster_template(context, **kw): """Create and return a test ClusterTemplate object. Create a ClusterTemplate in the DB and return a ClusterTemplate object with appropriate attributes. """ cluster_template = get_test_cluster_template(context, **kw) try: cluster_template.create() except exception.ClusterTemplateAlreadyExists: cluster_template = objects.ClusterTemplate.get(context, cluster_template.uuid) return cluster_template def get_test_cluster(context, **kw): """Return a Cluster object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ db_cluster = db_utils.get_test_cluster(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del db_cluster['id'] cluster = objects.Cluster(context) for key in db_cluster: setattr(cluster, key, db_cluster[key]) return cluster def create_test_cluster(context, **kw): """Create and return a test Cluster object. Create a Cluster in the DB and return a Cluster object with appropriate attributes. """ cluster = get_test_cluster(context, **kw) create_test_cluster_template(context, uuid=cluster['cluster_template_id'], coe=kw.get('coe', 'swarm'), tls_disabled=kw.get('tls_disabled')) cluster.create() return cluster def get_test_quota(context, **kw): """Return a Quota object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. 
""" db_quota = db_utils.get_test_quota(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del db_quota['id'] quota = objects.Quota(context) for key in db_quota: setattr(quota, key, db_quota[key]) return quota def create_test_quota(context, **kw): """Create and return a test Quota object. Create a quota in the DB and return a Quota object with appropriate attributes. """ quota = get_test_quota(context, **kw) quota.create() return quota def get_test_x509keypair(context, **kw): """Return a X509KeyPair object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ db_x509keypair = db_utils.get_test_x509keypair(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del db_x509keypair['id'] x509keypair = objects.X509KeyPair(context) for key in db_x509keypair: setattr(x509keypair, key, db_x509keypair[key]) return x509keypair def create_test_x509keypair(context, **kw): """Create and return a test x509keypair object. Create a x509keypair in the DB and return a X509KeyPair object with appropriate attributes. """ x509keypair = get_test_x509keypair(context, **kw) x509keypair.create() return x509keypair def get_test_magnum_service_object(context, **kw): """Return a test magnum_service object. Get a magnum_service from DB layer and return an object with appropriate attributes. """ db_magnum_service = db_utils.get_test_magnum_service(**kw) magnum_service = objects.MagnumService(context) for key in db_magnum_service: setattr(magnum_service, key, db_magnum_service[key]) return magnum_service def get_test_federation(context, **kw): """Return a Federation object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. 
""" db_federation = db_utils.get_test_federation(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del db_federation['id'] federation = objects.Federation(context) for key in db_federation: setattr(federation, key, db_federation[key]) return federation def create_test_federation(context, **kw): """Create and return a test Federation object. Create a Federation in the DB and return a Federation object with appropriate attributes. """ federation = get_test_federation(context, **kw) federation.create() return federation def datetime_or_none(dt): """Validate a datetime or None value.""" if dt is None: return None elif isinstance(dt, datetime.datetime): if dt.utcoffset() is None: # NOTE(danms): Legacy objects from sqlalchemy are stored in UTC, # but are returned without a timezone attached. # As a transitional aid, assume a tz-naive object is in UTC. return dt.replace(tzinfo=iso8601.UTC) else: return dt raise ValueError(_("A datetime.datetime is required here")) def datetime_or_str_or_none(val): if isinstance(val, six.string_types): return timeutils.parse_isotime(val) return datetime_or_none(val) def int_or_none(val): """Attempt to parse an integer value, or None.""" if val is None: return val else: return int(val) def str_or_none(val): """Attempt to stringify a value to unicode, or None.""" if val is None: return val else: return six.text_type(val) def ip_or_none(version): """Return a version-specific IP address validator.""" def validator(val, version=version): if val is None: return val else: return netaddr.IPAddress(val, version=version) return validator def dt_serializer(name): """Return a datetime serializer for a named attribute.""" def serializer(self, name=name): if getattr(self, name) is not None: return datetime.datetime.isoformat(getattr(self, name)) else: return None return serializer def dt_deserializer(instance, val): """A deserializer method for datetime attributes.""" if val is None: return None else: return 
timeutils.parse_isotime(val) magnum-6.1.0/magnum/tests/unit/common/0000775000175100017510000000000013244017675017734 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/common/cert_manager/0000775000175100017510000000000013244017675022363 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/common/cert_manager/test_barbican.py0000666000175100017510000002621513244017334025535 0ustar zuulzuul00000000000000# Copyright 2014, 2015 Rackspace US, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from barbicanclient.v1 import client as barbican_client from barbicanclient.v1 import containers from barbicanclient.v1 import secrets import mock from mock import patch from magnum.common.cert_manager import barbican_cert_manager as bcm from magnum.common.cert_manager import cert_manager from magnum.common import exception as magnum_exc from magnum.tests import base class TestBarbicanCert(base.BaseTestCase): def setUp(self): # Certificate data self.certificate = "My Certificate" self.intermediates = "My Intermediates" self.private_key = "My Private Key" self.private_key_passphrase = "My Private Key Passphrase" self.certificate_secret = barbican_client.secrets.Secret( api=mock.MagicMock(), payload=self.certificate ) self.intermediates_secret = barbican_client.secrets.Secret( api=mock.MagicMock(), payload=self.intermediates ) self.private_key_secret = barbican_client.secrets.Secret( api=mock.MagicMock(), payload=self.private_key ) self.private_key_passphrase_secret = barbican_client.secrets.Secret( api=mock.MagicMock(), payload=self.private_key_passphrase ) super(TestBarbicanCert, self).setUp() def test_barbican_cert(self): container = barbican_client.containers.CertificateContainer( api=mock.MagicMock(), certificate=self.certificate_secret, intermediates=self.intermediates_secret, private_key=self.private_key_secret, private_key_passphrase=self.private_key_passphrase_secret ) # Create a cert cert = bcm.Cert( cert_container=container ) # Validate the cert functions self.assertEqual(self.certificate, cert.get_certificate()) self.assertEqual(self.intermediates, cert.get_intermediates()) self.assertEqual(self.private_key, cert.get_private_key()) self.assertEqual(self.private_key_passphrase, cert.get_private_key_passphrase()) def test_barbican_cert_none_values(self): container = barbican_client.containers.CertificateContainer( api=mock.MagicMock(), certificate=None, intermediates=None, private_key=None, private_key_passphrase=None ) # Create a cert cert = 
bcm.Cert( cert_container=container ) # Validate the cert functions self.assertIsNone(cert.get_certificate()) self.assertIsNone(cert.get_intermediates()) self.assertIsNone(cert.get_private_key()) self.assertIsNone(cert.get_private_key_passphrase()) class TestBarbicanManager(base.BaseTestCase): def setUp(self): # Make a fake Container and contents self.barbican_endpoint = 'http://localhost:9311/v1' self.container_uuid = uuid.uuid4() self.container_ref = '{0}/containers/{1}'.format( self.barbican_endpoint, self.container_uuid ) self.name = 'My Fancy Cert' self.private_key = mock.Mock(spec=secrets.Secret) self.certificate = mock.Mock(spec=secrets.Secret) self.intermediates = mock.Mock(spec=secrets.Secret) self.private_key_passphrase = mock.Mock(spec=secrets.Secret) container = mock.Mock(spec=containers.CertificateContainer) container.container_ref = self.container_ref container.name = self.name container.private_key = self.private_key container.certificate = self.certificate container.intermediates = self.intermediates container.private_key_passphrase = self.private_key_passphrase self.container = container self.empty_container = mock.Mock(spec=containers.CertificateContainer) self.secret1 = mock.Mock(spec=secrets.Secret) self.secret2 = mock.Mock(spec=secrets.Secret) self.secret3 = mock.Mock(spec=secrets.Secret) self.secret4 = mock.Mock(spec=secrets.Secret) super(TestBarbicanManager, self).setUp() @patch('magnum.common.clients.OpenStackClients.barbican') def test_store_cert(self, mock_barbican): # Mock out the client bc = mock.MagicMock() bc.containers.create_certificate.return_value = self.empty_container mock_barbican.return_value = bc # Attempt to store a cert bcm.CertManager.store_cert( certificate=self.certificate, private_key=self.private_key, intermediates=self.intermediates, private_key_passphrase=self.private_key_passphrase, name=self.name ) # create_secret should be called four times with our data calls = [ mock.call(payload=self.certificate, expiration=None, 
name=mock.ANY), mock.call(payload=self.private_key, expiration=None, name=mock.ANY), mock.call(payload=self.intermediates, expiration=None, name=mock.ANY), mock.call(payload=self.private_key_passphrase, expiration=None, name=mock.ANY) ] bc.secrets.create.assert_has_calls(calls, any_order=True) # create_certificate should be called once self.assertEqual(1, bc.containers.create_certificate.call_count) # Container should be stored once self.empty_container.store.assert_called_once_with() @patch('magnum.common.clients.OpenStackClients.barbican') def test_store_cert_failure(self, mock_barbican): # Mock out the client bc = mock.MagicMock() bc.containers.create_certificate.return_value = self.empty_container test_secrets = [ self.secret1, self.secret2, self.secret3, self.secret4 ] bc.secrets.create.side_effect = test_secrets self.empty_container.store.side_effect =\ magnum_exc.CertificateStorageException mock_barbican.return_value = bc # Attempt to store a cert self.assertRaises( magnum_exc.CertificateStorageException, bcm.CertManager.store_cert, certificate=self.certificate, private_key=self.private_key, intermediates=self.intermediates, private_key_passphrase=self.private_key_passphrase, name=self.name ) # create_secret should be called four times with our data calls = [ mock.call(payload=self.certificate, expiration=None, name=mock.ANY), mock.call(payload=self.private_key, expiration=None, name=mock.ANY), mock.call(payload=self.intermediates, expiration=None, name=mock.ANY), mock.call(payload=self.private_key_passphrase, expiration=None, name=mock.ANY) ] bc.secrets.create.assert_has_calls(calls, any_order=True) # create_certificate should be called once self.assertEqual(1, bc.containers.create_certificate.call_count) # Container should be stored once self.empty_container.store.assert_called_once_with() # All secrets should be deleted (or at least an attempt made) for s in test_secrets: s.delete.assert_called_once_with() 
@patch('magnum.common.clients.OpenStackClients.barbican') def test_get_cert(self, mock_barbican): # Mock out the client bc = mock.MagicMock() bc.containers.register_consumer.return_value = self.container mock_barbican.return_value = bc # Get the container data data = bcm.CertManager.get_cert( cert_ref=self.container_ref, resource_ref=self.container_ref, service_name='Magnum' ) # 'register_consumer' should be called once with the container_ref bc.containers.register_consumer.assert_called_once_with( container_ref=self.container_ref, url=self.container_ref, name='Magnum' ) # The returned data should be a Cert object with the correct values self.assertIsInstance(data, cert_manager.Cert) self.assertEqual(self.private_key.payload, data.get_private_key()) self.assertEqual(self.certificate.payload, data.get_certificate()) self.assertEqual(self.intermediates.payload, data.get_intermediates()) self.assertEqual(self.private_key_passphrase.payload, data.get_private_key_passphrase()) @patch('magnum.common.clients.OpenStackClients.barbican') def test_get_cert_no_registration(self, mock_barbican): # Mock out the client bc = mock.MagicMock() bc.containers.get.return_value = self.container mock_barbican.return_value = bc # Get the container data data = bcm.CertManager.get_cert( cert_ref=self.container_ref, check_only=True ) # 'get' should be called once with the container_ref bc.containers.get.assert_called_once_with( container_ref=self.container_ref ) # The returned data should be a Cert object with the correct values self.assertIsInstance(data, cert_manager.Cert) self.assertEqual(self.private_key.payload, data.get_private_key()) self.assertEqual(self.certificate.payload, data.get_certificate()) self.assertEqual(self.intermediates.payload, data.get_intermediates()) self.assertEqual(self.private_key_passphrase.payload, data.get_private_key_passphrase()) @patch('magnum.common.clients.OpenStackClients.barbican') def test_delete_cert(self, mock_barbican): # Mock out the client bc = 
mock.MagicMock() bc.containers.get.return_value = self.container mock_barbican.return_value = bc # Attempt to delete a cert bcm.CertManager.delete_cert( cert_ref=self.container_ref ) # All secrets should be deleted self.container.certificate.delete.assert_called_once_with() self.container.private_key.delete.assert_called_once_with() self.container.intermediates.delete.assert_called_once_with() self.container.private_key_passphrase.delete.assert_called_once_with() # Container should be deleted once self.container.delete.assert_called_once_with() magnum-6.1.0/magnum/tests/unit/common/cert_manager/test_cert_manager.py0000666000175100017510000000464013244017334026421 0ustar zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_config import fixture from magnum.common import cert_manager from magnum.common.cert_manager import barbican_cert_manager as bcm from magnum.common.cert_manager import cert_manager as cert_manager_iface from magnum.common.cert_manager import get_backend from magnum.common.cert_manager import local_cert_manager as lcm from magnum.tests import base class FakeCert(cert_manager_iface.Cert): def get_certificate(self): return 'fake-cert' def get_intermediates(self): return 'fake-intermediates' def get_private_key(self): return 'fake-private-key' def get_private_key_passphrase(self): return 'fake-passphrase' class TestCert(base.BaseTestCase): @mock.patch.object(cert_manager_iface, 'operations') def test_get_decrypted_private_key(self, mock_x509_ops): mock_x509_ops.decrypt_key.return_value = 'fake-key' fake_cert = FakeCert() decrypted_key = fake_cert.get_decrypted_private_key() self.assertEqual('fake-key', decrypted_key) mock_x509_ops.decrypt_key.assert_called_once_with('fake-private-key', 'fake-passphrase') class TestCertManager(base.BaseTestCase): def setUp(self): cert_manager._CERT_MANAGER_PLUGIN = None super(TestCertManager, self).setUp() def test_barbican_cert_manager(self): fixture.Config().config(group='certificates', cert_manager_type='barbican') self.assertEqual(get_backend().CertManager, bcm.CertManager) def test_local_cert_manager(self): fixture.Config().config(group='certificates', cert_manager_type='local') self.assertEqual(get_backend().CertManager, lcm.CertManager) magnum-6.1.0/magnum/tests/unit/common/cert_manager/__init__.py0000666000175100017510000000000013244017334024454 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/common/cert_manager/test_local.py0000666000175100017510000002272413244017334025067 0ustar zuulzuul00000000000000# Copyright 2014 Rackspace US, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import mock from oslo_config import cfg from oslo_config import fixture as oslo_fixture from magnum.common.cert_manager import cert_manager from magnum.common.cert_manager import local_cert_manager from magnum.common import exception from magnum.tests import base class TestLocalCert(base.BaseTestCase): def setUp(self): self.certificate = "My Certificate" self.intermediates = "My Intermediates" self.private_key = "My Private Key" self.private_key_passphrase = "My Private Key Passphrase" super(TestLocalCert, self).setUp() def test_local_cert(self): # Create a cert cert = local_cert_manager.Cert( certificate=self.certificate, intermediates=self.intermediates, private_key=self.private_key, private_key_passphrase=self.private_key_passphrase ) # Validate the cert functions self.assertEqual(self.certificate, cert.get_certificate()) self.assertEqual(self.intermediates, cert.get_intermediates()) self.assertEqual(self.private_key, cert.get_private_key()) self.assertEqual(self.private_key_passphrase, cert.get_private_key_passphrase()) class TestLocalManager(base.BaseTestCase): def setUp(self): self.certificate = "My Certificate" self.intermediates = "My Intermediates" self.private_key = "My Private Key" self.private_key_passphrase = "My Private Key Passphrase" def _mock_isfile(path): _, ext = os.path.splitext(path) if self.intermediates is None and ext == '.int': return False if self.private_key_passphrase is None and ext == '.pass': return False return True isfile_patcher = mock.patch('os.path.isfile') self.mock_isfile = isfile_patcher.start() 
self.addCleanup(isfile_patcher.stop) self.mock_isfile.side_effect = _mock_isfile conf = oslo_fixture.Config(cfg.CONF) conf.config(group="certificates", storage_path="/tmp/") super(TestLocalManager, self).setUp() def _open_calls(self, cert_id, mode='w'): open_calls = [] unexpected_calls = [] for ext in ['crt', 'key', 'int', 'pass']: args = [os.path.join('/tmp/{0}.{1}'.format(cert_id, ext))] if mode: args.append(mode) call = mock.call(*args) if ext == 'int' and not self.intermediates: unexpected_calls.append(call) elif ext == 'pass' and not self.private_key_passphrase: unexpected_calls.append(call) else: open_calls.append(call) return open_calls, unexpected_calls def _write_calls(self): write_calls = [ mock.call(self.certificate), mock.call(self.private_key), ] if self.intermediates: write_calls.append(mock.call(self.intermediates)) if self.private_key_passphrase: write_calls.append(mock.call(self.private_key_passphrase)) return write_calls def _store_cert(self): file_mock = mock.mock_open() # Attempt to store the cert with mock.patch('six.moves.builtins.open', file_mock, create=True): cert_id = local_cert_manager.CertManager.store_cert( certificate=self.certificate, intermediates=self.intermediates, private_key=self.private_key, private_key_passphrase=self.private_key_passphrase ) # Check that something came back self.assertIsNotNone(cert_id) # Verify the correct files were opened open_calls, unexpected_calls = self._open_calls(cert_id) file_mock.assert_has_calls(open_calls, any_order=True) for unexpected_call in unexpected_calls: self.assertNotIn(unexpected_call, file_mock.mock_calls) # Verify the writes were made file_mock().write.assert_has_calls(self._write_calls(), any_order=True) return cert_id def _get_cert(self, cert_id): file_mock = mock.mock_open() # Attempt to retrieve the cert with mock.patch('six.moves.builtins.open', file_mock, create=True): data = local_cert_manager.CertManager.get_cert(cert_id) # Verify the correct files were opened open_calls, 
unexpected_calls = self._open_calls(cert_id, 'r') file_mock.assert_has_calls(open_calls, any_order=True) for unexpected_call in unexpected_calls: self.assertNotIn(unexpected_call, file_mock.mock_calls) # The returned data should be a Cert object self.assertIsInstance(data, cert_manager.Cert) return data def _get_cert_with_fail(self, cert_id, failed='crt'): def fake_open(path, mode): if path == os.path.join('/tmp/{0}.{1}'.format(cert_id, failed)): raise IOError() return mock.DEFAULT file_mock = mock.mock_open() file_mock.side_effect = fake_open # Attempt to retrieve the cert with mock.patch('six.moves.builtins.open', file_mock, create=True): self.assertRaises( exception.CertificateStorageException, local_cert_manager.CertManager.get_cert, cert_id ) def _delete_cert(self, cert_id): remove_mock = mock.Mock() # Delete the cert with mock.patch('os.remove', remove_mock): local_cert_manager.CertManager.delete_cert(cert_id) open_calls, unexpected_calls = self._open_calls(cert_id, mode=None) # Verify the correct files were removed remove_mock.assert_has_calls(open_calls, any_order=True) for unexpected_call in unexpected_calls: self.assertNotIn(unexpected_call, remove_mock.mock_calls) def _delete_cert_with_fail(self, cert_id): remove_mock = mock.Mock() remove_mock.side_effect = IOError # Delete the cert with mock.patch('os.remove', remove_mock): self.assertRaises( exception.CertificateStorageException, local_cert_manager.CertManager.delete_cert, cert_id ) def test_store_cert(self): self._store_cert() @mock.patch('six.moves.builtins.open', create=True) def test_store_cert_with_io_error(self, file_mock): file_mock.side_effect = IOError self.assertRaises( exception.CertificateStorageException, local_cert_manager.CertManager.store_cert, certificate=self.certificate, intermediates=self.intermediates, private_key=self.private_key, private_key_passphrase=self.private_key_passphrase ) def test_get_cert(self): # Store a cert cert_id = self._store_cert() # Get the cert 
self._get_cert(cert_id) def test_get_cert_with_loading_cert_fail(self): # Store a cert cert_id = self._store_cert() self._get_cert_with_fail(cert_id, failed='crt') def test_get_cert_with_loading_private_key_fail(self): # Store a cert cert_id = self._store_cert() self._get_cert_with_fail(cert_id, failed='key') def test_get_cert_with_loading_intermediates_fail(self): # Store a cert cert_id = self._store_cert() self._get_cert_with_fail(cert_id, failed='int') def test_get_cert_with_loading_pkp_fail(self): # Store a cert cert_id = self._store_cert() self._get_cert_with_fail(cert_id, failed='pass') def test_get_cert_without_intermediate(self): self.intermediates = None # Store a cert cert_id = self._store_cert() # Get the cert self._get_cert(cert_id) def test_get_cert_without_pkp(self): self.private_key_passphrase = None # Store a cert cert_id = self._store_cert() # Get the cert self._get_cert(cert_id) def test_delete_cert(self): # Store a cert cert_id = self._store_cert() # Verify the cert exists self._get_cert(cert_id) # Delete the cert self._delete_cert(cert_id) def test_delete_cert_with_fail(self): # Store a cert cert_id = self._store_cert() # Verify the cert exists self._get_cert(cert_id) # Delete the cert with fail self._delete_cert_with_fail(cert_id) def test_delete_cert_without_intermediate(self): self.intermediates = None # Store a cert cert_id = self._store_cert() # Delete the cert with fail self._delete_cert_with_fail(cert_id) def test_delete_cert_without_pkp(self): self.private_key_passphrase = None # Store a cert cert_id = self._store_cert() # Delete the cert with fail self._delete_cert_with_fail(cert_id) magnum-6.1.0/magnum/tests/unit/common/cert_manager/test_x509keypair_cert_manager.py0000666000175100017510000001045713244017334030576 0ustar zuulzuul00000000000000# Copyright 2016 Intel, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from magnum.common.cert_manager import x509keypair_cert_manager as x509_cm from magnum.common import context from magnum.tests import base from magnum.tests.unit.db import base as db_base from magnum.tests.unit.db import utils class TestX509keypairCert(base.BaseTestCase): def setUp(self): self.certificate = "My Certificate" self.intermediates = "My Intermediates" self.private_key = "My Private Key" self.private_key_passphrase = "My Private Key Passphrase" super(TestX509keypairCert, self).setUp() def test_x509keypair_cert(self): # Create a cert cert = x509_cm.Cert( certificate=self.certificate, intermediates=self.intermediates, private_key=self.private_key, private_key_passphrase=self.private_key_passphrase ) # Validate the cert functions self.assertEqual(self.certificate, cert.get_certificate()) self.assertEqual(self.intermediates, cert.get_intermediates()) self.assertEqual(self.private_key, cert.get_private_key()) self.assertEqual(self.private_key_passphrase, cert.get_private_key_passphrase()) class TestX509keypairManager(db_base.DbTestCase): def setUp(self): self.certificate = "My Certificate" self.intermediates = "My Intermediates" self.private_key = "My Private Key" self.private_key_passphrase = "My Private Key Passphrase" self.context = context.make_admin_context() super(TestX509keypairManager, self).setUp() def test_store_cert(self): x509keypair = utils.get_test_x509keypair() with mock.patch.object(self.dbapi, 'create_x509keypair', autospec=True) as mock_create_x509keypair: mock_create_x509keypair.return_value = x509keypair uuid = 
x509_cm.CertManager.store_cert(context=self.context, **x509keypair) self.assertEqual(uuid, '72625085-c507-4410-9b28-cd7cf1fbf1ad') def test_get_cert(self): x509keypair = utils.get_test_x509keypair(uuid='fake-uuid') with mock.patch.object(self.dbapi, 'get_x509keypair_by_uuid', autospec=True) as mock_get_x509keypair: mock_get_x509keypair.return_value = x509keypair cert_obj = x509_cm.CertManager.get_cert('fake-uuid', context=self.context) self.assertEqual(cert_obj.certificate, 'certificate') self.assertEqual(cert_obj.private_key, 'private_key') self.assertEqual(cert_obj.private_key_passphrase, 'private_key_passphrase') self.assertEqual(cert_obj.intermediates, 'intermediates') mock_get_x509keypair.assert_called_once_with(self.context, 'fake-uuid') def test_delete_cert(self): x509keypair = utils.get_test_x509keypair(uuid='fake-uuid') with mock.patch.object(self.dbapi, 'get_x509keypair_by_uuid', autospec=True) as mock_get_x509keypair: mock_get_x509keypair.return_value = x509keypair with mock.patch.object(self.dbapi, 'destroy_x509keypair', autospec=True) as mock_destroy_x509keypair: x509_cm.CertManager.delete_cert('fake-uuid', context=self.context) mock_get_x509keypair.assert_called_once_with(self.context, 'fake-uuid') mock_destroy_x509keypair.assert_called_once_with('fake-uuid') magnum-6.1.0/magnum/tests/unit/common/test_context.py0000666000175100017510000001115513244017334023026 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from magnum.common import context as magnum_context from magnum.tests import base class ContextTestCase(base.TestCase): def _create_context(self, roles=None): return magnum_context.RequestContext(auth_token='auth_token1', auth_url='auth_url1', domain_id='domain_id1', domain_name='domain_name1', user_name='user1', user_id='user-id1', project_name='tenant1', project_id='tenant-id1', roles=roles, is_admin=True, read_only=True, show_deleted=True, request_id='request_id1', trust_id='trust_id1', auth_token_info='token_info1') def test_context(self): ctx = self._create_context() self.assertEqual("auth_token1", ctx.auth_token) self.assertEqual("auth_url1", ctx.auth_url) self.assertEqual("domain_id1", ctx.domain_id) self.assertEqual("domain_name1", ctx.domain_name) self.assertEqual("user1", ctx.user_name) self.assertEqual("user-id1", ctx.user_id) self.assertEqual("tenant1", ctx.project_name) self.assertEqual("tenant-id1", ctx.project_id) self.assertEqual([], ctx.roles) self.assertTrue(ctx.is_admin) self.assertTrue(ctx.read_only) self.assertTrue(ctx.show_deleted) self.assertEqual("request_id1", ctx.request_id) self.assertEqual("trust_id1", ctx.trust_id) self.assertEqual("token_info1", ctx.auth_token_info) def test_context_with_roles(self): ctx = self._create_context(roles=['admin', 'service']) self.assertEqual("auth_token1", ctx.auth_token) self.assertEqual("auth_url1", ctx.auth_url) self.assertEqual("domain_id1", ctx.domain_id) self.assertEqual("domain_name1", ctx.domain_name) self.assertEqual("user1", ctx.user_name) self.assertEqual("user-id1", ctx.user_id) self.assertEqual("tenant1", ctx.project_name) self.assertEqual("tenant-id1", ctx.project_id) for role in ctx.roles: self.assertIn(role, ['admin', 'service']) self.assertTrue(ctx.is_admin) self.assertTrue(ctx.read_only) self.assertTrue(ctx.show_deleted) self.assertEqual("request_id1", ctx.request_id) 
self.assertEqual("trust_id1", ctx.trust_id) self.assertEqual("token_info1", ctx.auth_token_info) def test_to_dict_from_dict(self): ctx = self._create_context() ctx2 = magnum_context.RequestContext.from_dict(ctx.to_dict()) self.assertEqual(ctx.auth_token, ctx2.auth_token) self.assertEqual(ctx.auth_url, ctx2.auth_url) self.assertEqual(ctx.domain_id, ctx2.domain_id) self.assertEqual(ctx.domain_name, ctx2.domain_name) self.assertEqual(ctx.user_name, ctx2.user_name) self.assertEqual(ctx.user_id, ctx2.user_id) self.assertEqual(ctx.project_id, ctx2.project_id) self.assertEqual(ctx.project_name, ctx2.project_name) self.assertEqual(ctx.project_id, ctx2.project_id) self.assertEqual(ctx.is_admin, ctx2.is_admin) self.assertEqual(ctx.read_only, ctx2.read_only) self.assertEqual(ctx.roles, ctx2.roles) self.assertEqual(ctx.show_deleted, ctx2.show_deleted) self.assertEqual(ctx.request_id, ctx2.request_id) self.assertEqual(ctx.trust_id, ctx2.trust_id) self.assertEqual(ctx.auth_token_info, ctx2.auth_token_info) def test_request_context_sets_is_admin(self): ctxt = magnum_context.make_admin_context() self.assertTrue(ctxt.is_admin) magnum-6.1.0/magnum/tests/unit/common/test_keystone.py0000666000175100017510000002410013244017334023175 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_config import fixture from keystoneauth1 import exceptions as ka_exception from keystoneauth1 import identity as ka_identity import keystoneclient.exceptions as kc_exception from magnum.common import exception from magnum.common import keystone import magnum.conf from magnum.conf import keystone as ksconf from magnum.tests import base from magnum.tests import utils CONF = magnum.conf.CONF @mock.patch('keystoneclient.v3.client.Client') class KeystoneClientTest(base.TestCase): def setUp(self): super(KeystoneClientTest, self).setUp() dummy_url = 'http://server.test:5000/v3' self.ctx = utils.dummy_context() self.ctx.auth_url = dummy_url self.ctx.auth_token = 'abcd1234' plugin = keystone.ka_loading.get_plugin_loader('password') opts = keystone.ka_loading.get_auth_plugin_conf_options(plugin) cfg_fixture = self.useFixture(fixture.Config()) cfg_fixture.register_opts(opts, group=ksconf.CFG_GROUP) self.config(auth_type='password', auth_url=dummy_url, username='fake_user', password='fake_pass', project_name='fake_project', group=ksconf.CFG_GROUP) self.config(auth_uri=dummy_url, admin_user='magnum', admin_password='varybadpass', admin_tenant_name='service', group=ksconf.CFG_LEGACY_GROUP) # Disable global mocking for trustee_domain_id self.stop_global( 'magnum.common.keystone.KeystoneClientV3.trustee_domain_id') def tearDown(self): # Re-enable global mocking for trustee_domain_id. We need this because # mock blows up when trying to stop an already stopped patch (which it # will do due to the addCleanup() in base.TestCase). 
self.start_global( 'magnum.common.keystone.KeystoneClientV3.trustee_domain_id') super(KeystoneClientTest, self).tearDown() def test_client_with_password(self, mock_ks): self.ctx.is_admin = True self.ctx.auth_token_info = None self.ctx.auth_token = None self.ctx.trust_id = None ks_client = keystone.KeystoneClientV3(self.ctx) ks_client.client session = ks_client.session auth_plugin = session.auth mock_ks.assert_called_once_with(session=session, trust_id=None) self.assertIsInstance(auth_plugin, ka_identity.Password) @mock.patch('magnum.common.keystone.ka_loading') @mock.patch('magnum.common.keystone.ka_v3') def test_client_with_password_legacy(self, mock_v3, mock_loading, mock_ks): self.ctx.is_admin = True self.ctx.auth_token_info = None self.ctx.auth_token = None self.ctx.trust_id = None mock_loading.load_auth_from_conf_options.side_effect = \ ka_exception.MissingRequiredOptions(mock.MagicMock()) ks_client = keystone.KeystoneClientV3(self.ctx) ks_client.client session = ks_client.session self.assertWarnsRegex(Warning, '[keystone_authtoken] section is deprecated') mock_v3.Password.assert_called_once_with( auth_url='http://server.test:5000/v3', password='varybadpass', project_domain_id='default', project_name='service', user_domain_id='default', username='magnum') mock_ks.assert_called_once_with(session=session, trust_id=None) @mock.patch('magnum.common.keystone.ka_access') def test_client_with_access_info(self, mock_access, mock_ks): self.ctx.auth_token_info = mock.MagicMock() ks_client = keystone.KeystoneClientV3(self.ctx) ks_client.client session = ks_client.session auth_plugin = session.auth mock_access.create.assert_called_once_with(body=mock.ANY, auth_token='abcd1234') mock_ks.assert_called_once_with(session=session, trust_id=None) self.assertIsInstance(auth_plugin, ka_identity.access.AccessInfoPlugin) @mock.patch('magnum.common.keystone.ka_v3') def test_client_with_token(self, mock_v3, mock_ks): ks_client = keystone.KeystoneClientV3(self.ctx) ks_client.client 
session = ks_client.session mock_v3.Token.assert_called_once_with( auth_url='http://server.test:5000/v3', token='abcd1234') mock_ks.assert_called_once_with(session=session, trust_id=None) def test_client_with_no_credentials(self, mock_ks): self.ctx.auth_token = None ks_client = keystone.KeystoneClientV3(self.ctx) self.assertRaises(exception.AuthorizationFailure, ks_client._get_auth) mock_ks.assert_not_called() def test_delete_trust(self, mock_ks): mock_ks.return_value.trusts.delete.return_value = None ks_client = keystone.KeystoneClientV3(self.ctx) cluster = mock.MagicMock() cluster.trust_id = 'atrust123' self.assertIsNone(ks_client.delete_trust(self.ctx, cluster)) mock_ks.return_value.trusts.delete.assert_called_once_with('atrust123') def test_delete_trust_not_found(self, mock_ks): mock_delete = mock_ks.return_value.trusts.delete mock_delete.side_effect = kc_exception.NotFound() ks_client = keystone.KeystoneClientV3(self.ctx) cluster = mock.MagicMock() cluster.trust_id = 'atrust123' self.assertIsNone(ks_client.delete_trust(self.ctx, cluster)) @mock.patch('keystoneauth1.session.Session') def test_create_trust_with_all_roles(self, mock_session, mock_ks): mock_session.return_value.get_user_id.return_value = '123456' mock_session.return_value.get_project_id.return_value = '654321' self.ctx.roles = ['role1', 'role2'] ks_client = keystone.KeystoneClientV3(self.ctx) ks_client.create_trust(trustee_user='888888') mock_ks.return_value.trusts.create.assert_called_once_with( delegation_depth=0, trustor_user='123456', project='654321', trustee_user='888888', role_names=['role1', 'role2'], impersonation=True) @mock.patch('keystoneauth1.session.Session') def test_create_trust_with_limit_roles(self, mock_session, mock_ks): mock_session.return_value.get_user_id.return_value = '123456' mock_session.return_value.get_project_id.return_value = '654321' self.ctx.roles = ['role1', 'role2'] ks_client = keystone.KeystoneClientV3(self.ctx) CONF.set_override('roles', ['role3'], 
group='trust') ks_client.create_trust(trustee_user='888888') mock_ks.return_value.trusts.create.assert_called_once_with( delegation_depth=0, trustor_user='123456', project='654321', trustee_user='888888', role_names=['role3'], impersonation=True) @mock.patch('magnum.common.keystone.KeystoneClientV3.trustee_domain_id') def test_create_trustee(self, mock_tdi, mock_ks): expected_username = '_username' expected_password = '_password' expected_domain = '_expected_trustee_domain_id' mock_tdi.__get__ = mock.MagicMock(return_value=expected_domain) ks_client = keystone.KeystoneClientV3(self.ctx) ks_client.create_trustee( username=expected_username, password=expected_password, ) mock_ks.return_value.users.create.assert_called_once_with( name=expected_username, password=expected_password, domain=expected_domain, ) @mock.patch('magnum.common.keystone.KeystoneClientV3.domain_admin_auth') @mock.patch('magnum.common.keystone.KeystoneClientV3.domain_admin_session') def test_trustee_domain_id(self, mock_session, mock_auth, mock_ks): expected_domain_id = '_expected_domain_id' _mock_session = mock.MagicMock() mock_session.__get__ = mock.MagicMock(return_value=_mock_session) _mock_auth = mock.MagicMock() mock_auth.__get__ = mock.MagicMock(return_value=_mock_auth) mock_access = mock.MagicMock() mock_access.domain_id = expected_domain_id _mock_auth.get_access.return_value = mock_access ks_client = keystone.KeystoneClientV3(self.ctx) self.assertEqual(expected_domain_id, ks_client.trustee_domain_id) _mock_auth.get_access.assert_called_once_with( _mock_session ) def test_get_validate_region_name(self, mock_ks): key = 'region_name' val = 'RegionOne' CONF.set_override(key, val, 'cinder_client') mock_region = mock.MagicMock() mock_region.id = 'RegionOne' mock_ks.return_value.regions.list.return_value = [mock_region] ks_client = keystone.KeystoneClientV3(self.ctx) region_name = ks_client.get_validate_region_name(val) self.assertEqual('RegionOne', region_name) def 
test_get_validate_region_name_not_found(self, mock_ks): key = 'region_name' val = 'region123' CONF.set_override(key, val, 'cinder_client') ks_client = keystone.KeystoneClientV3(self.ctx) self.assertRaises(exception.InvalidParameterValue, ks_client.get_validate_region_name, val) def test_get_validate_region_name_is_None(self, mock_ks): key = 'region_name' val = None CONF.set_override(key, val, 'cinder_client') ks_client = keystone.KeystoneClientV3(self.ctx) self.assertRaises(exception.InvalidParameterValue, ks_client.get_validate_region_name, val) magnum-6.1.0/magnum/tests/unit/common/x509/0000775000175100017510000000000013244017675020441 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/common/x509/test_validator.py0000666000175100017510000001122413244017334024031 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import unittest from cryptography import x509 as c_x509 from magnum.common.exception import CertificateValidationError from magnum.common.x509 import validator as v class TestValidators(unittest.TestCase): def _build_key_usage(self, critical=False): # Digital Signature and Key Encipherment are enabled key_usage = c_x509.KeyUsage( True, False, True, False, False, False, False, False, False) return c_x509.Extension(key_usage.oid, critical, key_usage) def _build_basic_constraints(self, ca=False, critical=False): bc = c_x509.BasicConstraints(ca, None) return c_x509.Extension(bc.oid, critical, bc) def test_filter_allowed_extensions(self): key_usage = self._build_key_usage(critical=True) actual = [e for e in v.filter_allowed_extensions([key_usage], ['keyUsage'])] self.assertEqual([key_usage], actual) def test_filter_allowed_extensions_disallowed_but_not_critical(self): key_usage = self._build_key_usage() actual = [e for e in v.filter_allowed_extensions([key_usage], ['subjectAltName'])] self.assertEqual([], actual) def test_filter_allowed_extensions_disallowed(self): key_usage = self._build_key_usage(critical=True) with self.assertRaises(CertificateValidationError): next(v.filter_allowed_extensions([key_usage], ['subjectAltName'])) def test_merge_key_usage(self): key_usage = self._build_key_usage(critical=True) self.assertEqual(key_usage, v._merge_key_usage(key_usage, ['Digital Signature', 'Key Encipherment'])) def test_merge_key_usage_disallowed_but_not_critical(self): key_usage = self._build_key_usage() expected = c_x509.KeyUsage( True, False, False, False, False, False, False, False, False) expected = c_x509.Extension(expected.oid, False, expected) self.assertEqual(expected, v._merge_key_usage(key_usage, ['Digital Signature'])) def test_merge_key_usage_disallowed(self): key_usage = self._build_key_usage(critical=True) with self.assertRaises(CertificateValidationError): v._merge_key_usage(key_usage, ['Digital Signature']) def 
test_disallow_ca_in_basic_constraints_not_critical(self): bc = self._build_basic_constraints(ca=True) expected = self._build_basic_constraints(ca=False) self.assertEqual(expected, v._disallow_ca_in_basic_constraints(bc)) def test_disallow_ca_in_basic_constraints(self): bc = self._build_basic_constraints(ca=True, critical=True) with self.assertRaises(CertificateValidationError): v._disallow_ca_in_basic_constraints(bc) def test_disallow_ca_in_basic_constraints_with_non_ca(self): bc = self._build_basic_constraints(ca=False) self.assertEqual(bc, v._disallow_ca_in_basic_constraints(bc)) def test_remove_ca_key_usage(self): contains_ca_key_usage = set([ "Digital Signature", "Certificate Sign", "CRL Sign"]) self.assertEqual(set(["Digital Signature"]), v._remove_ca_key_usage(contains_ca_key_usage)) def test_remove_ca_key_usage_cert_sign(self): contains_ca_key_usage = set(["Digital Signature", "Certificate Sign"]) self.assertEqual(set(["Digital Signature"]), v._remove_ca_key_usage(contains_ca_key_usage)) def test_remove_ca_key_usage_crl_sign(self): contains_ca_key_usage = set(["Digital Signature", "CRL Sign"]) self.assertEqual(set(["Digital Signature"]), v._remove_ca_key_usage(contains_ca_key_usage)) def test_remove_ca_key_usage_without_ca_usage(self): contains_ca_key_usage = set(["Digital Signature"]) self.assertEqual(set(["Digital Signature"]), v._remove_ca_key_usage(contains_ca_key_usage)) magnum-6.1.0/magnum/tests/unit/common/x509/test_sign.py0000666000175100017510000002227513244017334023014 0ustar zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives import serialization from cryptography import x509 as c_x509 from cryptography.x509.oid import NameOID import mock import six from magnum.common import exception from magnum.common.x509 import operations from magnum.tests import base class TestX509(base.BaseTestCase): def setUp(self): super(TestX509, self).setUp() self.issuer_name = six.u("fake-issuer") self.subject_name = six.u("fake-subject") self.organization_name = six.u("fake-organization") self.ca_encryption_password = six.b("fake-ca-password") self.encryption_password = six.b("fake-password") def _load_pems(self, keypairs, encryption_password): private_key = serialization.load_pem_private_key( keypairs['private_key'], password=encryption_password, backend=default_backend(), ) certificate = c_x509.load_pem_x509_certificate( keypairs['certificate'], default_backend()) return certificate, private_key def _generate_ca_certificate(self, issuer_name=None): issuer_name = issuer_name or self.issuer_name keypairs = operations.generate_ca_certificate( issuer_name, encryption_password=self.ca_encryption_password) return self._load_pems(keypairs, self.ca_encryption_password) def _generate_client_certificate(self, issuer_name, subject_name): ca = operations.generate_ca_certificate( self.issuer_name, encryption_password=self.ca_encryption_password) keypairs = operations.generate_client_certificate( self.issuer_name, 
self.subject_name, self.organization_name, ca['private_key'], encryption_password=self.encryption_password, ca_key_password=self.ca_encryption_password, ) return self._load_pems(keypairs, self.encryption_password) def _public_bytes(self, public_key): return public_key.public_bytes( serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo ) def _private_bytes(self, private_key): return private_key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.PKCS8, encryption_algorithm=serialization.NoEncryption() ) def _generate_private_key(self): return rsa.generate_private_key( public_exponent=65537, key_size=2048, backend=default_backend() ) def _build_csr(self, private_key): csr = c_x509.CertificateSigningRequestBuilder() csr = csr.subject_name(c_x509.Name([ c_x509.NameAttribute(NameOID.COMMON_NAME, self.subject_name) ])) return csr.sign(private_key, hashes.SHA256(), default_backend()) def assertHasPublicKey(self, keypairs): key = keypairs[1] cert = keypairs[0] self.assertEqual(self._public_bytes(key.public_key()), self._public_bytes(cert.public_key())) def assertHasSubjectName(self, cert, subject_name): actual_subject_name = cert.subject.get_attributes_for_oid( c_x509.NameOID.COMMON_NAME) actual_subject_name = actual_subject_name[0].value self.assertEqual(subject_name, actual_subject_name) def assertHasIssuerName(self, cert, issuer_name): actual_issuer_name = cert.issuer.get_attributes_for_oid( c_x509.NameOID.COMMON_NAME) actual_issuer_name = actual_issuer_name[0].value self.assertEqual(issuer_name, actual_issuer_name) def assertInClientExtensions(self, cert): key_usage = c_x509.KeyUsage(True, False, True, False, False, False, False, False, False) key_usage = c_x509.Extension(key_usage.oid, True, key_usage) extended_key_usage = c_x509.ExtendedKeyUsage([c_x509.OID_CLIENT_AUTH]) extended_key_usage = c_x509.Extension(extended_key_usage.oid, False, extended_key_usage) basic_constraints = c_x509.BasicConstraints(ca=False, 
path_length=None) basic_constraints = c_x509.Extension(basic_constraints.oid, True, basic_constraints) self.assertIn(key_usage, cert.extensions) self.assertIn(extended_key_usage, cert.extensions) self.assertIn(basic_constraints, cert.extensions) def test_generate_ca_certificate_with_bytes_issuer_name(self): issuer_name = six.b("bytes-issuer-name") cert, _ = self._generate_ca_certificate(issuer_name) issuer_name = issuer_name.decode('utf-8') self.assertHasSubjectName(cert, issuer_name) self.assertHasIssuerName(cert, issuer_name) def test_generate_ca_certificate_has_publickey(self): keypairs = self._generate_ca_certificate(self.issuer_name) self.assertHasPublicKey(keypairs) def test_generate_ca_certificate_set_subject_name(self): cert, _ = self._generate_ca_certificate(self.issuer_name) self.assertHasSubjectName(cert, self.issuer_name) def test_generate_ca_certificate_set_issuer_name(self): cert, _ = self._generate_ca_certificate(self.issuer_name) self.assertHasIssuerName(cert, self.issuer_name) def test_generate_ca_certificate_set_extentions_as_ca(self): cert, _ = self._generate_ca_certificate(self.issuer_name) key_usage = c_x509.KeyUsage(False, False, False, False, False, True, False, False, False) key_usage = c_x509.Extension(key_usage.oid, True, key_usage) basic_constraints = c_x509.BasicConstraints(ca=True, path_length=0) basic_constraints = c_x509.Extension(basic_constraints.oid, True, basic_constraints) self.assertIn(key_usage, cert.extensions) self.assertIn(basic_constraints, cert.extensions) def test_generate_client_certificate_has_publickey(self): keypairs = self._generate_client_certificate( self.issuer_name, self.subject_name) self.assertHasPublicKey(keypairs) def test_generate_client_certificate_set_subject_name(self): cert, _ = self._generate_client_certificate( self.issuer_name, self.subject_name) self.assertHasSubjectName(cert, self.subject_name) def test_generate_client_certificate_set_issuer_name(self): cert, key = self._generate_client_certificate( 
self.issuer_name, self.subject_name) self.assertHasIssuerName(cert, self.issuer_name) def test_generate_client_certificate_set_extentions_as_client(self): cert, key = self._generate_client_certificate( self.issuer_name, self.subject_name) self.assertInClientExtensions(cert) def test_load_pem_private_key_with_bytes_private_key(self): private_key = self._generate_private_key() private_key = self._private_bytes(private_key) self.assertIsInstance(private_key, six.binary_type) private_key = operations._load_pem_private_key(private_key) self.assertIsInstance(private_key, rsa.RSAPrivateKey) def test_load_pem_private_key_with_unicode_private_key(self): private_key = self._generate_private_key() private_key = self._private_bytes(private_key) private_key = six.text_type(private_key.decode('utf-8')) self.assertIsInstance(private_key, six.text_type) private_key = operations._load_pem_private_key(private_key) self.assertIsInstance(private_key, rsa.RSAPrivateKey) @mock.patch('cryptography.x509.load_pem_x509_csr') @mock.patch('six.b') def test_sign_with_unicode_csr(self, mock_six, mock_load_pem): ca_key = self._generate_private_key() private_key = self._generate_private_key() csr_obj = self._build_csr(private_key) csr = csr_obj.public_bytes(serialization.Encoding.PEM) csr = six.text_type(csr.decode('utf-8')) mock_load_pem.return_value = csr_obj operations.sign(csr, self.issuer_name, ca_key, skip_validation=True) mock_six.assert_called_once_with(csr) def test_sign_with_invalid_csr(self): ca_key = self._generate_private_key() csr = 'test' csr = six.u(csr) self.assertRaises(exception.InvalidCsr, operations.sign, csr, self.issuer_name, ca_key, skip_validation=True) magnum-6.1.0/magnum/tests/unit/common/x509/__init__.py0000666000175100017510000000000013244017334022532 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/common/x509/test_operations.py0000666000175100017510000000361113244017334024230 0ustar zuulzuul00000000000000# Copyright 2015 Rackspace, inc. 
All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cryptography.hazmat.primitives import serialization import mock from magnum.common.x509 import operations from magnum.tests import base class TestX509Operations(base.BaseTestCase): def setUp(self): super(TestX509Operations, self).setUp() @mock.patch.object(serialization, 'NoEncryption') @mock.patch.object(operations, 'default_backend') @mock.patch.object(operations, '_load_pem_private_key') def test_decrypt_key(self, mock_load_pem_private_key, mock_default_backend, mock_no_encryption_class): mock_private_key = mock.MagicMock() mock_load_pem_private_key.return_value = mock_private_key mock_private_key.private_bytes.return_value = mock.sentinel.decrypted actual_decrypted = operations.decrypt_key(mock.sentinel.key, mock.sentinel.passphrase) mock_load_pem_private_key.assert_called_once_with( mock.sentinel.key, mock.sentinel.passphrase) mock_private_key.private_bytes.assert_called_once_with( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.PKCS8, encryption_algorithm=mock_no_encryption_class.return_value ) self.assertEqual(mock.sentinel.decrypted, actual_decrypted) magnum-6.1.0/magnum/tests/unit/common/test_utils.py0000666000175100017510000002577513244017334022517 0ustar zuulzuul00000000000000# Copyright 2011 Justin Santa Barbara # Copyright 2012 Hewlett-Packard Development Company, L.P. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import errno import os import os.path import shutil import tempfile import mock from oslo_concurrency import processutils from oslo_utils import netutils from magnum.common import exception from magnum.common import utils import magnum.conf from magnum.tests import base CONF = magnum.conf.CONF class UtilsTestCase(base.TestCase): def test_get_k8s_quantity(self): self.assertEqual(1024000.0, utils.get_k8s_quantity('1000Ki')) self.assertEqual(0.001, utils.get_k8s_quantity('1E-3')) self.assertEqual(0.5, utils.get_k8s_quantity('0.0005k')) self.assertEqual(0.5, utils.get_k8s_quantity('500m')) self.assertEqual(1300000.0, utils.get_k8s_quantity('1.3E+6')) self.assertEqual(1300000.0, utils.get_k8s_quantity('1.3E6')) self.assertRaises(exception.UnsupportedK8sQuantityFormat, utils.get_k8s_quantity, '1E1E') def test_get_docker_quantity(self): self.assertEqual(512, utils.get_docker_quantity('512')) self.assertEqual(512, utils.get_docker_quantity('512b')) self.assertEqual(512 * 1024, utils.get_docker_quantity('512k')) self.assertEqual(512 * 1024 * 1024, utils.get_docker_quantity('512m')) self.assertEqual(512 * 1024 * 1024 * 1024, utils.get_docker_quantity('512g')) self.assertRaises(exception.UnsupportedDockerQuantityFormat, utils.get_docker_quantity, '512bb') self.assertRaises(exception.UnsupportedDockerQuantityFormat, utils.get_docker_quantity, '512B') def test_get_openstasck_ca(self): # openstack_ca_file is empty self.assertEqual('', 
utils.get_openstack_ca()) # openstack_ca_file is set but the file doesn't exist CONF.set_override('openstack_ca_file', '/tmp/invalid-ca.pem', group='drivers') self.assertRaises(IOError, utils.get_openstack_ca) # openstack_ca_file is set and the file exists CONF.set_override('openstack_ca_file', '/tmp/invalid-ca.pem', group='drivers') with mock.patch('magnum.common.utils.open', mock.mock_open(read_data="CERT"), create=True): self.assertEqual('CERT', utils.get_openstack_ca()) class ExecuteTestCase(base.TestCase): def test_retry_on_failure(self): fd, tmpfilename = tempfile.mkstemp() _, tmpfilename2 = tempfile.mkstemp() try: fp = os.fdopen(fd, 'w+') fp.write('''#!/bin/sh # If stdin fails to get passed during one of the runs, make a note. if ! grep -q foo then echo 'failure' > "$1" fi # If stdin has failed to get passed during this or a previous run, exit early. if grep failure "$1" then exit 1 fi runs="$(cat $1)" if [ -z "$runs" ] then runs=0 fi runs=$(($runs + 1)) echo $runs > "$1" exit 1 ''') fp.close() os.chmod(tmpfilename, 0o755) try: self.assertRaises(processutils.ProcessExecutionError, utils.execute, tmpfilename, tmpfilename2, attempts=10, process_input=b'foo', delay_on_retry=False) except OSError as e: if e.errno == errno.EACCES: self.skipTest("Permissions error detected. " "Are you running with a noexec /tmp?") else: raise with open(tmpfilename2, 'r') as fp: runs = fp.read() self.assertNotEqual(runs.strip(), 'failure', 'stdin did not ' 'always get passed ' 'correctly') runs = int(runs.strip()) self.assertEqual(10, runs, 'Ran %d times instead of 10.' 
% runs) finally: os.unlink(tmpfilename) os.unlink(tmpfilename2) def test_unknown_kwargs_raises_error(self): self.assertRaises(processutils.UnknownArgumentError, utils.execute, '/usr/bin/env', 'true', this_is_not_a_valid_kwarg=True) def test_check_exit_code_boolean(self): utils.execute('/usr/bin/env', 'false', check_exit_code=False) self.assertRaises(processutils.ProcessExecutionError, utils.execute, '/usr/bin/env', 'false', check_exit_code=True) def test_no_retry_on_success(self): fd, tmpfilename = tempfile.mkstemp() _, tmpfilename2 = tempfile.mkstemp() try: fp = os.fdopen(fd, 'w+') fp.write('''#!/bin/sh # If we've already run, bail out. grep -q foo "$1" && exit 1 # Mark that we've run before. echo foo > "$1" # Check that stdin gets passed correctly. grep foo ''') fp.close() os.chmod(tmpfilename, 0o755) try: utils.execute(tmpfilename, tmpfilename2, process_input=b'foo', attempts=2) except OSError as e: if e.errno == errno.EACCES: self.skipTest("Permissions error detected. " "Are you running with a noexec /tmp?") else: raise finally: os.unlink(tmpfilename) os.unlink(tmpfilename2) @mock.patch.object(processutils, 'execute') @mock.patch.object(os.environ, 'copy', return_value={}) def test_execute_use_standard_locale_no_env_variables(self, env_mock, execute_mock): utils.execute('foo', use_standard_locale=True) execute_mock.assert_called_once_with('foo', env_variables={'LC_ALL': 'C'}) @mock.patch.object(processutils, 'execute') def test_execute_use_standard_locale_with_env_variables(self, execute_mock): utils.execute('foo', use_standard_locale=True, env_variables={'foo': 'bar'}) execute_mock.assert_called_once_with('foo', env_variables={'LC_ALL': 'C', 'foo': 'bar'}) @mock.patch.object(processutils, 'execute') def test_execute_not_use_standard_locale(self, execute_mock): utils.execute('foo', use_standard_locale=False, env_variables={'foo': 'bar'}) execute_mock.assert_called_once_with('foo', env_variables={'foo': 'bar'}) def test_execute_get_root_helper(self): with 
mock.patch.object(processutils, 'execute') as execute_mock: helper = utils._get_root_helper() utils.execute('foo', run_as_root=True) execute_mock.assert_called_once_with('foo', run_as_root=True, root_helper=helper) def test_execute_without_root_helper(self): with mock.patch.object(processutils, 'execute') as execute_mock: utils.execute('foo', run_as_root=False) execute_mock.assert_called_once_with('foo', run_as_root=False) def test_validate_and_normalize_mac(self): mac = 'AA:BB:CC:DD:EE:FF' with mock.patch.object(netutils, 'is_valid_mac') as m_mock: m_mock.return_value = True self.assertEqual(mac.lower(), utils.validate_and_normalize_mac(mac)) def test_validate_and_normalize_mac_invalid_format(self): with mock.patch.object(netutils, 'is_valid_mac') as m_mock: m_mock.return_value = False self.assertRaises(exception.InvalidMAC, utils.validate_and_normalize_mac, 'invalid-mac') def test_safe_rstrip(self): value = '/test/' rstripped_value = '/test' not_rstripped = '/' self.assertEqual(rstripped_value, utils.safe_rstrip(value, '/')) self.assertEqual(not_rstripped, utils.safe_rstrip(not_rstripped, '/')) def test_safe_rstrip_not_raises_exceptions(self): # Supplying an integer should normally raise an exception because it # does not save the rstrip() method. value = 10 # In the case of raising an exception safe_rstrip() should return the # original value. 
self.assertEqual(value, utils.safe_rstrip(value)) class TempFilesTestCase(base.TestCase): def test_tempdir(self): dirname = None with utils.tempdir() as tempdir: self.assertTrue(os.path.isdir(tempdir)) dirname = tempdir self.assertFalse(os.path.exists(dirname)) @mock.patch.object(shutil, 'rmtree') @mock.patch.object(tempfile, 'mkdtemp') def test_tempdir_mocked(self, mkdtemp_mock, rmtree_mock): self.config(tempdir='abc') mkdtemp_mock.return_value = 'temp-dir' kwargs = {'a': 'b'} with utils.tempdir(**kwargs) as tempdir: self.assertEqual('temp-dir', tempdir) tempdir_created = tempdir mkdtemp_mock.assert_called_once_with(**kwargs) rmtree_mock.assert_called_once_with(tempdir_created) @mock.patch.object(utils, 'LOG') @mock.patch.object(shutil, 'rmtree') @mock.patch.object(tempfile, 'mkdtemp') def test_tempdir_mocked_error_on_rmtree(self, mkdtemp_mock, rmtree_mock, log_mock): self.config(tempdir='abc') mkdtemp_mock.return_value = 'temp-dir' rmtree_mock.side_effect = OSError with utils.tempdir() as tempdir: self.assertEqual('temp-dir', tempdir) tempdir_created = tempdir rmtree_mock.assert_called_once_with(tempdir_created) self.assertTrue(log_mock.error.called) class GeneratePasswordTestCase(base.TestCase): def test_generate_password(self): password = utils.generate_password(length=12) self.assertTrue([c for c in password if c in '0123456789']) self.assertTrue([c for c in password if c in 'abcdefghijklmnopqrstuvwxyz']) self.assertTrue([c for c in password if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ']) magnum-6.1.0/magnum/tests/unit/common/test_policy.py0000666000175100017510000000342013244017334022635 0ustar zuulzuul00000000000000# Copyright 2017 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy as oslo_policy from magnum.common import context as magnum_context from magnum.common import policy from magnum.tests import base class TestPolicy(base.TestCase): def setUp(self): super(TestPolicy, self).setUp() rules_dict = {"context_is_admin": "role:admin"} self.rules = oslo_policy.Rules.from_dict(rules_dict) def test_check_is_admin_with_admin_context_succeeds(self): ctx = magnum_context.RequestContext(user='test-user', project_id='test-project-id', is_admin=True) # explicitly set admin role as this test checks for admin role # with the policy engine ctx.roles = ['admin'] self.assertTrue(policy.check_is_admin(ctx)) def test_check_is_admin_with_user_context_fails(self): ctx = magnum_context.RequestContext(user='test-user', project_id='test-project-id') # there is no admin role set in the context, so check_is_admin # should return False self.assertFalse(policy.check_is_admin(ctx)) magnum-6.1.0/magnum/tests/unit/common/test_clients.py0000666000175100017510000003632513244017334023011 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from barbicanclient.v1 import client as barbicanclient from glanceclient import client as glanceclient from heatclient import client as heatclient import mock from neutronclient.v2_0 import client as neutronclient from novaclient import client as novaclient from magnum.common import clients from magnum.common import exception import magnum.conf from magnum.tests import base CONF = magnum.conf.CONF class ClientsTest(base.BaseTestCase): def setUp(self): super(ClientsTest, self).setUp() CONF.set_override('auth_uri', 'http://server.test:5000/v2.0', group='keystone_authtoken') @mock.patch.object(clients.OpenStackClients, 'keystone') def test_url_for(self, mock_keystone): obj = clients.OpenStackClients(None) obj.url_for(service_type='fake_service', interface='fake_endpoint') mock_endpoint = mock_keystone.return_value.session.get_endpoint mock_endpoint.assert_called_once_with(service_type='fake_service', interface='fake_endpoint') @mock.patch.object(clients.OpenStackClients, 'keystone') def test_magnum_url(self, mock_keystone): fake_region = 'fake_region' fake_endpoint = 'fake_endpoint' CONF.set_override('region_name', fake_region, group='magnum_client') CONF.set_override('endpoint_type', fake_endpoint, group='magnum_client') obj = clients.OpenStackClients(None) obj.magnum_url() mock_endpoint = mock_keystone.return_value.session.get_endpoint mock_endpoint.assert_called_once_with(region_name=fake_region, service_type='container-infra', interface=fake_endpoint) @mock.patch.object(heatclient, 'Client') @mock.patch.object(clients.OpenStackClients, 'url_for') @mock.patch.object(clients.OpenStackClients, 'auth_url') def _test_clients_heat(self, expected_region_name, mock_auth, mock_url, mock_call): mock_auth.__get__ = mock.Mock(return_value="keystone_url") con = mock.MagicMock() con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155" con.auth_url = "keystone_url" 
mock_url.return_value = "url_from_keystone" obj = clients.OpenStackClients(con) obj._heat = None obj.heat() mock_call.assert_called_once_with( CONF.heat_client.api_version, endpoint='url_from_keystone', username=None, cert_file=None, token='3bcc3d3a03f44e3d8377f9247b0ad155', auth_url='keystone_url', ca_file=None, key_file=None, password=None, insecure=False) mock_url.assert_called_once_with(service_type='orchestration', interface='publicURL', region_name=expected_region_name) def test_clients_heat(self): self._test_clients_heat(None) def test_clients_heat_region(self): CONF.set_override('region_name', 'myregion', group='heat_client') self._test_clients_heat('myregion') def test_clients_heat_noauth(self): con = mock.MagicMock() con.auth_token = None con.auth_token_info = None con.trust_id = None auth_url = mock.PropertyMock(name="auth_url", return_value="keystone_url") type(con).auth_url = auth_url con.get_url_for = mock.Mock(name="get_url_for") con.get_url_for.return_value = "url_from_keystone" obj = clients.OpenStackClients(con) obj._heat = None self.assertRaises(exception.AuthorizationFailure, obj.heat) @mock.patch.object(clients.OpenStackClients, 'url_for') @mock.patch.object(clients.OpenStackClients, 'auth_url') def test_clients_heat_cached(self, mock_auth, mock_url): mock_auth.__get__ = mock.Mock(return_value="keystone_url") con = mock.MagicMock() con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155" con.auth_url = "keystone_url" mock_url.return_value = "url_from_keystone" obj = clients.OpenStackClients(con) obj._heat = None heat = obj.heat() heat_cached = obj.heat() self.assertEqual(heat, heat_cached) @mock.patch.object(glanceclient, 'Client') @mock.patch.object(clients.OpenStackClients, 'url_for') @mock.patch.object(clients.OpenStackClients, 'auth_url') def _test_clients_glance(self, expected_region_name, mock_auth, mock_url, mock_call): mock_auth.__get__ = mock.Mock(return_value="keystone_url") con = mock.MagicMock() con.auth_token = 
"3bcc3d3a03f44e3d8377f9247b0ad155" con.auth_url = "keystone_url" mock_url.return_value = "url_from_keystone" obj = clients.OpenStackClients(con) obj._glance = None obj.glance() mock_call.assert_called_once_with( CONF.glance_client.api_version, endpoint='url_from_keystone', username=None, token='3bcc3d3a03f44e3d8377f9247b0ad155', auth_url='keystone_url', password=None, cacert=None, cert=None, key=None, insecure=False) mock_url.assert_called_once_with(service_type='image', interface='publicURL', region_name=expected_region_name) def test_clients_glance(self): self._test_clients_glance(None) def test_clients_glance_region(self): CONF.set_override('region_name', 'myregion', group='glance_client') self._test_clients_glance('myregion') def test_clients_glance_noauth(self): con = mock.MagicMock() con.auth_token = None con.auth_token_info = None con.trust_id = None auth_url = mock.PropertyMock(name="auth_url", return_value="keystone_url") type(con).auth_url = auth_url con.get_url_for = mock.Mock(name="get_url_for") con.get_url_for.return_value = "url_from_keystone" obj = clients.OpenStackClients(con) obj._glance = None self.assertRaises(exception.AuthorizationFailure, obj.glance) @mock.patch.object(clients.OpenStackClients, 'url_for') @mock.patch.object(clients.OpenStackClients, 'auth_url') def test_clients_glance_cached(self, mock_auth, mock_url): mock_auth.__get__ = mock.Mock(return_value="keystone_url") con = mock.MagicMock() con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155" con.auth_url = "keystone_url" mock_url.return_value = "url_from_keystone" obj = clients.OpenStackClients(con) obj._glance = None glance = obj.glance() glance_cached = obj.glance() self.assertEqual(glance, glance_cached) @mock.patch.object(clients.OpenStackClients, 'keystone') @mock.patch.object(barbicanclient, 'Client') @mock.patch.object(clients.OpenStackClients, 'url_for') def _test_clients_barbican(self, expected_region_name, mock_url, mock_call, mock_keystone): con = mock.MagicMock() 
con.auth_url = "keystone_url" mock_url.return_value = "url_from_keystone" keystone = mock.MagicMock() keystone.session = mock.MagicMock() mock_keystone.return_value = keystone obj = clients.OpenStackClients(con) obj._barbican = None obj.barbican() mock_call.assert_called_once_with( endpoint='url_from_keystone', session=keystone.session) mock_keystone.assert_called_once_with() mock_url.assert_called_once_with(service_type='key-manager', interface='publicURL', region_name=expected_region_name) def test_clients_barbican(self): self._test_clients_barbican(None) def test_clients_barbican_region(self): CONF.set_override('region_name', 'myregion', group='barbican_client') self._test_clients_barbican('myregion') def test_clients_barbican_noauth(self): con = mock.MagicMock() con.auth_token = None con.auth_token_info = None con.trust_id = None auth_url = mock.PropertyMock(name="auth_url", return_value="keystone_url") type(con).auth_url = auth_url con.get_url_for = mock.Mock(name="get_url_for") con.get_url_for.return_value = "url_from_keystone" obj = clients.OpenStackClients(con) obj._barbican = None self.assertRaises(exception.AuthorizationFailure, obj.barbican) @mock.patch.object(clients.OpenStackClients, 'keystone') @mock.patch.object(clients.OpenStackClients, 'url_for') def test_clients_barbican_cached(self, mock_url, mock_keystone): con = mock.MagicMock() con.auth_url = "keystone_url" mock_url.return_value = "url_from_keystone" keystone = mock.MagicMock() keystone.session = mock.MagicMock() mock_keystone.return_value = keystone obj = clients.OpenStackClients(con) obj._barbican = None barbican = obj.barbican() barbican_cached = obj.barbican() self.assertEqual(barbican, barbican_cached) @mock.patch.object(novaclient, 'Client') @mock.patch.object(clients.OpenStackClients, 'keystone') @mock.patch.object(clients.OpenStackClients, 'url_for') @mock.patch.object(clients.OpenStackClients, 'auth_url') def _test_clients_nova(self, expected_region_name, mock_auth, mock_url, 
mock_keystone, mock_call): mock_auth.__get__ = mock.Mock(return_value="keystone_url") con = mock.MagicMock() keystone = mock.MagicMock() keystone.session = mock.MagicMock() mock_keystone.return_value = keystone con.auth_url = "keystone_url" mock_url.return_value = "url_from_keystone" obj = clients.OpenStackClients(con) obj._nova = None obj.nova() expected_kwargs = {'session': keystone.session, 'endpoint_override': mock_url.return_value, 'cacert': None, 'insecure': False} mock_call.assert_called_once_with(CONF.nova_client.api_version, **expected_kwargs) mock_url.assert_called_once_with(service_type='compute', interface='publicURL', region_name=expected_region_name) def test_clients_nova(self): self._test_clients_nova(None) def test_clients_nova_region(self): CONF.set_override('region_name', 'myregion', group='nova_client') self._test_clients_nova('myregion') def test_clients_nova_noauth(self): con = mock.MagicMock() con.auth_token = None con.auth_token_info = None con.trust_id = None auth_url = mock.PropertyMock(name="auth_url", return_value="keystone_url") type(con).auth_url = auth_url con.get_url_for = mock.Mock(name="get_url_for") con.get_url_for.return_value = "url_from_keystone" obj = clients.OpenStackClients(con) obj._nova = None self.assertRaises(exception.AuthorizationFailure, obj.nova) @mock.patch.object(clients.OpenStackClients, 'url_for') @mock.patch.object(clients.OpenStackClients, 'auth_url') def test_clients_nova_cached(self, mock_auth, mock_url): mock_auth.__get__ = mock.Mock(return_value="keystone_url") con = mock.MagicMock() con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155" con.auth_token_info = "auth-token-info" con.auth_url = "keystone_url" mock_url.return_value = "url_from_keystone" obj = clients.OpenStackClients(con) obj._nova = None nova = obj.nova() nova_cached = obj.nova() self.assertEqual(nova, nova_cached) @mock.patch.object(neutronclient, 'Client') @mock.patch.object(clients.OpenStackClients, 'url_for') 
@mock.patch.object(clients.OpenStackClients, 'auth_url') def _test_clients_neutron(self, expected_region_name, mock_auth, mock_url, mock_call): fake_endpoint_type = 'fake_endpoint_type' CONF.set_override('endpoint_type', fake_endpoint_type, group='neutron_client') mock_auth.__get__ = mock.Mock(return_value="keystone_url") con = mock.MagicMock() con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155" con.auth_url = "keystone_url" mock_url.return_value = "url_from_keystone" obj = clients.OpenStackClients(con) obj._neutron = None obj.neutron() mock_call.assert_called_once_with( endpoint_url='url_from_keystone', endpoint_type=fake_endpoint_type, auth_url='keystone_url', token='3bcc3d3a03f44e3d8377f9247b0ad155', ca_cert=None, insecure=False) mock_url.assert_called_once_with(service_type='network', interface=fake_endpoint_type, region_name=expected_region_name) def test_clients_neutron(self): self._test_clients_neutron(None) def test_clients_neutron_region(self): CONF.set_override('region_name', 'myregion', group='neutron_client') self._test_clients_neutron('myregion') def test_clients_neutron_noauth(self): con = mock.MagicMock() con.auth_token = None con.auth_token_info = None con.trust_id = None auth_url = mock.PropertyMock(name="auth_url", return_value="keystone_url") type(con).auth_url = auth_url con.get_url_for = mock.Mock(name="get_url_for") con.get_url_for.return_value = "url_from_keystone" obj = clients.OpenStackClients(con) obj._neutron = None self.assertRaises(exception.AuthorizationFailure, obj.neutron) @mock.patch.object(clients.OpenStackClients, 'url_for') @mock.patch.object(clients.OpenStackClients, 'auth_url') def test_clients_neutron_cached(self, mock_auth, mock_url): mock_auth.__get__ = mock.Mock(return_value="keystone_url") con = mock.MagicMock() con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155" con.auth_url = "keystone_url" mock_url.return_value = "url_from_keystone" obj = clients.OpenStackClients(con) obj._neutron = None neutron = obj.neutron() 
neutron_cached = obj.neutron() self.assertEqual(neutron, neutron_cached) magnum-6.1.0/magnum/tests/unit/common/test_profiler.py0000666000175100017510000000617213244017334023167 0ustar zuulzuul00000000000000# Copyright 2017 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import inspect import mock from oslo_config import cfg from oslo_utils import importutils from osprofiler import initializer as profiler_init from osprofiler import opts as profiler_opts import six.moves as six from magnum.common import profiler from magnum import conf from magnum.tests import base class TestProfiler(base.TestCase): def test_all_public_methods_are_traced(self): profiler_opts.set_defaults(conf.CONF) self.config(enabled=True, group='profiler') classes = [ 'magnum.conductor.api.API', 'magnum.conductor.api.ListenerAPI', 'magnum.conductor.handlers.ca_conductor.Handler', 'magnum.conductor.handlers.cluster_conductor.Handler', 'magnum.conductor.handlers.conductor_listener.Handler', 'magnum.conductor.handlers.indirection_api.Handler', 'magnum.service.periodic.MagnumPeriodicTasks', ] for clsname in classes: # give the metaclass and trace_cls() decorator a chance to patch # methods of the classes above six.reload_module( importutils.import_module(clsname.rsplit('.', 1)[0])) cls = importutils.import_class(clsname) for attr, obj in cls.__dict__.items(): # only public methods are traced if attr.startswith('_'): continue # only checks callables if not 
(inspect.ismethod(obj) or inspect.isfunction(obj)): continue # osprofiler skips static methods if isinstance(obj, staticmethod): continue self.assertTrue(getattr(obj, '__traced__', False), obj) @mock.patch.object(profiler_init, 'init_from_conf') def test_setup_profiler(self, mock_init): self.config(enabled=True, group='profiler') profiler.setup('foo', 'localhost') mock_init.assert_called_once_with(conf=conf.CONF, context=mock.ANY, project="magnum", service='foo', host='localhost') @mock.patch.object(profiler_init, 'init_from_conf') @mock.patch.object(conf, 'CONF', new=cfg.ConfigOpts()) def test_setup_profiler_without_osprofiler(self, mock_init): profiler.setup('foo', 'localhost') self.assertEqual(False, mock_init.called) magnum-6.1.0/magnum/tests/unit/common/test_exception.py0000666000175100017510000000254313244017334023341 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import inspect from magnum.common import exception from magnum.i18n import _ from magnum.tests import base class TestMagnumException(exception.MagnumException): message = _("templated %(name)s") class TestException(base.BaseTestCase): def raise_(self, ex): raise ex def test_message_is_templated(self): ex = TestMagnumException(name="NAME") self.assertEqual("templated NAME", str(ex)) def test_custom_message_is_templated(self): ex = TestMagnumException(_("custom templated %(name)s"), name="NAME") self.assertEqual("custom templated NAME", str(ex)) def test_all_exceptions(self): for name, obj in inspect.getmembers(exception): if inspect.isclass(obj) and issubclass(obj, Exception): self.assertRaises(obj, self.raise_, obj()) magnum-6.1.0/magnum/tests/unit/common/test_service.py0000666000175100017510000000412213244017334022776 0ustar zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_log import log as logging from magnum.common import service from magnum.tests import base class TestMagnumService(base.BaseTestCase): @mock.patch.object(logging, 'register_options') @mock.patch.object(logging, 'setup') @mock.patch('magnum.common.config.set_config_defaults') @mock.patch('magnum.common.config.parse_args') def test_prepare_service_with_argv_not_none(self, mock_parse, mock_set, mock_setup, mock_reg): argv = 'foo' mock_parse.side_effect = lambda *args, **kwargs: None service.prepare_service(argv) mock_parse.assert_called_once_with(argv) mock_setup.assert_called_once_with(base.CONF, 'magnum') mock_reg.assert_called_once_with(base.CONF) mock_set.assert_called_once_with() @mock.patch.object(logging, 'register_options') @mock.patch.object(logging, 'setup') @mock.patch('magnum.common.config.set_config_defaults') @mock.patch('magnum.common.config.parse_args') def test_prepare_service_with_argv_none(self, mock_parse, mock_set, mock_setup, mock_reg): argv = None mock_parse.side_effect = lambda *args, **kwargs: None service.prepare_service(argv) mock_parse.assert_called_once_with([]) mock_setup.assert_called_once_with(base.CONF, 'magnum') mock_reg.assert_called_once_with(base.CONF) mock_set.assert_called_once_with() magnum-6.1.0/magnum/tests/unit/common/__init__.py0000666000175100017510000000000013244017334022025 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/common/test_short_id.py0000666000175100017510000000536213244017334023160 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import uuid import testtools from magnum.common import short_id class ShortIdTest(testtools.TestCase): def test_byte_string_8(self): self.assertEqual('\xab', short_id._to_byte_string(0xab, 8)) self.assertEqual('\x05', short_id._to_byte_string(0x05, 8)) def test_byte_string_16(self): self.assertEqual('\xab\xcd', short_id._to_byte_string(0xabcd, 16)) self.assertEqual('\x0a\xbc', short_id._to_byte_string(0xabc, 16)) def test_byte_string_12(self): self.assertEqual('\xab\xc0', short_id._to_byte_string(0xabc, 12)) self.assertEqual('\x0a\xb0', short_id._to_byte_string(0x0ab, 12)) def test_byte_string_60(self): val = 0x111111111111111 byte_string = short_id._to_byte_string(val, 60) self.assertEqual('\x11\x11\x11\x11\x11\x11\x11\x10', byte_string) def test_get_id_string(self): id = short_id.get_id('11111111-1111-4111-bfff-ffffffffffff') self.assertEqual('ceirceirceir', id) def test_get_id_uuid_1(self): source = uuid.UUID('11111111-1111-4111-bfff-ffffffffffff') self.assertEqual(0x111111111111111, source.time) self.assertEqual('ceirceirceir', short_id.get_id(source)) def test_get_id_uuid_f(self): source = uuid.UUID('ffffffff-ffff-4fff-8000-000000000000') self.assertEqual('777777777777', short_id.get_id(source)) def test_get_id_uuid_0(self): source = uuid.UUID('00000000-0000-4000-bfff-ffffffffffff') self.assertEqual('aaaaaaaaaaaa', short_id.get_id(source)) def test_get_id_uuid_endianness(self): source = uuid.UUID('ffffffff-00ff-4000-aaaa-aaaaaaaaaaaa') self.assertEqual('aaaa77777777', short_id.get_id(source)) def test_get_id_uuid1(self): source = uuid.uuid1() self.assertRaises(ValueError, short_id.get_id, source) def test_generate_ids(self): allowed_chars = 'abcdefghijklmnopqrstuvwxyz234567' ids = [short_id.generate_id() for i in range(25)] for id in ids: self.assertEqual(12, len(id)) s = ''.join(ch for ch in id if ch not in allowed_chars) self.assertEqual(s, '') 
self.assertEqual(1, ids.count(id)) magnum-6.1.0/magnum/tests/unit/common/test_urlfetch.py0000666000175100017510000000360113244017334023153 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from mock import patch from oslo_config import cfg from magnum.common import urlfetch from magnum.tests import base class TestUrlFetch(base.BaseTestCase): def test_get_unsupported_scheme(self): self.assertRaises(urlfetch.URLFetchError, urlfetch.get, 'https://example.com', ('http')) @patch('requests.get') def test_get(self, mock_request_get): mock_reader = mock.MagicMock() mock_reader.__iter__.return_value = ['a', 'b', 'c'] mock_response = mock.MagicMock() mock_response.iter_content.return_value = mock_reader mock_request_get.return_value = mock_response self.assertEqual('abc', urlfetch.get('http://example.com')) @patch('requests.get') def test_get_exceed_manifest_size(self, mock_request_get): cfg.CONF.set_override("max_manifest_size", 1) mock_reader = mock.MagicMock() mock_reader.__iter__.return_value = ['a', 'b'] mock_response = mock.MagicMock() mock_response.iter_content.return_value = mock_reader mock_request_get.return_value = mock_response self.assertRaises(urlfetch.URLFetchError, urlfetch.get, 'http://example.com') magnum-6.1.0/magnum/tests/unit/common/test_docker_utils.py0000666000175100017510000000760013244017334024031 0ustar zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co.,LTD. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import docker import mock from magnum.common import docker_utils import magnum.conf from magnum.tests import base CONF = magnum.conf.CONF class TestDockerUtils(base.BaseTestCase): def test_is_docker_api_version_atleast(self): def fake_version(): return {'ApiVersion': '1.18'} docker_client = mock.MagicMock() docker_client.version.side_effect = fake_version res = docker_utils.is_docker_api_version_atleast(docker_client, '1.21') self.assertFalse(res) class DockerClientTestCase(base.BaseTestCase): def test_docker_client_init(self): client = docker_utils.DockerHTTPClient() self.assertEqual(CONF.docker.docker_remote_api_version, client.api_version) self.assertEqual(CONF.docker.default_timeout, client.timeout) def test_docker_client_init_timeout(self): expected_timeout = 300 client = docker_utils.DockerHTTPClient(timeout=expected_timeout) self.assertEqual(CONF.docker.docker_remote_api_version, client.api_version) self.assertEqual(expected_timeout, client.timeout) def test_docker_client_init_url(self): expected_url = 'http://127.0.0.1:2375' client = docker_utils.DockerHTTPClient(url=expected_url) self.assertEqual(expected_url, client.base_url) self.assertEqual(CONF.docker.docker_remote_api_version, client.api_version) self.assertEqual(CONF.docker.default_timeout, client.timeout) def test_docker_client_init_version(self): expected_version = '1.16' client = docker_utils.DockerHTTPClient(ver=expected_version) self.assertEqual(expected_version, 
client.api_version) self.assertEqual(CONF.docker.default_timeout, client.timeout) @mock.patch.object(docker.APIClient, 'inspect_container') @mock.patch.object(docker.APIClient, 'containers') def test_list_instances(self, mock_containers, mock_inspect): client = docker_utils.DockerHTTPClient() containers = [dict(Id=x) for x in range(0, 3)] inspect_results = [dict(Config=dict(Hostname=x)) for x in range(0, 3)] mock_containers.return_value = containers mock_inspect.side_effect = inspect_results instances = client.list_instances() self.assertEqual([0, 1, 2], instances) mock_containers.assert_called_once_with(all=True) mock_inspect.assert_has_calls([mock.call(x) for x in range(0, 3)]) @mock.patch.object(docker.APIClient, 'inspect_container') @mock.patch.object(docker.APIClient, 'containers') def test_list_instances_inspect(self, mock_containers, mock_inspect): client = docker_utils.DockerHTTPClient() containers = [dict(Id=x) for x in range(0, 3)] inspect_results = [dict(Config=dict(Hostname=x)) for x in range(0, 3)] mock_containers.return_value = containers mock_inspect.side_effect = inspect_results instances = client.list_instances(inspect=True) self.assertEqual(inspect_results, instances) mock_containers.assert_called_once_with(all=True) mock_inspect.assert_has_calls([mock.call(x) for x in range(0, 3)]) magnum-6.1.0/magnum/tests/unit/common/test_rpc.py0000666000175100017510000002207613244017334022132 0ustar zuulzuul00000000000000# Copyright 2017 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import mock import oslo_messaging as messaging from oslo_messaging.rpc import dispatcher from oslo_serialization import jsonutils from magnum.common import context from magnum.common import rpc from magnum.tests import base class TestRpc(base.TestCase): @mock.patch.object(rpc, 'profiler', None) @mock.patch.object(rpc, 'RequestContextSerializer') @mock.patch.object(messaging, 'RPCClient') def test_get_client(self, mock_client, mock_ser): rpc.TRANSPORT = mock.Mock() tgt = mock.Mock() ser = mock.Mock() mock_client.return_value = 'client' mock_ser.return_value = ser client = rpc.get_client(tgt, version_cap='1.0', serializer='foo', timeout=6969) mock_ser.assert_called_once_with('foo') mock_client.assert_called_once_with(rpc.TRANSPORT, tgt, version_cap='1.0', serializer=ser, timeout=6969) self.assertEqual('client', client) @mock.patch.object(rpc, 'profiler', mock.Mock()) @mock.patch.object(rpc, 'ProfilerRequestContextSerializer') @mock.patch.object(messaging, 'RPCClient') def test_get_client_profiler_enabled(self, mock_client, mock_ser): rpc.TRANSPORT = mock.Mock() tgt = mock.Mock() ser = mock.Mock() mock_client.return_value = 'client' mock_ser.return_value = ser client = rpc.get_client(tgt, version_cap='1.0', serializer='foo', timeout=6969) mock_ser.assert_called_once_with('foo') mock_client.assert_called_once_with(rpc.TRANSPORT, tgt, version_cap='1.0', serializer=ser, timeout=6969) self.assertEqual('client', client) @mock.patch.object(rpc, 'profiler', None) @mock.patch.object(rpc, 'RequestContextSerializer') @mock.patch.object(messaging, 'get_rpc_server') def test_get_server(self, mock_get, mock_ser): rpc.TRANSPORT = mock.Mock() ser = mock.Mock() tgt = mock.Mock() ends = mock.Mock() mock_ser.return_value = ser mock_get.return_value = 'server' access_policy = dispatcher.DefaultRPCAccessPolicy server = rpc.get_server(tgt, ends, serializer='foo') 
mock_ser.assert_called_once_with('foo') mock_get.assert_called_once_with(rpc.TRANSPORT, tgt, ends, executor='eventlet', serializer=ser, access_policy=access_policy) self.assertEqual('server', server) @mock.patch.object(rpc, 'profiler', mock.Mock()) @mock.patch.object(rpc, 'ProfilerRequestContextSerializer') @mock.patch.object(messaging, 'get_rpc_server') def test_get_server_profiler_enabled(self, mock_get, mock_ser): rpc.TRANSPORT = mock.Mock() ser = mock.Mock() tgt = mock.Mock() ends = mock.Mock() mock_ser.return_value = ser mock_get.return_value = 'server' access_policy = dispatcher.DefaultRPCAccessPolicy server = rpc.get_server(tgt, ends, serializer='foo') mock_ser.assert_called_once_with('foo') mock_get.assert_called_once_with(rpc.TRANSPORT, tgt, ends, executor='eventlet', serializer=ser, access_policy=access_policy) self.assertEqual('server', server) @mock.patch.object(messaging, 'TransportURL') def test_get_transport_url(self, mock_url): conf = mock.Mock() rpc.CONF = conf mock_url.parse.return_value = 'foo' url = rpc.get_transport_url(url_str='bar') self.assertEqual('foo', url) mock_url.parse.assert_called_once_with(conf, 'bar') @mock.patch.object(messaging, 'TransportURL') def test_get_transport_url_null(self, mock_url): conf = mock.Mock() rpc.CONF = conf mock_url.parse.return_value = 'foo' url = rpc.get_transport_url() self.assertEqual('foo', url) mock_url.parse.assert_called_once_with(conf, None) def test_cleanup_transport_null(self): rpc.TRANSPORT = None rpc.NOTIFIER = mock.Mock() self.assertRaises(AssertionError, rpc.cleanup) def test_cleanup_notifier_null(self): rpc.TRANSPORT = mock.Mock() rpc.NOTIFIER = None self.assertRaises(AssertionError, rpc.cleanup) def test_cleanup(self): rpc.NOTIFIER = mock.Mock() rpc.TRANSPORT = mock.Mock() trans_cleanup = mock.Mock() rpc.TRANSPORT.cleanup = trans_cleanup rpc.cleanup() trans_cleanup.assert_called_once_with() self.assertIsNone(rpc.TRANSPORT) self.assertIsNone(rpc.NOTIFIER) def test_add_extra_exmods(self): 
rpc.EXTRA_EXMODS = [] rpc.add_extra_exmods('foo', 'bar') self.assertEqual(['foo', 'bar'], rpc.EXTRA_EXMODS) def test_clear_extra_exmods(self): rpc.EXTRA_EXMODS = ['foo', 'bar'] rpc.clear_extra_exmods() self.assertEqual(0, len(rpc.EXTRA_EXMODS)) def test_serialize_entity(self): with mock.patch.object(jsonutils, 'to_primitive') as mock_prim: rpc.JsonPayloadSerializer.serialize_entity('context', 'entity') mock_prim.assert_called_once_with('entity', convert_instances=True) class TestRequestContextSerializer(base.TestCase): def setUp(self): super(TestRequestContextSerializer, self).setUp() self.mock_base = mock.Mock() self.ser = rpc.RequestContextSerializer(self.mock_base) self.ser_null = rpc.RequestContextSerializer(None) def test_serialize_entity(self): self.mock_base.serialize_entity.return_value = 'foo' ser_ent = self.ser.serialize_entity('context', 'entity') self.mock_base.serialize_entity.assert_called_once_with('context', 'entity') self.assertEqual('foo', ser_ent) def test_serialize_entity_null_base(self): ser_ent = self.ser_null.serialize_entity('context', 'entity') self.assertEqual('entity', ser_ent) def test_deserialize_entity(self): self.mock_base.deserialize_entity.return_value = 'foo' deser_ent = self.ser.deserialize_entity('context', 'entity') self.mock_base.deserialize_entity.assert_called_once_with('context', 'entity') self.assertEqual('foo', deser_ent) def test_deserialize_entity_null_base(self): deser_ent = self.ser_null.deserialize_entity('context', 'entity') self.assertEqual('entity', deser_ent) def test_serialize_context(self): context = mock.Mock() self.ser.serialize_context(context) context.to_dict.assert_called_once_with() @mock.patch.object(context, 'RequestContext') def test_deserialize_context(self, mock_req): self.ser.deserialize_context('context') mock_req.from_dict.assert_called_once_with('context') class TestProfilerRequestContextSerializer(base.TestCase): def setUp(self): super(TestProfilerRequestContextSerializer, self).setUp() self.ser 
= rpc.ProfilerRequestContextSerializer(mock.Mock()) @mock.patch('magnum.common.rpc.profiler') def test_serialize_context(self, mock_profiler): prof = mock_profiler.get.return_value prof.hmac_key = 'swordfish' prof.get_base_id.return_value = 'baseid' prof.get_id.return_value = 'parentid' context = mock.Mock() context.to_dict.return_value = {'project_id': 'test'} self.assertEqual({ 'project_id': 'test', 'trace_info': { 'hmac_key': 'swordfish', 'base_id': 'baseid', 'parent_id': 'parentid' } }, self.ser.serialize_context(context)) @mock.patch('magnum.common.rpc.profiler') def test_deserialize_context(self, mock_profiler): serialized = {'project_id': 'test', 'trace_info': { 'hmac_key': 'swordfish', 'base_id': 'baseid', 'parent_id': 'parentid'}} context = self.ser.deserialize_context(serialized) self.assertEqual('test', context.project_id) mock_profiler.init.assert_called_once_with( hmac_key='swordfish', base_id='baseid', parent_id='parentid') magnum-6.1.0/magnum/tests/unit/cmd/0000775000175100017510000000000013244017675017207 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/cmd/test_driver_manage.py0000666000175100017510000000627413244017334023426 0ustar zuulzuul00000000000000# Copyright 2016 - Fujitsu, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from magnum.cmd import driver_manage from magnum.tests import base class TestMagnumDriverManage(base.TestCase): # Fake entrypoints method @staticmethod def _fake_entry(num_of_entries): while num_of_entries: fake_entry = mock.MagicMock() fake_entry.name = 'magnum_' + 'test_' + \ 'foo_' + 'bar'*num_of_entries fake_cls = mock.MagicMock() fake_definition = fake_cls() fake_definition.provides = [{'coe': 'foo', 'os': 'bar', 'server_type': 'test'}] fake_definition.get_template_definition.return_value = \ mock.MagicMock(template_path='fake_path') yield fake_entry, fake_cls num_of_entries -= 1 @mock.patch.object(driver_manage.DriverManager, 'run') @mock.patch('sys.argv', ['foo', 'bar']) def test_none_arg(self, mock_run): args = None driver_manage.main(args) mock_run.assert_called_once_with(['bar']) # NOTE(hieulq): we fake the entrypoints then we need to mock the cliff # produce_output in order to assert with fake value @mock.patch('magnum.cmd.driver_manage.DriverList.produce_output') @mock.patch('magnum.drivers.common.driver.Driver') def test_correct_arg_with_details_and_path(self, mock_driver, mock_produce): args = ['list-drivers', '-d', '-p'] mock_driver.load_entry_points.return_value = self._fake_entry(1) driver_manage.main(args) mock_driver.load_entry_points.assert_called_once_with() mock_produce.assert_called_once_with(mock.ANY, mock.ANY, [('magnum_test_foo_bar', 'test', 'bar', 'foo', 'fake_path')]) # NOTE(hieulq): we fake the entrypoints then we need to mock the cliff # produce_output in order to assert with fake value @mock.patch('magnum.cmd.driver_manage.DriverList.produce_output') @mock.patch('magnum.drivers.common.driver.Driver') def test_correct_arg_without_details_and_path(self, mock_driver, mock_produce): args = ['list-drivers'] mock_driver.load_entry_points.return_value = self._fake_entry(1) driver_manage.main(args) mock_driver.load_entry_points.assert_called_once_with() mock_produce.assert_called_once_with(mock.ANY, mock.ANY, 
[('magnum_test_foo_bar',)]) magnum-6.1.0/magnum/tests/unit/cmd/test_conductor.py0000666000175100017510000000450013244017334022611 0ustar zuulzuul00000000000000# Copyright 2016 - Fujitsu, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_concurrency import processutils from magnum.cmd import conductor from magnum.tests import base class TestMagnumConductor(base.TestCase): @mock.patch('oslo_service.service.launch') @mock.patch.object(conductor, 'rpc_service') @mock.patch('magnum.common.service.prepare_service') def test_conductor(self, mock_prep, mock_rpc, mock_launch): conductor.main() server = mock_rpc.Service.create.return_value launcher = mock_launch.return_value mock_prep.assert_called_once_with(mock.ANY) mock_rpc.Service.create.assert_called_once_with( base.CONF.conductor.topic, mock.ANY, mock.ANY, binary='magnum-conductor') workers = processutils.get_worker_count() mock_launch.assert_called_once_with(base.CONF, server, workers=workers) launcher.wait.assert_called_once_with() @mock.patch('oslo_service.service.launch') @mock.patch.object(conductor, 'rpc_service') @mock.patch('magnum.common.service.prepare_service') def test_conductor_config_workers(self, mock_prep, mock_rpc, mock_launch): fake_workers = 8 self.config(workers=fake_workers, group='conductor') conductor.main() server = mock_rpc.Service.create.return_value launcher = mock_launch.return_value mock_prep.assert_called_once_with(mock.ANY) mock_rpc.Service.create.assert_called_once_with( 
base.CONF.conductor.topic, mock.ANY, mock.ANY, binary='magnum-conductor') mock_launch.assert_called_once_with(base.CONF, server, workers=fake_workers) launcher.wait.assert_called_once_with() magnum-6.1.0/magnum/tests/unit/cmd/test_db_manage.py0000666000175100017510000000441313244017334022511 0ustar zuulzuul00000000000000# Copyright 2016 - Fujitsu, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock import six from magnum.cmd import db_manage from magnum.tests import base class TestMagnumDbManage(base.TestCase): def setUp(self): super(TestMagnumDbManage, self).setUp() def clear_conf(): db_manage.CONF.reset() db_manage.CONF.unregister_opt(db_manage.command_opt) clear_conf() self.addCleanup(clear_conf) @mock.patch('magnum.db.migration.version') @mock.patch('sys.argv', ['magnum-db-manage', 'version']) def test_db_manage_version(self, mock_version): with mock.patch('sys.stdout', new=six.StringIO()) as fakeOutput: mock_version.return_value = '123456' db_manage.main() self.assertEqual('Current DB revision is 123456\n', fakeOutput.getvalue()) mock_version.assert_called_once_with() @mock.patch('magnum.db.migration.upgrade') @mock.patch('sys.argv', ['magnum-db-manage', 'upgrade']) def test_db_manage_upgrade(self, mock_upgrade): db_manage.main() mock_upgrade.assert_called_once_with(base.CONF.command.revision) @mock.patch('magnum.db.migration.stamp') @mock.patch('sys.argv', ['magnum-db-manage', 'stamp', 'foo bar']) def test_db_manage_stamp(self, mock_stamp): db_manage.main() 
mock_stamp.assert_called_once_with('foo bar') @mock.patch('magnum.db.migration.revision') @mock.patch('sys.argv', ['magnum-db-manage', 'revision', '-m', 'foo bar']) def test_db_manage_revision(self, mock_revision): db_manage.main() mock_revision.assert_called_once_with( message='foo bar', autogenerate=base.CONF.command.autogenerate) magnum-6.1.0/magnum/tests/unit/cmd/__init__.py0000666000175100017510000000000013244017334021300 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/cmd/test_api.py0000666000175100017510000001160713244017334021370 0ustar zuulzuul00000000000000# Copyright 2016 - Fujitsu, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from oslo_concurrency import processutils from magnum.cmd import api from magnum.tests import base # NOTE(hieulq): need to mock MagnumObject, otherwise other test cases # will be failed because of setting wrong ovo indirection api @mock.patch('magnum.objects.base.MagnumObject') class TestMagnumAPI(base.TestCase): @mock.patch('werkzeug.serving.run_simple') @mock.patch.object(api, 'api_app') @mock.patch('magnum.common.service.prepare_service') def test_api_http(self, mock_prep, mock_app, mock_run, mock_base): api.main() app = mock_app.load_app.return_value mock_prep.assert_called_once_with(mock.ANY) mock_app.load_app.assert_called_once_with() workers = processutils.get_worker_count() mock_run.assert_called_once_with(base.CONF.api.host, base.CONF.api.port, app, processes=workers, ssl_context=None) @mock.patch('werkzeug.serving.run_simple') @mock.patch.object(api, 'api_app') @mock.patch('magnum.common.service.prepare_service') def test_api_http_config_workers(self, mock_prep, mock_app, mock_run, mock_base): fake_workers = 8 self.config(workers=fake_workers, group='api') api.main() app = mock_app.load_app.return_value mock_prep.assert_called_once_with(mock.ANY) mock_app.load_app.assert_called_once_with() mock_run.assert_called_once_with(base.CONF.api.host, base.CONF.api.port, app, processes=fake_workers, ssl_context=None) @mock.patch('os.path.exists') @mock.patch('werkzeug.serving.run_simple') @mock.patch.object(api, 'api_app') @mock.patch('magnum.common.service.prepare_service') def test_api_https_no_cert(self, mock_prep, mock_app, mock_run, mock_exist, mock_base): self.config(enabled_ssl=True, ssl_cert_file='tmp_crt', group='api') mock_exist.return_value = False self.assertRaises(RuntimeError, api.main) mock_prep.assert_called_once_with(mock.ANY) mock_app.load_app.assert_called_once_with() mock_run.assert_not_called() mock_exist.assert_called_once_with('tmp_crt') @mock.patch('os.path.exists') @mock.patch('werkzeug.serving.run_simple') 
@mock.patch.object(api, 'api_app') @mock.patch('magnum.common.service.prepare_service') def test_api_https_no_key(self, mock_prep, mock_app, mock_run, mock_exist, mock_base): self.config(enabled_ssl=True, ssl_cert_file='tmp_crt', ssl_key_file='tmp_key', group='api') mock_exist.side_effect = [True, False] self.assertRaises(RuntimeError, api.main) mock_prep.assert_called_once_with(mock.ANY) mock_app.load_app.assert_called_once_with() mock_run.assert_not_called() mock_exist.assert_has_calls([mock.call('tmp_crt'), mock.call('tmp_key')]) @mock.patch('os.path.exists') @mock.patch('werkzeug.serving.run_simple') @mock.patch.object(api, 'api_app') @mock.patch('magnum.common.service.prepare_service') def test_api_https(self, mock_prep, mock_app, mock_run, mock_exist, mock_base): self.config(enabled_ssl=True, ssl_cert_file='tmp_crt', ssl_key_file='tmp_key', group='api') mock_exist.side_effect = [True, True] api.main() app = mock_app.load_app.return_value mock_prep.assert_called_once_with(mock.ANY) mock_app.load_app.assert_called_once_with() mock_exist.assert_has_calls([mock.call('tmp_crt'), mock.call('tmp_key')]) workers = processutils.get_worker_count() mock_run.assert_called_once_with(base.CONF.api.host, base.CONF.api.port, app, processes=workers, ssl_context=('tmp_crt', 'tmp_key')) magnum-6.1.0/magnum/tests/unit/service/0000775000175100017510000000000013244017675020104 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/service/test_periodic.py0000666000175100017510000004113113244017334023305 0ustar zuulzuul00000000000000# Copyright 2015 Intel, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from magnum.common import context from magnum.common.rpc_service import CONF from magnum.db.sqlalchemy import api as dbapi from magnum.drivers.common import driver from magnum import objects from magnum.objects.fields import ClusterStatus as cluster_status from magnum.service import periodic from magnum.tests import base from magnum.tests import fake_notifier from magnum.tests import fakes from magnum.tests.unit.db import utils class fake_stack(object): def __init__(self, **kw): for key, val in kw.items(): setattr(self, key, val) class PeriodicTestCase(base.TestCase): def setUp(self): super(PeriodicTestCase, self).setUp() self.context = context.make_admin_context() # Can be identical for all clusters. 
trust_attrs = { 'trustee_username': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', 'trustee_password': 'ain7einaebooVaig6d', 'trust_id': '39d920ca-67c6-4047-b57a-01e9e16bb96f', } trust_attrs.update({'id': 1, 'stack_id': '11', 'status': cluster_status.CREATE_IN_PROGRESS, 'status_reason': 'no change'}) cluster1 = utils.get_test_cluster(**trust_attrs) trust_attrs.update({'id': 2, 'stack_id': '22', 'status': cluster_status.DELETE_IN_PROGRESS, 'status_reason': 'no change'}) cluster2 = utils.get_test_cluster(**trust_attrs) trust_attrs.update({'id': 3, 'stack_id': '33', 'status': cluster_status.UPDATE_IN_PROGRESS, 'status_reason': 'no change'}) cluster3 = utils.get_test_cluster(**trust_attrs) trust_attrs.update({'id': 4, 'stack_id': '44', 'status': cluster_status.DELETE_IN_PROGRESS, 'status_reason': 'no change'}) cluster4 = utils.get_test_cluster(**trust_attrs) trust_attrs.update({'id': 5, 'stack_id': '55', 'status': cluster_status.ROLLBACK_IN_PROGRESS, 'status_reason': 'no change'}) cluster5 = utils.get_test_cluster(**trust_attrs) self.cluster1 = objects.Cluster(self.context, **cluster1) self.cluster2 = objects.Cluster(self.context, **cluster2) self.cluster3 = objects.Cluster(self.context, **cluster3) self.cluster4 = objects.Cluster(self.context, **cluster4) self.cluster5 = objects.Cluster(self.context, **cluster5) # these tests are based on the basic behavior of our standard # Heat-based drivers, but drivers based on other orchestration # methods should generally behave in a similar fashion as far # as the actual calls go. 
It is up to the driver implementor # to ensure their implementation of update_cluster_status behaves # as expected regardless of how the periodic updater task works self.mock_heat_client = mock.MagicMock() self.stack1 = fake_stack( id='11', stack_status=cluster_status.CREATE_COMPLETE, stack_status_reason='fake_reason_11') self.stack2 = fake_stack( id='22', stack_status=cluster_status.DELETE_IN_PROGRESS, stack_status_reason='fake_reason_11') self.stack3 = fake_stack( id='33', stack_status=cluster_status.UPDATE_COMPLETE, stack_status_reason='fake_reason_33') self.stack5 = fake_stack( id='55', stack_status=cluster_status.ROLLBACK_COMPLETE, stack_status_reason='fake_reason_55') self.mock_heat_client.stacks.list.return_value = [ self.stack1, self.stack2, self.stack3, self.stack5] self.get_stacks = { '11': self.stack1, '22': self.stack2, '33': self.stack3, '55': self.stack5 } self.mock_driver = mock.MagicMock(spec=driver.Driver) def _mock_update_status(context, cluster): try: stack = self.get_stacks[cluster.stack_id] except KeyError: cluster.status_reason = "Stack %s not found" % cluster.stack_id if cluster.status == "DELETE_IN_PROGRESS": cluster.status = cluster_status.DELETE_COMPLETE else: cluster.status = cluster.status.replace("IN_PROGRESS", "FAILED") cluster.status = cluster.status.replace("COMPLETE", "FAILED") else: if cluster.status != stack.stack_status: cluster.status = stack.stack_status cluster.status_reason = stack.stack_status_reason self.mock_driver.update_cluster_status.side_effect = ( _mock_update_status) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=fakes.FakeLoopingCall) @mock.patch('magnum.drivers.common.driver.Driver.get_driver_for_cluster') @mock.patch('magnum.objects.Cluster.list') @mock.patch.object(dbapi.Connection, 'destroy_cluster') def test_sync_cluster_status_changes(self, mock_db_destroy, mock_cluster_list, mock_get_driver): mock_cluster_list.return_value = [self.cluster1, self.cluster2, self.cluster3, self.cluster4, 
self.cluster5] mock_get_driver.return_value = self.mock_driver periodic.MagnumPeriodicTasks(CONF).sync_cluster_status(None) self.assertEqual(cluster_status.CREATE_COMPLETE, self.cluster1.status) self.assertEqual('fake_reason_11', self.cluster1.status_reason) # make sure cluster 2 didn't change self.assertEqual(cluster_status.DELETE_IN_PROGRESS, self.cluster2.status) self.assertEqual('no change', self.cluster2.status_reason) self.assertEqual(cluster_status.UPDATE_COMPLETE, self.cluster3.status) self.assertEqual('fake_reason_33', self.cluster3.status_reason) mock_db_destroy.assert_called_once_with(self.cluster4.uuid) self.assertEqual(cluster_status.ROLLBACK_COMPLETE, self.cluster5.status) self.assertEqual('fake_reason_55', self.cluster5.status_reason) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(4, len(notifications)) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=fakes.FakeLoopingCall) @mock.patch('magnum.drivers.common.driver.Driver.get_driver_for_cluster') @mock.patch('magnum.objects.Cluster.list') def test_sync_cluster_status_not_changes(self, mock_cluster_list, mock_get_driver): self.stack1.stack_status = self.cluster1.status self.stack2.stack_status = self.cluster2.status self.stack3.stack_status = self.cluster3.status self.stack5.stack_status = self.cluster5.status mock_cluster_list.return_value = [self.cluster1, self.cluster2, self.cluster3, self.cluster5] mock_get_driver.return_value = self.mock_driver periodic.MagnumPeriodicTasks(CONF).sync_cluster_status(None) self.assertEqual(cluster_status.CREATE_IN_PROGRESS, self.cluster1.status) self.assertEqual('no change', self.cluster1.status_reason) self.assertEqual(cluster_status.DELETE_IN_PROGRESS, self.cluster2.status) self.assertEqual('no change', self.cluster2.status_reason) self.assertEqual(cluster_status.UPDATE_IN_PROGRESS, self.cluster3.status) self.assertEqual('no change', self.cluster3.status_reason) self.assertEqual(cluster_status.ROLLBACK_IN_PROGRESS, 
self.cluster5.status) self.assertEqual('no change', self.cluster5.status_reason) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(0, len(notifications)) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=fakes.FakeLoopingCall) @mock.patch('magnum.drivers.common.driver.Driver.get_driver_for_cluster') @mock.patch('magnum.objects.Cluster.list') @mock.patch.object(dbapi.Connection, 'destroy_cluster') def test_sync_cluster_status_heat_not_found(self, mock_db_destroy, mock_cluster_list, mock_get_driver): self.get_stacks.clear() mock_get_driver.return_value = self.mock_driver mock_cluster_list.return_value = [self.cluster1, self.cluster2, self.cluster3, self.cluster4, self.cluster5] periodic.MagnumPeriodicTasks(CONF).sync_cluster_status(None) self.assertEqual(cluster_status.CREATE_FAILED, self.cluster1.status) self.assertEqual('Stack 11 not found', self.cluster1.status_reason) self.assertEqual(cluster_status.UPDATE_FAILED, self.cluster3.status) self.assertEqual('Stack 33 not found', self.cluster3.status_reason) self.assertEqual(cluster_status.ROLLBACK_FAILED, self.cluster5.status) self.assertEqual('Stack 55 not found', self.cluster5.status_reason) mock_db_destroy.assert_has_calls([ mock.call(self.cluster2.uuid), mock.call(self.cluster4.uuid) ]) self.assertEqual(2, mock_db_destroy.call_count) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(5, len(notifications)) @mock.patch('magnum.conductor.monitors.create_monitor') @mock.patch('magnum.objects.Cluster.list') @mock.patch('magnum.common.rpc.get_notifier') @mock.patch('magnum.common.context.make_admin_context') def test_send_cluster_metrics(self, mock_make_admin_context, mock_get_notifier, mock_cluster_list, mock_create_monitor): """Test if RPC notifier receives the expected message""" mock_make_admin_context.return_value = self.context notifier = mock.MagicMock() mock_get_notifier.return_value = notifier mock_cluster_list.return_value = [self.cluster1, self.cluster2, self.cluster3, 
self.cluster4] self.cluster4.status = cluster_status.CREATE_COMPLETE monitor = mock.MagicMock() monitor.get_metric_names.return_value = ['metric1', 'metric2'] monitor.compute_metric_value.return_value = 30 monitor.get_metric_unit.return_value = '%' mock_create_monitor.return_value = monitor periodic.MagnumPeriodicTasks(CONF)._send_cluster_metrics(self.context) expected_event_type = 'magnum.cluster.metrics.update' expected_metrics = [ { 'name': 'metric1', 'value': 30, 'unit': '%', }, { 'name': 'metric2', 'value': 30, 'unit': '%', }, ] expected_msg = { 'user_id': self.cluster4.user_id, 'project_id': self.cluster4.project_id, 'resource_id': self.cluster4.uuid, 'metrics': expected_metrics } self.assertEqual(1, mock_create_monitor.call_count) notifier.info.assert_called_once_with( self.context, expected_event_type, expected_msg) @mock.patch('magnum.conductor.monitors.create_monitor') @mock.patch('magnum.objects.Cluster.list') @mock.patch('magnum.common.rpc.get_notifier') @mock.patch('magnum.common.context.make_admin_context') def test_send_cluster_metrics_compute_metric_raise( self, mock_make_admin_context, mock_get_notifier, mock_cluster_list, mock_create_monitor): mock_make_admin_context.return_value = self.context notifier = mock.MagicMock() mock_get_notifier.return_value = notifier mock_cluster_list.return_value = [self.cluster4] self.cluster4.status = cluster_status.CREATE_COMPLETE monitor = mock.MagicMock() monitor.get_metric_names.return_value = ['metric1', 'metric2'] monitor.compute_metric_value.side_effect = Exception( "error on computing metric") mock_create_monitor.return_value = monitor periodic.MagnumPeriodicTasks(CONF)._send_cluster_metrics(self.context) expected_event_type = 'magnum.cluster.metrics.update' expected_msg = { 'user_id': self.cluster4.user_id, 'project_id': self.cluster4.project_id, 'resource_id': self.cluster4.uuid, 'metrics': [] } self.assertEqual(1, mock_create_monitor.call_count) notifier.info.assert_called_once_with( self.context, 
expected_event_type, expected_msg) @mock.patch('magnum.conductor.monitors.create_monitor') @mock.patch('magnum.objects.Cluster.list') @mock.patch('magnum.common.rpc.get_notifier') @mock.patch('magnum.common.context.make_admin_context') def test_send_cluster_metrics_pull_data_raise( self, mock_make_admin_context, mock_get_notifier, mock_cluster_list, mock_create_monitor): mock_make_admin_context.return_value = self.context notifier = mock.MagicMock() mock_get_notifier.return_value = notifier mock_cluster_list.return_value = [self.cluster4] self.cluster4.status = cluster_status.CREATE_COMPLETE monitor = mock.MagicMock() monitor.pull_data.side_effect = Exception("error on pulling data") mock_create_monitor.return_value = monitor periodic.MagnumPeriodicTasks(CONF)._send_cluster_metrics(self.context) self.assertEqual(1, mock_create_monitor.call_count) self.assertEqual(0, notifier.info.call_count) @mock.patch('magnum.conductor.monitors.create_monitor') @mock.patch('magnum.objects.Cluster.list') @mock.patch('magnum.common.rpc.get_notifier') @mock.patch('magnum.common.context.make_admin_context') def test_send_cluster_metrics_monitor_none( self, mock_make_admin_context, mock_get_notifier, mock_cluster_list, mock_create_monitor): mock_make_admin_context.return_value = self.context notifier = mock.MagicMock() mock_get_notifier.return_value = notifier mock_cluster_list.return_value = [self.cluster4] self.cluster4.status = cluster_status.CREATE_COMPLETE mock_create_monitor.return_value = None periodic.MagnumPeriodicTasks(CONF)._send_cluster_metrics(self.context) self.assertEqual(1, mock_create_monitor.call_count) self.assertEqual(0, notifier.info.call_count) @mock.patch('magnum.conductor.monitors.create_monitor') @mock.patch('magnum.objects.Cluster.list') @mock.patch('magnum.common.rpc.get_notifier') @mock.patch('magnum.common.context.make_admin_context') def test_send_cluster_metrics_disable_pull_data( self, mock_make_admin_context, mock_get_notifier, mock_cluster_list, 
mock_create_monitor): mock_make_admin_context.return_value = self.context notifier = mock.MagicMock() mock_get_notifier.return_value = notifier mock_cluster_list.return_value = [self.cluster1, self.cluster2, self.cluster3, self.cluster4] self.cluster4.status = cluster_status.CREATE_COMPLETE monitor = mock.MagicMock() monitor.get_metric_names.return_value = ['metric1', 'metric2'] monitor.compute_metric_value.return_value = 30 monitor.get_metric_unit.return_value = '%' mock_create_monitor.return_value = monitor CONF.set_override('send_cluster_metrics', False, group='drivers') periodic.MagnumPeriodicTasks(CONF)._send_cluster_metrics(self.context) self.assertEqual(0, mock_create_monitor.call_count) self.assertEqual(0, notifier.info.call_count) magnum-6.1.0/magnum/tests/unit/service/__init__.py0000666000175100017510000000000013244017334022175 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/servicegroup/0000775000175100017510000000000013244017675021161 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/servicegroup/test_magnum_service.py0000666000175100017510000000600413244017334025570 0ustar zuulzuul00000000000000# Copyright 2015 - Yahoo! Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from magnum.common.rpc_service import CONF from magnum import objects from magnum.servicegroup import magnum_service_periodic as periodic from magnum.tests import base class MagnumServicePeriodicTestCase(base.TestCase): def setUp(self): super(MagnumServicePeriodicTestCase, self).setUp() mock_magnum_service_refresh = mock.Mock() class FakeMS(object): report_state_up = mock_magnum_service_refresh self.fake_ms = FakeMS() self.fake_ms_refresh = mock_magnum_service_refresh @mock.patch.object(objects.MagnumService, 'get_by_host_and_binary') @mock.patch.object(objects.MagnumService, 'create') @mock.patch.object(objects.MagnumService, 'report_state_up') def test_update_magnum_service_firsttime(self, mock_ms_refresh, mock_ms_create, mock_ms_get ): p_task = periodic.MagnumServicePeriodicTasks(CONF, 'fake-conductor') mock_ms_get.return_value = None p_task.update_magnum_service(None) mock_ms_get.assert_called_once_with(mock.ANY, p_task.host, p_task.binary) mock_ms_create.assert_called_once_with() mock_ms_refresh.assert_called_once_with() @mock.patch.object(objects.MagnumService, 'get_by_host_and_binary') @mock.patch.object(objects.MagnumService, 'create') def test_update_magnum_service_on_restart(self, mock_ms_create, mock_ms_get): p_task = periodic.MagnumServicePeriodicTasks(CONF, 'fake-conductor') mock_ms_get.return_value = self.fake_ms p_task.update_magnum_service(None) mock_ms_get.assert_called_once_with(mock.ANY, p_task.host, p_task.binary) self.fake_ms_refresh.assert_called_once_with() def test_update_magnum_service_regular(self): p_task = periodic.MagnumServicePeriodicTasks(CONF, 'fake-conductor') p_task.magnum_service_ref = self.fake_ms p_task.update_magnum_service(None) self.fake_ms_refresh.assert_called_once_with() magnum-6.1.0/magnum/tests/unit/servicegroup/__init__.py0000666000175100017510000000000013244017334023252 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/template/0000775000175100017510000000000013244017675020257 5ustar 
zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/template/test_template.py0000666000175100017510000000255413244017334023503 0ustar zuulzuul00000000000000# Copyright 2015 Intel, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import sys from glob import glob from oslo_config import cfg from yaml import load from magnum.conf import paths from magnum.tests import base cfg.CONF.register_opts([cfg.StrOpt('template_path', default=paths.basedir_def('templates'), help='Heat template path')]) class TestTemplate(base.TestCase): def test_template_yaml(self): for yml in [y for x in os.walk(cfg.CONF.template_path) for y in glob(os.path.join(x[0], '*.yaml'))]: with open(yml, 'r') as f: yml_contents = f.read() try: load(yml_contents) except Exception: error_msg = "file: %s: %s" % (yml, sys.exc_info()[1]) self.fail(error_msg) magnum-6.1.0/magnum/tests/unit/template/__init__.py0000666000175100017510000000000013244017334022350 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/api/0000775000175100017510000000000013244017675017215 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/api/test_validation.py0000666000175100017510000003376013244017334022763 0ustar zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from six.moves import reload_module from magnum.api import validation as v from magnum.common import exception import magnum.conf from magnum import objects from magnum.tests import base from magnum.tests.unit.objects import utils as obj_utils CONF = magnum.conf.CONF class TestValidation(base.BaseTestCase): def _test_enforce_cluster_type_supported( self, mock_cluster_template_get_by_uuid, mock_cluster_get_by_uuid, mock_pecan_request, cluster_type, assert_raised=False): @v.enforce_cluster_type_supported() def test(self, cluster): pass server_type, cluster_distro, coe = cluster_type cluster_template = obj_utils.get_test_cluster_template( mock_pecan_request.context, uuid='cluster_template_id', coe=coe, cluster_distro=cluster_distro, server_type=server_type) mock_cluster_template_get_by_uuid.return_value = cluster_template cluster = mock.MagicMock() cluster.cluster_template_id = 'cluster_template_id' cluster.cluster_template = cluster_template mock_cluster_get_by_uuid.return_value = cluster if assert_raised: return self.assertRaises( exception.ClusterTypeNotSupported, test, self, cluster) else: self.assertIsNone(test(self, cluster)) @mock.patch('pecan.request') @mock.patch('magnum.objects.Cluster.get_by_uuid') @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') def test_enforce_cluster_type_supported( self, mock_cluster_template_get_by_uuid, mock_cluster_get_by_uuid, mock_pecan_request): cluster_type = ('vm', 'fedora-atomic', 'kubernetes') self._test_enforce_cluster_type_supported( mock_cluster_template_get_by_uuid, mock_cluster_get_by_uuid, 
mock_pecan_request, cluster_type) @mock.patch('pecan.request') @mock.patch('magnum.objects.Cluster.get_by_uuid') @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') def test_enforce_cluster_type_not_supported( self, mock_cluster_template_get_by_uuid, mock_cluster_get_by_uuid, mock_pecan_request): cluster_type = ('vm', 'foo', 'kubernetes') exc = self._test_enforce_cluster_type_supported( mock_cluster_template_get_by_uuid, mock_cluster_get_by_uuid, mock_pecan_request, cluster_type, assert_raised=True) self.assertEqual('Cluster type (vm, foo, kubernetes) not supported.', exc.message) def _test_enforce_network_driver_types_create( self, network_driver_type, network_driver_config_dict, coe='kubernetes', assert_raised=False): @v.enforce_network_driver_types_create() def test(self, cluster_template): pass for key, val in network_driver_config_dict.items(): CONF.set_override(key, val, 'cluster_template') cluster_template = mock.MagicMock() cluster_template.name = 'test_cluster_template' cluster_template.network_driver = network_driver_type cluster_template.coe = coe # Reload the validator module so that ClusterTemplate configs are # re-evaluated. 
reload_module(v) validator = v.K8sValidator validator.supported_network_drivers = ['flannel', 'type1', 'type2'] if assert_raised: self.assertRaises(exception.InvalidParameterValue, test, self, cluster_template) else: test(self, cluster_template) return cluster_template def test_enforce_network_driver_types_one_allowed_create(self): self._test_enforce_network_driver_types_create( network_driver_type='type1', network_driver_config_dict={ 'kubernetes_allowed_network_drivers': ['type1']}) def test_enforce_network_driver_types_two_allowed_create(self): self._test_enforce_network_driver_types_create( network_driver_type='type1', network_driver_config_dict={ 'kubernetes_allowed_network_drivers': ['type1', 'type2']}) def test_enforce_network_driver_types_not_allowed_create(self): self._test_enforce_network_driver_types_create( network_driver_type='type1', network_driver_config_dict={ 'kubernetes_allowed_network_drivers': ['type2']}, assert_raised=True) def test_enforce_network_driver_types_all_allowed_create(self): for driver in ['flannel', 'type1', 'type2']: self._test_enforce_network_driver_types_create( network_driver_type=driver, network_driver_config_dict={ 'kubernetes_allowed_network_drivers': ['all']}) def test_enforce_network_driver_types_invalid_coe_create(self): self._test_enforce_network_driver_types_create( network_driver_type='flannel', network_driver_config_dict={}, coe='invalid_coe_type', assert_raised=True) def test_enforce_network_driver_types_default_create(self): cluster_template = self._test_enforce_network_driver_types_create( network_driver_type=None, network_driver_config_dict={}) self.assertEqual('flannel', cluster_template.network_driver) def test_enforce_network_driver_types_default_config_create(self): cluster_template = self._test_enforce_network_driver_types_create( network_driver_type=None, network_driver_config_dict={ 'kubernetes_default_network_driver': 'type1'}) self.assertEqual('type1', cluster_template.network_driver) def 
test_enforce_network_driver_types_default_invalid_create(self): self._test_enforce_network_driver_types_create( network_driver_type=None, network_driver_config_dict={ 'kubernetes_default_network_driver': 'invalid_driver'}, assert_raised=True) @mock.patch('pecan.request') @mock.patch('magnum.api.utils.get_resource') def _test_enforce_network_driver_types_update( self, mock_get_resource, mock_pecan_request, network_driver_type, network_driver_config_dict, assert_raised=False): @v.enforce_network_driver_types_update() def test(self, cluster_template_ident, patch): pass for key, val in network_driver_config_dict.items(): CONF.set_override(key, val, 'cluster_template') cluster_template_ident = 'test_uuid_or_name' patch = [{'path': '/network_driver', 'value': network_driver_type, 'op': 'replace'}] context = mock_pecan_request.context cluster_template = obj_utils.get_test_cluster_template( context, uuid=cluster_template_ident, coe='kubernetes') cluster_template.network_driver = network_driver_type mock_get_resource.return_value = cluster_template # Reload the validator module so that ClusterTemplate configs are # re-evaluated. 
reload_module(v) validator = v.K8sValidator validator.supported_network_drivers = ['flannel', 'type1', 'type2'] if assert_raised: self.assertRaises(exception.InvalidParameterValue, test, self, cluster_template_ident, patch) else: test(self, cluster_template_ident, patch) mock_get_resource.assert_called_once_with( 'ClusterTemplate', cluster_template_ident) def test_enforce_network_driver_types_one_allowed_update(self): self._test_enforce_network_driver_types_update( network_driver_type='type1', network_driver_config_dict={ 'kubernetes_allowed_network_drivers': ['type1']}) def test_enforce_network_driver_types_two_allowed_update(self): self._test_enforce_network_driver_types_update( network_driver_type='type1', network_driver_config_dict={ 'kubernetes_allowed_network_drivers': ['type1', 'type2']}) def test_enforce_network_driver_types_not_allowed_update(self): self._test_enforce_network_driver_types_update( network_driver_type='type1', network_driver_config_dict={ 'kubernetes_allowed_network_drivers': ['type2']}, assert_raised=True) def test_enforce_network_driver_types_all_allowed_update(self): for driver in ['flannel', 'type1', 'type2']: self._test_enforce_network_driver_types_update( network_driver_type=driver, network_driver_config_dict={ 'kubernetes_allowed_network_drivers': ['all']}) def _test_enforce_volume_driver_types_create( self, volume_driver_type, coe='kubernetes', assert_raised=False): @v.enforce_volume_driver_types_create() def test(self, cluster_template): pass cluster_template = obj_utils.get_test_cluster_template( {}, name='test_cluster_template', coe=coe, volume_driver=volume_driver_type) if assert_raised: self.assertRaises(exception.InvalidParameterValue, test, self, cluster_template) else: test(self, cluster_template) def test_enforce_volume_driver_types_valid_create(self): self._test_enforce_volume_driver_types_create( volume_driver_type='cinder') def test_enforce_volume_driver_types_invalid_create(self): 
self._test_enforce_volume_driver_types_create( volume_driver_type='type', assert_raised=True) def _test_enforce_server_type( self, server_type, coe='kubernetes', assert_raised=False): @v.enforce_server_type() def test(self, cluster_template): pass cluster_template = obj_utils.get_test_cluster_template( {}, name='test_cluster_template', coe=coe, server_type=server_type) if assert_raised: self.assertRaises(exception.InvalidParameterValue, test, self, cluster_template) else: test(self, cluster_template) def test_enforce_server_type_valid_vm(self): self._test_enforce_server_type( server_type='vm') def test_enforce_server_type_valid_bm(self): self._test_enforce_server_type( server_type='bm') def test_enforce_server_type_invalid(self): self._test_enforce_server_type( server_type='invalid', assert_raised=True) @mock.patch('pecan.request') @mock.patch('magnum.api.utils.get_resource') def _test_enforce_volume_driver_types_update( self, mock_get_resource, mock_pecan_request, volume_driver_type, op, assert_raised=False): @v.enforce_volume_driver_types_update() def test(self, cluster_template_ident, patch): pass cluster_template_ident = 'test_uuid_or_name' patch = [{'path': '/volume_driver', 'value': volume_driver_type, 'op': op}] context = mock_pecan_request.context cluster_template = obj_utils.get_test_cluster_template( context, uuid=cluster_template_ident, coe='kubernetes') mock_get_resource.return_value = cluster_template # Reload the validator module so that ClusterTemplate configs are # re-evaluated. 
reload_module(v) validator = v.K8sValidator validator.supported_volume_driver = ['cinder'] if assert_raised: self.assertRaises(exception.InvalidParameterValue, test, self, cluster_template_ident, patch) else: test(self, cluster_template_ident, patch) mock_get_resource.assert_called_once_with( 'ClusterTemplate', cluster_template_ident) def test_enforce_volume_driver_types_supported_replace_update(self): self._test_enforce_volume_driver_types_update( volume_driver_type='cinder', op='replace') def test_enforce_volume_driver_types_not_supported_replace_update(self): self._test_enforce_volume_driver_types_update( volume_driver_type='type1', op='replace', assert_raised=True) def test_enforce_volume_driver_types_supported_add_update(self): self._test_enforce_volume_driver_types_update( volume_driver_type='cinder', op='add') def test_enforce_volume_driver_types_not_supported_add_update(self): self._test_enforce_volume_driver_types_update( volume_driver_type='type1', op='add', assert_raised=True) def test_enforce_volume_driver_types_remove_update(self): self._test_enforce_volume_driver_types_update( volume_driver_type='cinder', op='remove') def test_validate_cluster_properties(self): allowed_properties = v.cluster_update_allowed_properties for field in objects.Cluster.fields: if field in allowed_properties: v.validate_cluster_properties(set([field])) else: self.assertRaises(exception.InvalidParameterValue, v.validate_cluster_properties, set([field])) magnum-6.1.0/magnum/tests/unit/api/test_app.py0000666000175100017510000000212113244017334021374 0ustar zuulzuul00000000000000# Copyright 2014 # The Cloudscaling Group, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. 
You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from magnum.api import app as api_app from magnum.api import config as api_config from magnum.api import hooks from magnum.tests import base class TestAppConfig(base.BaseTestCase): def test_get_pecan_config(self): config = api_app.get_pecan_config() config_d = dict(config.app) self.assertEqual(api_config.app['modules'], config_d['modules']) self.assertEqual(api_config.app['root'], config_d['root']) self.assertIsInstance(config_d['hooks'][0], hooks.ContextHook) magnum-6.1.0/magnum/tests/unit/api/test_expose.py0000666000175100017510000000217113244017334022124 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from magnum.api import expose from magnum.tests import base class TestExpose(base.BaseTestCase): @mock.patch('wsmeext.pecan.wsexpose') def test_expose_with_rest_content_types(self, mock_pecan): self.assertTrue(expose.expose(rest_content_types='json')) mock_pecan.assert_called_with(rest_content_types='json') @mock.patch('wsmeext.pecan.wsexpose') def test_expose_without_rest_content_types(self, mock_pecan): self.assertTrue(expose.expose()) mock_pecan.assert_called_once_with(rest_content_types=('json',)) magnum-6.1.0/magnum/tests/unit/api/test_servicegroup.py0000666000175100017510000001273113244017334023341 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import datetime import mock from oslo_utils import timeutils import pytz from magnum.api import servicegroup as svc_grp from magnum.tests.unit.api import base as api_base from magnum.tests.unit.objects import utils as obj_util class TestServiceGroup(api_base.FunctionalTest): def setUp(self): super(TestServiceGroup, self).setUp() self.servicegroup_api = svc_grp.ServiceGroup() def test_service_is_up_check_type(self): random_obj = mock.MagicMock() self.assertRaises(TypeError, self.servicegroup_api.service_is_up, random_obj) def test_service_is_up_forced_down(self): kwarg = {'forced_down': True} magnum_object = obj_util.get_test_magnum_service_object( self.context, **kwarg) is_up = self.servicegroup_api.service_is_up(magnum_object) self.assertFalse(is_up) def test_service_is_up_alive(self): kwarg = {'last_seen_up': timeutils.utcnow(True)} magnum_object = obj_util.get_test_magnum_service_object( self.context, **kwarg) is_up = self.servicegroup_api.service_is_up(magnum_object) self.assertTrue(is_up) def test_service_is_up_alive_with_created(self): kwarg = {'created_at': timeutils.utcnow(True)} magnum_object = obj_util.get_test_magnum_service_object( self.context, **kwarg) is_up = self.servicegroup_api.service_is_up(magnum_object) self.assertTrue(is_up) def test_service_is_up_alive_with_updated(self): kwarg = {'updated_at': timeutils.utcnow(True)} magnum_object = obj_util.get_test_magnum_service_object( self.context, **kwarg) is_up = self.servicegroup_api.service_is_up(magnum_object) self.assertTrue(is_up) def test_service_is_up_alive_with_all_three(self): kwarg = {'created_at': timeutils.utcnow(True), 'updated_at': timeutils.utcnow(True), 'last_seen_up': timeutils.utcnow(True)} magnum_object = obj_util.get_test_magnum_service_object( self.context, **kwarg) is_up = self.servicegroup_api.service_is_up(magnum_object) self.assertTrue(is_up) def test_service_is_up_alive_with_latest_update(self): kwarg = {'created_at': datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC), 
'updated_at': datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC), 'last_seen_up': timeutils.utcnow(True)} magnum_object = obj_util.get_test_magnum_service_object( self.context, **kwarg) is_up = self.servicegroup_api.service_is_up(magnum_object) self.assertTrue(is_up) def test_service_is_up_down(self): kwarg = {'last_seen_up': datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC)} magnum_object = obj_util.get_test_magnum_service_object( self.context, **kwarg) is_up = self.servicegroup_api.service_is_up(magnum_object) self.assertFalse(is_up) def test_service_is_up_down_with_create(self): kwarg = {'created_at': datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC)} magnum_object = obj_util.get_test_magnum_service_object( self.context, **kwarg) is_up = self.servicegroup_api.service_is_up(magnum_object) self.assertFalse(is_up) def test_service_is_up_down_with_update(self): kwarg = {'updated_at': datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC)} magnum_object = obj_util.get_test_magnum_service_object( self.context, **kwarg) is_up = self.servicegroup_api.service_is_up(magnum_object) self.assertFalse(is_up) def test_service_is_up_down_with_all_three(self): kwarg = {'last_seen_up': datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC), 'created_at': datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC), 'updated_at': datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC)} magnum_object = obj_util.get_test_magnum_service_object( self.context, **kwarg) is_up = self.servicegroup_api.service_is_up(magnum_object) self.assertFalse(is_up) def test_service_is_up_down_with_old_update(self): kwarg = {'last_seen_up': datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC), 'created_at': timeutils.utcnow(True), 'updated_at': timeutils.utcnow(True)} magnum_object = obj_util.get_test_magnum_service_object( self.context, **kwarg) is_up = self.servicegroup_api.service_is_up(magnum_object) self.assertFalse(is_up) magnum-6.1.0/magnum/tests/unit/api/base.py0000666000175100017510000002451213244017334020477 0ustar zuulzuul00000000000000# 
Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE: Ported from ceilometer/tests/api.py (subsequently moved to # ceilometer/tests/api/__init__.py). This should be oslo'ified: # https://bugs.launchpad.net/ironic/+bug/1255115. # NOTE(deva): import auth_token so we can override a config option from keystonemiddleware import auth_token # noqa import mock from oslo_config import cfg import pecan import pecan.testing from six.moves.urllib import parse as urlparse from magnum.api import hooks from magnum.tests.unit.db import base PATH_PREFIX = '/v1' class FunctionalTest(base.DbTestCase): """Base class for API tests. Pecan controller test. Used for functional tests of Pecan controllers where you need to test your literal application and its integration with the framework. 
""" def setUp(self): super(FunctionalTest, self).setUp() cfg.CONF.set_override("auth_version", "v2.0", group='keystone_authtoken') cfg.CONF.set_override("admin_user", "admin", group='keystone_authtoken') # Determine where we are so we can set up paths in the config self.config = { 'app': { 'root': 'magnum.api.controllers.root.RootController', 'modules': ['magnum.api'], 'hooks': [ hooks.ContextHook(), hooks.RPCHook(), hooks.NoExceptionTracebackHook(), ], }, } self.app = self._make_app() def reset_pecan(): pecan.set_config({}, overwrite=True) self.addCleanup(reset_pecan) p = mock.patch('magnum.api.controllers.v1.Controller._check_version') self._check_version = p.start() self.addCleanup(p.stop) def _verify_attrs(self, attrs, response, positive=True): if positive is True: verify_method = self.assertIn else: verify_method = self.assertNotIn for attr in attrs: verify_method(attr, response) def _make_app(self, config=None): if not config: config = self.config return pecan.testing.load_test_app(config) def _request_json(self, path, params, expect_errors=False, headers=None, method="post", extra_environ=None, status=None, path_prefix=PATH_PREFIX): """Sends simulated HTTP request to Pecan test app. :param path: url path of target service :param params: content for wsgi.input of request :param expect_errors: Boolean value; whether an error is expected based on request :param headers: a dictionary of headers to send along with the request :param method: Request method type. Appropriate method function call should be used rather than passing attribute in. 
:param extra_environ: a dictionary of environ variables to send along with the request :param status: expected status code of response :param path_prefix: prefix of the url path """ full_path = path_prefix + path print('%s: %s %s' % (method.upper(), full_path, params)) response = getattr(self.app, "%s_json" % method)( str(full_path), params=params, headers=headers, status=status, extra_environ=extra_environ, expect_errors=expect_errors ) print('GOT:%s' % response) return response def put_json(self, path, params, expect_errors=False, headers=None, extra_environ=None, status=None): """Sends simulated HTTP PUT request to Pecan test app. :param path: url path of target service :param params: content for wsgi.input of request :param expect_errors: Boolean value; whether an error is expected based on request :param headers: a dictionary of headers to send along with the request :param extra_environ: a dictionary of environ variables to send along with the request :param status: expected status code of response """ return self._request_json(path=path, params=params, expect_errors=expect_errors, headers=headers, extra_environ=extra_environ, status=status, method="put") def post_json(self, path, params, expect_errors=False, headers=None, extra_environ=None, status=None): """Sends simulated HTTP POST request to Pecan test app. 
:param path: url path of target service :param params: content for wsgi.input of request :param expect_errors: Boolean value; whether an error is expected based on request :param headers: a dictionary of headers to send along with the request :param extra_environ: a dictionary of environ variables to send along with the request :param status: expected status code of response """ return self._request_json(path=path, params=params, expect_errors=expect_errors, headers=headers, extra_environ=extra_environ, status=status, method="post") def patch_json(self, path, params, expect_errors=False, headers=None, extra_environ=None, status=None): """Sends simulated HTTP PATCH request to Pecan test app. :param path: url path of target service :param params: content for wsgi.input of request :param expect_errors: Boolean value; whether an error is expected based on request :param headers: a dictionary of headers to send along with the request :param extra_environ: a dictionary of environ variables to send along with the request :param status: expected status code of response """ return self._request_json(path=path, params=params, expect_errors=expect_errors, headers=headers, extra_environ=extra_environ, status=status, method="patch") def delete(self, path, expect_errors=False, headers=None, extra_environ=None, status=None, path_prefix=PATH_PREFIX): """Sends simulated HTTP DELETE request to Pecan test app. 
:param path: url path of target service :param expect_errors: Boolean value; whether an error is expected based on request :param headers: a dictionary of headers to send along with the request :param extra_environ: a dictionary of environ variables to send along with the request :param status: expected status code of response :param path_prefix: prefix of the url path """ full_path = path_prefix + path print('DELETE: %s' % (full_path)) response = self.app.delete(str(full_path), headers=headers, status=status, extra_environ=extra_environ, expect_errors=expect_errors) print('GOT:%s' % response) return response def get_json(self, path, expect_errors=False, headers=None, extra_environ=None, q=None, path_prefix=PATH_PREFIX, **params): """Sends simulated HTTP GET request to Pecan test app. :param path: url path of target service :param expect_errors: Boolean value;whether an error is expected based on request :param headers: a dictionary of headers to send along with the request :param extra_environ: a dictionary of environ variables to send along with the request :param q: list of queries consisting of: field, value, op, and type keys :param path_prefix: prefix of the url path :param params: content for wsgi.input of request """ if q is None: q = [] full_path = path_prefix + path query_params = {'q.field': [], 'q.value': [], 'q.op': [], } for query in q: for name in ['field', 'op', 'value']: query_params['q.%s' % name].append(query.get(name, '')) all_params = {} all_params.update(params) if q: all_params.update(query_params) print('GET: %s %r' % (full_path, all_params)) response = self.app.get(full_path, params=all_params, headers=headers, extra_environ=extra_environ, expect_errors=expect_errors) if not expect_errors: response = response.json print('GOT:%s' % response) return response def validate_link(self, link, bookmark=False): """Checks if the given link can get correct data.""" # removes the scheme and net location parts of the link url_parts = 
list(urlparse.urlparse(link)) url_parts[0] = url_parts[1] = '' # bookmark link should not have the version in the URL if bookmark and url_parts[2].startswith(PATH_PREFIX): return False full_path = urlparse.urlunparse(url_parts) try: self.get_json(full_path, path_prefix='') return True except Exception: return False magnum-6.1.0/magnum/tests/unit/api/test_attr_validator.py0000666000175100017510000003675013244017334023652 0ustar zuulzuul00000000000000# Copyright 2015 EasyStack, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from glanceclient import exc as glance_exception import mock from novaclient import exceptions as nova_exc from magnum.api import attr_validator from magnum.common import exception from magnum.tests import base class TestAttrValidator(base.BaseTestCase): def test_validate_flavor_with_vaild_flavor(self): mock_flavor = mock.MagicMock() mock_flavor.name = 'test_flavor' mock_flavor.id = 'test_flavor_id' mock_flavors = [mock_flavor] mock_nova = mock.MagicMock() mock_nova.flavors.list.return_value = mock_flavors mock_os_cli = mock.MagicMock() mock_os_cli.nova.return_value = mock_nova attr_validator.validate_flavor(mock_os_cli, 'test_flavor') self.assertTrue(mock_nova.flavors.list.called) def test_validate_flavor_with_none_flavor(self): mock_flavor = mock.MagicMock() mock_flavor.name = 'test_flavor' mock_flavor.id = 'test_flavor_id' mock_flavors = [mock_flavor] mock_nova = mock.MagicMock() mock_nova.flavors.list.return_value = mock_flavors mock_os_cli = mock.MagicMock() mock_os_cli.nova.return_value = mock_nova attr_validator.validate_flavor(mock_os_cli, None) self.assertEqual(False, mock_nova.flavors.list.called) def test_validate_flavor_with_invalid_flavor(self): mock_flavor = mock.MagicMock() mock_flavor.name = 'test_flavor_not_equal' mock_flavor.id = 'test_flavor_id_not_equal' mock_flavors = [mock_flavor] mock_nova = mock.MagicMock() mock_nova.flavors.list.return_value = mock_flavors mock_os_cli = mock.MagicMock() mock_os_cli.nova.return_value = mock_nova self.assertRaises(exception.FlavorNotFound, attr_validator.validate_flavor, mock_os_cli, 'test_flavor') def test_validate_external_network_with_valid_network(self): mock_networks = {'networks': [{'name': 'test_ext_net', 'id': 'test_ext_net_id'}]} mock_neutron = mock.MagicMock() mock_neutron.list_networks.return_value = mock_networks mock_os_cli = mock.MagicMock() mock_os_cli.neutron.return_value = mock_neutron attr_validator.validate_external_network(mock_os_cli, 'test_ext_net') 
self.assertTrue(mock_neutron.list_networks.called) def test_validate_external_network_with_multiple_valid_network(self): mock_networks = {'networks': [{'name': 'test_ext_net', 'id': 'test_ext_net_id1'}, {'name': 'test_ext_net', 'id': 'test_ext_net_id2'}]} mock_neutron = mock.MagicMock() mock_neutron.list_networks.return_value = mock_networks mock_os_cli = mock.MagicMock() mock_os_cli.neutron.return_value = mock_neutron self.assertRaises(exception.Conflict, attr_validator.validate_external_network, mock_os_cli, 'test_ext_net') def test_validate_external_network_with_invalid_network(self): mock_networks = {'networks': [{'name': 'test_ext_net_not_equal', 'id': 'test_ext_net_id_not_equal'}]} mock_neutron = mock.MagicMock() mock_neutron.list_networks.return_value = mock_networks mock_os_cli = mock.MagicMock() mock_os_cli.neutron.return_value = mock_neutron self.assertRaises(exception.ExternalNetworkNotFound, attr_validator.validate_external_network, mock_os_cli, 'test_ext_net') def test_validate_keypair_with_valid_keypair(self): mock_keypair = mock.MagicMock() mock_keypair.id = 'test-keypair' mock_nova = mock.MagicMock() mock_nova.keypairs.get.return_value = mock_keypair mock_os_cli = mock.MagicMock() mock_os_cli.nova.return_value = mock_nova attr_validator.validate_keypair(mock_os_cli, 'test-keypair') def test_validate_keypair_with_invalid_keypair(self): mock_nova = mock.MagicMock() mock_nova.keypairs.get.side_effect = nova_exc.NotFound('test-keypair') mock_os_cli = mock.MagicMock() mock_os_cli.nova.return_value = mock_nova self.assertRaises(exception.KeyPairNotFound, attr_validator.validate_keypair, mock_os_cli, 'test_keypair') def test_validate_labels_main_no_label(self): fake_labels = {} attr_validator.validate_labels(fake_labels) def test_validate_labels_main_isolation_invalid_label(self): fake_labels = {'mesos_slave_isolation': 'abc'} self.assertRaises(exception.InvalidParameterValue, attr_validator.validate_labels, fake_labels) def 
test_validate_labels_isolation_valid(self): fake_labels = {'mesos_slave_isolation': 'filesystem/posix,filesystem/linux'} attr_validator.validate_labels_isolation(fake_labels) def test_validate_labels_main_with_valid_providers_none_isolation(self): fake_labels = {'mesos_slave_image_providers': 'docker'} self.assertRaises(exception.RequiredParameterNotProvided, attr_validator.validate_labels, fake_labels) def test_validate_labels_with_valid_providers_invalid_isolation(self): fake_labels = {'mesos_slave_image_providers': 'docker', 'mesos_slave_isolation': 'abc'} self.assertRaises(exception.RequiredParameterNotProvided, attr_validator.validate_labels_image_providers, fake_labels) def test_validate_labels_with_valid_providers_invalid_providers(self): fake_labels = {'mesos_slave_image_providers': 'appc'} attr_validator.validate_labels_image_providers(fake_labels) def test_validate_labels_with_invalid_providers(self): fake_labels = {'mesos_slave_image_providers': 'abc'} self.assertRaises(exception.InvalidParameterValue, attr_validator.validate_labels_image_providers, fake_labels) def test_validate_labels_with_valid_providers_none_isolation(self): fake_labels = {'mesos_slave_image_providers': 'docker'} self.assertRaises(exception.RequiredParameterNotProvided, attr_validator.validate_labels_image_providers, fake_labels) def test_validate_labels_with_valid_providers_valid_isolation(self): fake_labels = {'mesos_slave_image_providers': 'docker', 'mesos_slave_isolation': 'docker/runtime'} attr_validator.validate_labels_image_providers(fake_labels) def test_validate_labels_with_environment_variables_valid_json(self): contents = '{"step": "upgrade", "interface": "deploy"}' fack_labels = {'mesos_slave_executor_env_variables': contents} attr_validator.validate_labels_executor_env_variables( fack_labels) def test_validate_labels_with_environment_variables_bad_json(self): fack_labels = {'mesos_slave_executor_env_variables': 'step'} self.assertRaisesRegex( 
exception.InvalidParameterValue, "Json format error", attr_validator.validate_labels_executor_env_variables, fack_labels) def test_validate_labels_with_valid_isolation(self): fake_labels = {'mesos_slave_isolation': 'filesystem/posix,filesystem/linux'} attr_validator.validate_labels_isolation(fake_labels) def test_validate_labels_isolation_invalid(self): fake_labels = {'mesos_slave_isolation': 'filesystem'} self.assertRaises(exception.InvalidParameterValue, attr_validator.validate_labels_isolation, fake_labels) def test_validate_labels_strategy_valid(self): fake_labels = {'swarm_strategy': 'spread'} attr_validator.validate_labels_strategy(fake_labels) def test_validate_labels_strategy_missing(self): fake_labels = {'strategy': 'spread'} attr_validator.validate_labels_strategy(fake_labels) def test_validate_labels_strategy_invalid(self): fake_labels = {'swarm_strategy': 'invalid'} self.assertRaises(exception.InvalidParameterValue, attr_validator.validate_labels_strategy, fake_labels) @mock.patch('magnum.api.utils.get_openstack_resource') def test_validate_image_with_valid_image_by_name(self, mock_os_res): mock_image = {'name': 'fedora-21-atomic-5', 'id': 'e33f0988-1730-405e-8401-30cbc8535302', 'os_distro': 'fedora-atomic'} mock_os_res.return_value = mock_image mock_os_cli = mock.MagicMock() attr_validator.validate_image(mock_os_cli, 'fedora-21-atomic-5') self.assertTrue(mock_os_res.called) @mock.patch('magnum.api.utils.get_openstack_resource') def test_validate_image_with_forbidden_image(self, mock_os_res): def glance_side_effect(cli, image, name): raise glance_exception.HTTPForbidden() mock_os_res.side_effect = glance_side_effect mock_os_cli = mock.MagicMock() self.assertRaises(exception.ImageNotAuthorized, attr_validator.validate_image, mock_os_cli, 'fedora-21-atomic-5') @mock.patch('magnum.api.utils.get_openstack_resource') def test_validate_image_with_valid_image_by_id(self, mock_os_res): mock_image = {'name': 'fedora-21-atomic-5', 'id': 
'e33f0988-1730-405e-8401-30cbc8535302', 'os_distro': 'fedora-atomic'} mock_os_res.return_value = mock_image mock_os_cli = mock.MagicMock() attr_validator.validate_image(mock_os_cli, 'e33f0988-1730-405e-8401-30cbc8535302') self.assertTrue(mock_os_res.called) @mock.patch('magnum.api.utils.get_openstack_resource') def test_validate_image_with_nonexist_image_by_name(self, mock_os_res): mock_os_res.side_effect = exception.ResourceNotFound mock_os_cli = mock.MagicMock() self.assertRaises(exception.ImageNotFound, attr_validator.validate_image, mock_os_cli, 'fedora-21-atomic-5') @mock.patch('magnum.api.utils.get_openstack_resource') def test_validate_image_with_nonexist_image_by_id(self, mock_os_res): mock_os_res.side_effect = glance_exception.NotFound mock_os_cli = mock.MagicMock() self.assertRaises(exception.ImageNotFound, attr_validator.validate_image, mock_os_cli, 'fedora-21-atomic-5') @mock.patch('magnum.api.utils.get_openstack_resource') def test_validate_image_with_multi_images_same_name(self, mock_os_res): mock_os_res.side_effect = exception.Conflict mock_os_cli = mock.MagicMock() self.assertRaises(exception.Conflict, attr_validator.validate_image, mock_os_cli, 'fedora-21-atomic-5') @mock.patch('magnum.api.utils.get_openstack_resource') def test_validate_image_without_os_distro(self, mock_os_res): mock_image = {'name': 'fedora-21-atomic-5', 'id': 'e33f0988-1730-405e-8401-30cbc8535302'} mock_os_res.return_value = mock_image mock_os_cli = mock.MagicMock() self.assertRaises(exception.OSDistroFieldNotFound, attr_validator.validate_image, mock_os_cli, 'fedora-21-atomic-5') @mock.patch('magnum.api.utils.get_openstack_resource') def test_validate_image_when_user_forbidden(self, mock_os_res): mock_image = {'name': 'fedora-21-atomic-5', 'id': 'e33f0988-1730-405e-8401-30cbc8535302', 'os_distro': ''} mock_os_res.return_value = mock_image mock_os_cli = mock.MagicMock() self.assertRaises(exception.OSDistroFieldNotFound, attr_validator.validate_image, mock_os_cli, 
'fedora-21-atomic-5') @mock.patch('magnum.common.clients.OpenStackClients') def test_validate_os_resources_with_invalid_flavor(self, mock_os_cli): mock_cluster_template = {'flavor_id': 'test_flavor'} mock_flavor = mock.MagicMock() mock_flavor.name = 'test_flavor_not_equal' mock_flavor.id = 'test_flavor_id_not_equal' mock_flavors = [mock_flavor] mock_nova = mock.MagicMock() mock_nova.flavors.list.return_value = mock_flavors mock_os_cli.nova.return_value = mock_nova mock_context = mock.MagicMock() self.assertRaises(exception.FlavorNotFound, attr_validator.validate_os_resources, mock_context, mock_cluster_template) @mock.patch('magnum.common.clients.OpenStackClients') @mock.patch('magnum.api.attr_validator.validate_labels') def test_validate_os_resources_with_label(self, mock_validate_labels, mock_os_cli): mock_cluster_template = {'labels': {'mesos_slave_isolation': 'abc'}} mock_context = mock.MagicMock() self.assertRaises(exception.InvalidParameterValue, attr_validator.validate_os_resources, mock_context, mock_cluster_template) @mock.patch('magnum.common.clients.OpenStackClients') @mock.patch('magnum.api.attr_validator.validators') def test_validate_os_resources_without_validator(self, mock_validators, mock_os_cli): mock_cluster_template = {} mock_context = mock.MagicMock() attr_validator.validate_os_resources(mock_context, mock_cluster_template) @mock.patch('magnum.common.clients.OpenStackClients') def test_validate_os_resources_with_cluster(self, mock_os_cli): mock_cluster_template = {} mock_cluster = { 'keypair': 'test-keypair', 'labels': {'lab1': 'val1'}, 'image_id': 'e33f0988-1730-405e-8401-30cbc8535302' } mock_keypair = mock.MagicMock() mock_keypair.id = 'test-keypair' mock_image = {'name': 'fedora-21-atomic-5', 'id': 'e33f0988-1730-405e-8401-30cbc8535302', 'os_distro': 'fedora-atomic'} mock_nova = mock.MagicMock() mock_nova.keypairs.get.return_value = mock_keypair mock_nova.images.get.return_value = mock_image mock_os_cli = mock.MagicMock() 
mock_os_cli.nova.return_value = mock_nova mock_context = mock.MagicMock() attr_validator.validate_os_resources(mock_context, mock_cluster_template, mock_cluster) magnum-6.1.0/magnum/tests/unit/api/controllers/0000775000175100017510000000000013244017675021563 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/api/controllers/auth-root-access.ini0000666000175100017510000000113313244017334025435 0ustar zuulzuul00000000000000[pipeline:main] pipeline = cors healthcheck request_id authtoken api_v1 [app:api_v1] paste.app_factory = magnum.api.app:app_factory [filter:authtoken] acl_public_routes = / paste.filter_factory = magnum.api.middleware.auth_token:AuthTokenMiddleware.factory [filter:request_id] paste.filter_factory = oslo_middleware:RequestId.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = magnum [filter:healthcheck] paste.filter_factory = oslo_middleware:Healthcheck.factory backends = disable_by_file disable_by_file_path = /tmp/magnum_healthcheck_disable magnum-6.1.0/magnum/tests/unit/api/controllers/auth-v1-access.ini0000666000175100017510000000113513244017334025002 0ustar zuulzuul00000000000000[pipeline:main] pipeline = cors healthcheck request_id authtoken api_v1 [app:api_v1] paste.app_factory = magnum.api.app:app_factory [filter:authtoken] acl_public_routes = /v1 paste.filter_factory = magnum.api.middleware.auth_token:AuthTokenMiddleware.factory [filter:request_id] paste.filter_factory = oslo_middleware:RequestId.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = magnum [filter:healthcheck] paste.filter_factory = oslo_middleware:Healthcheck.factory backends = disable_by_file disable_by_file_path = /tmp/magnum_healthcheck_disable magnum-6.1.0/magnum/tests/unit/api/controllers/v1/0000775000175100017510000000000013244017675022111 5ustar 
zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/api/controllers/v1/test_utils.py0000666000175100017510000001476213244017334024666 0ustar zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import jsonpatch import mock from oslo_utils import uuidutils import wsme from magnum.api import utils from magnum.common import exception import magnum.conf from magnum.tests.unit.api import base CONF = magnum.conf.CONF class TestApiUtils(base.FunctionalTest): def test_validate_limit(self): limit = utils.validate_limit(10) self.assertEqual(10, 10) # max limit limit = utils.validate_limit(999999999) self.assertEqual(CONF.api.max_limit, limit) # negative self.assertRaises(wsme.exc.ClientSideError, utils.validate_limit, -1) # zero self.assertRaises(wsme.exc.ClientSideError, utils.validate_limit, 0) def test_validate_sort_dir(self): sort_dir = utils.validate_sort_dir('asc') self.assertEqual('asc', sort_dir) # invalid sort_dir parameter self.assertRaises(wsme.exc.ClientSideError, utils.validate_sort_dir, 'fake-sort') @mock.patch('pecan.request') @mock.patch('magnum.objects.Cluster.get_by_name') @mock.patch('magnum.objects.Cluster.get_by_uuid') def test_get_resource_with_uuid( self, mock_get_by_uuid, mock_get_by_name, mock_request): mock_cluster = mock.MagicMock mock_get_by_uuid.return_value = mock_cluster uuid = uuidutils.generate_uuid() returned_cluster = utils.get_resource('Cluster', uuid) 
mock_get_by_uuid.assert_called_once_with(mock_request.context, uuid) self.assertFalse(mock_get_by_name.called) self.assertEqual(mock_cluster, returned_cluster) @mock.patch('pecan.request') @mock.patch('magnum.objects.Cluster.get_by_name') @mock.patch('magnum.objects.Cluster.get_by_uuid') def test_get_resource_with_name( self, mock_get_by_uuid, mock_get_by_name, mock_request): mock_cluster = mock.MagicMock mock_get_by_name.return_value = mock_cluster returned_cluster = utils.get_resource('Cluster', 'fake-name') self.assertFalse(mock_get_by_uuid.called) mock_get_by_name.assert_called_once_with(mock_request.context, 'fake-name') self.assertEqual(mock_cluster, returned_cluster) @mock.patch.object(uuidutils, 'is_uuid_like', return_value=True) def test_get_openstack_resource_by_uuid(self, fake_is_uuid_like): fake_manager = mock.MagicMock() fake_manager.get.return_value = 'fake_resource_data' resource_data = utils.get_openstack_resource(fake_manager, 'fake_resource', 'fake_resource_type') self.assertEqual('fake_resource_data', resource_data) @mock.patch.object(uuidutils, 'is_uuid_like', return_value=False) def test_get_openstack_resource_by_name(self, fake_is_uuid_like): fake_manager = mock.MagicMock() fake_manager.list.return_value = ['fake_resource_data'] resource_data = utils.get_openstack_resource(fake_manager, 'fake_resource', 'fake_resource_type') self.assertEqual('fake_resource_data', resource_data) @mock.patch.object(uuidutils, 'is_uuid_like', return_value=False) def test_get_openstack_resource_non_exist(self, fake_is_uuid_like): fake_manager = mock.MagicMock() fake_manager.list.return_value = [] self.assertRaises(exception.ResourceNotFound, utils.get_openstack_resource, fake_manager, 'fake_resource', 'fake_resource_type') @mock.patch.object(uuidutils, 'is_uuid_like', return_value=False) def test_get_openstack_resource_multi_exist(self, fake_is_uuid_like): fake_manager = mock.MagicMock() fake_manager.list.return_value = ['fake_resource_data1', 
'fake_resource_data2'] self.assertRaises(exception.Conflict, utils.get_openstack_resource, fake_manager, 'fake_resource', 'fake_resource_type') @mock.patch.object(jsonpatch, 'apply_patch') def test_apply_jsonpatch(self, mock_jsonpatch): doc = {'cluster_uuid': 'id', 'node_count': 1} patch = [{"path": "/node_count", "value": 2, "op": "replace"}] utils.apply_jsonpatch(doc, patch) mock_jsonpatch.assert_called_once_with(doc, patch) def test_apply_jsonpatch_add_attr_not_exist(self): doc = {'cluster_uuid': 'id', 'node_count': 1} patch = [{"path": "/fake", "value": 2, "op": "add"}] exc = self.assertRaises(wsme.exc.ClientSideError, utils.apply_jsonpatch, doc, patch) self.assertEqual( "Adding a new attribute /fake to the root of the resource is " "not allowed.", exc.faultstring) def test_apply_jsonpatch_add_attr_already_exist(self): doc = {'cluster_uuid': 'id', 'node_count': 1} patch = [{"path": "/node_count", "value": 2, "op": "add"}] exc = self.assertRaises(wsme.exc.ClientSideError, utils.apply_jsonpatch, doc, patch) self.assertEqual( "The attribute /node_count has existed, please use " "'replace' operation instead.", exc.faultstring) def test_validate_docker_memory(self): utils.validate_docker_memory('512m') utils.validate_docker_memory('512g') self.assertRaises(wsme.exc.ClientSideError, utils.validate_docker_memory, "512gg") # Docker require that Minimum memory limit >= 4M self.assertRaises(wsme.exc.ClientSideError, utils.validate_docker_memory, "3m") magnum-6.1.0/magnum/tests/unit/api/controllers/v1/test_baymodel.py0000666000175100017510000014437013244017334025321 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import mock from oslo_utils import timeutils from oslo_utils import uuidutils from six.moves.urllib import parse as urlparse from webtest.app import AppError from wsme import types as wtypes from magnum.api import attr_validator from magnum.api.controllers.v1 import baymodel as api_baymodel from magnum.common import exception from magnum.common import policy as magnum_policy import magnum.conf from magnum.tests import base from magnum.tests.unit.api import base as api_base from magnum.tests.unit.api import utils as apiutils from magnum.tests.unit.objects import utils as obj_utils CONF = magnum.conf.CONF class TestBayModelObject(base.TestCase): def test_baymodel_init(self): baymodel_dict = apiutils.baymodel_post_data() del baymodel_dict['image_id'] baymodel = api_baymodel.BayModel(**baymodel_dict) self.assertEqual(wtypes.Unset, baymodel.image_id) class TestListBayModel(api_base.FunctionalTest): _baymodel_attrs = ('name', 'apiserver_port', 'network_driver', 'coe', 'flavor_id', 'fixed_network', 'dns_nameserver', 'http_proxy', 'docker_volume_size', 'server_type', 'cluster_distro', 'external_network_id', 'image_id', 'registry_enabled', 'no_proxy', 'keypair_id', 'https_proxy', 'tls_disabled', 'public', 'labels', 'master_flavor_id', 'volume_driver', 'insecure_registry') def test_empty(self): response = self.get_json('/baymodels') self.assertEqual([], response['baymodels']) def test_one(self): baymodel = obj_utils.create_test_cluster_template(self.context) response = self.get_json('/baymodels') self.assertEqual(baymodel.uuid, 
response['baymodels'][0]["uuid"]) self._verify_attrs(self._baymodel_attrs, response['baymodels'][0]) def test_get_one(self): baymodel = obj_utils.create_test_cluster_template(self.context) response = self.get_json('/baymodels/%s' % baymodel['uuid']) self.assertEqual(baymodel.uuid, response['uuid']) self._verify_attrs(self._baymodel_attrs, response) def test_get_one_by_name(self): baymodel = obj_utils.create_test_cluster_template(self.context) response = self.get_json('/baymodels/%s' % baymodel['name']) self.assertEqual(baymodel.uuid, response['uuid']) self._verify_attrs(self._baymodel_attrs, response) def test_get_one_by_name_not_found(self): response = self.get_json( '/baymodels/not_found', expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_get_one_by_name_multiple_baymodel(self): obj_utils.create_test_cluster_template( self.context, name='test_baymodel', uuid=uuidutils.generate_uuid()) obj_utils.create_test_cluster_template( self.context, name='test_baymodel', uuid=uuidutils.generate_uuid()) response = self.get_json( '/baymodels/test_baymodel', expect_errors=True) self.assertEqual(409, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_get_all_with_pagination_marker(self): bm_list = [] for id_ in range(4): baymodel = obj_utils.create_test_cluster_template( self.context, id=id_, uuid=uuidutils.generate_uuid()) bm_list.append(baymodel) response = self.get_json('/baymodels?limit=3&marker=%s' % bm_list[2].uuid) self.assertEqual(1, len(response['baymodels'])) self.assertEqual(bm_list[-1].uuid, response['baymodels'][0]['uuid']) def test_detail(self): baymodel = obj_utils.create_test_cluster_template(self.context) response = self.get_json('/baymodels/detail') self.assertEqual(baymodel.uuid, response['baymodels'][0]["uuid"]) self._verify_attrs(self._baymodel_attrs, 
response['baymodels'][0]) def test_detail_with_pagination_marker(self): bm_list = [] for id_ in range(4): baymodel = obj_utils.create_test_cluster_template( self.context, id=id_, uuid=uuidutils.generate_uuid()) bm_list.append(baymodel) response = self.get_json('/baymodels/detail?limit=3&marker=%s' % bm_list[2].uuid) self.assertEqual(1, len(response['baymodels'])) self.assertEqual(bm_list[-1].uuid, response['baymodels'][0]['uuid']) self._verify_attrs(self._baymodel_attrs, response['baymodels'][0]) def test_detail_against_single(self): baymodel = obj_utils.create_test_cluster_template(self.context) response = self.get_json('/baymodels/%s/detail' % baymodel['uuid'], expect_errors=True) self.assertEqual(404, response.status_int) def test_many(self): bm_list = [] for id_ in range(5): baymodel = obj_utils.create_test_cluster_template( self.context, id=id_, uuid=uuidutils.generate_uuid()) bm_list.append(baymodel.uuid) response = self.get_json('/baymodels') self.assertEqual(len(bm_list), len(response['baymodels'])) uuids = [bm['uuid'] for bm in response['baymodels']] self.assertEqual(sorted(bm_list), sorted(uuids)) def test_links(self): uuid = uuidutils.generate_uuid() obj_utils.create_test_cluster_template(self.context, id=1, uuid=uuid) response = self.get_json('/baymodels/%s' % uuid) self.assertIn('links', response.keys()) self.assertEqual(2, len(response['links'])) self.assertIn(uuid, response['links'][0]['href']) for l in response['links']: bookmark = l['rel'] == 'bookmark' self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) def test_collection_links(self): for id_ in range(5): obj_utils.create_test_cluster_template( self.context, id=id_, uuid=uuidutils.generate_uuid()) response = self.get_json('/baymodels/?limit=3') self.assertEqual(3, len(response['baymodels'])) next_marker = response['baymodels'][-1]['uuid'] self.assertIn(next_marker, response['next']) def test_collection_links_default_limit(self): CONF.set_override('max_limit', 3, 'api') for id_ in 
range(5): obj_utils.create_test_cluster_template( self.context, id=id_, uuid=uuidutils.generate_uuid()) response = self.get_json('/baymodels') self.assertEqual(3, len(response['baymodels'])) next_marker = response['baymodels'][-1]['uuid'] self.assertIn(next_marker, response['next']) class TestPatch(api_base.FunctionalTest): def setUp(self): super(TestPatch, self).setUp() p = mock.patch.object(attr_validator, 'validate_os_resources') self.mock_valid_os_res = p.start() self.addCleanup(p.stop) self.baymodel = obj_utils.create_test_cluster_template( self.context, name='bay_model_example_A', image_id='nerdherd', apiserver_port=8080, fixed_network='private', flavor_id='m1.magnum', master_flavor_id='m1.magnum', external_network_id='public', keypair_id='test', volume_driver='rexray', public=False, docker_volume_size=20, coe='swarm', labels={'key1': 'val1', 'key2': 'val2'} ) def test_update_not_found(self): uuid = uuidutils.generate_uuid() response = self.patch_json('/baymodels/%s' % uuid, [{'path': '/name', 'value': 'bay_model_example_B', 'op': 'add'}], expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_update_baymodel_with_bay(self): baymodel = obj_utils.create_test_cluster_template(self.context) obj_utils.create_test_cluster(self.context, cluster_template_id=baymodel.uuid) response = self.patch_json('/baymodels/%s' % baymodel.uuid, [{'path': '/name', 'value': 'bay_model_example_B', 'op': 'replace'}], expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) self.assertIn(baymodel.uuid, response.json['errors'][0]['detail']) @mock.patch.object(magnum_policy, 'enforce') def test_update_public_baymodel_success(self, mock_policy): mock_policy.return_value = True response = self.patch_json('/baymodels/%s' % self.baymodel.uuid, [{'path': '/public', 
'value': True, 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) response = self.get_json('/baymodels/%s' % self.baymodel.uuid) self.assertTrue(response['public']) @mock.patch.object(magnum_policy, 'enforce') def test_update_public_baymodel_fail(self, mock_policy): mock_policy.return_value = False self.assertRaises(AppError, self.patch_json, '/baymodels/%s' % self.baymodel.uuid, [{'path': '/public', 'value': True, 'op': 'replace'}]) @mock.patch.object(magnum_policy, 'enforce') def test_update_baymodel_with_bay_allow_update(self, mock_policy): mock_policy.return_value = True baymodel = obj_utils.create_test_cluster_template(self.context) obj_utils.create_test_cluster(self.context, cluster_template_id=baymodel.uuid) response = self.patch_json('/baymodels/%s' % baymodel.uuid, [{'path': '/public', 'value': True, 'op': 'replace'}], expect_errors=True) self.assertEqual(200, response.status_int) response = self.get_json('/baymodels/%s' % self.baymodel.uuid) self.assertEqual(response['public'], True) def test_update_baymodel_with_bay_not_allow_update(self): baymodel = obj_utils.create_test_cluster_template(self.context) obj_utils.create_test_cluster(self.context, cluster_template_id=baymodel.uuid) response = self.patch_json('/baymodels/%s' % baymodel.uuid, [{'path': '/name', 'value': 'new_name', 'op': 'replace'}], expect_errors=True) self.assertEqual(400, response.status_code) @mock.patch('oslo_utils.timeutils.utcnow') def test_replace_singular(self, mock_utcnow): name = 'bay_model_example_B' test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time response = self.patch_json('/baymodels/%s' % self.baymodel.uuid, [{'path': '/name', 'value': name, 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) response = self.get_json('/baymodels/%s' % self.baymodel.uuid) self.assertEqual(name, response['name']) 
return_updated_at = timeutils.parse_isotime( response['updated_at']).replace(tzinfo=None) self.assertEqual(test_time, return_updated_at) # Assert nothing else was changed self.assertEqual(self.baymodel.uuid, response['uuid']) self.assertEqual(self.baymodel.image_id, response['image_id']) self.assertEqual(self.baymodel.apiserver_port, response['apiserver_port']) self.assertEqual(self.baymodel.fixed_network, response['fixed_network']) self.assertEqual(self.baymodel.network_driver, response['network_driver']) self.assertEqual(self.baymodel.volume_driver, response['volume_driver']) self.assertEqual(self.baymodel.docker_volume_size, response['docker_volume_size']) self.assertEqual(self.baymodel.coe, response['coe']) self.assertEqual(self.baymodel.http_proxy, response['http_proxy']) self.assertEqual(self.baymodel.https_proxy, response['https_proxy']) self.assertEqual(self.baymodel.no_proxy, response['no_proxy']) self.assertEqual(self.baymodel.labels, response['labels']) def test_replace_baymodel_with_no_exist_flavor_id(self): self.mock_valid_os_res.side_effect = exception.FlavorNotFound("aaa") response = self.patch_json('/baymodels/%s' % self.baymodel.uuid, [{'path': '/flavor_id', 'value': 'aaa', 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) def test_replace_baymodel_with_no_exist_keypair_id(self): self.mock_valid_os_res.side_effect = exception.KeyPairNotFound("aaa") response = self.patch_json('/baymodels/%s' % self.baymodel.uuid, [{'path': '/keypair_id', 'value': 'aaa', 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(404, response.status_code) self.assertTrue(response.json['errors']) def test_replace_baymodel_with_no_exist_external_network_id(self): self.mock_valid_os_res.side_effect = exception.ExternalNetworkNotFound( "aaa") response = 
self.patch_json('/baymodels/%s' % self.baymodel.uuid, [{'path': '/external_network_id', 'value': 'aaa', 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) def test_replace_baymodel_with_no_exist_image_id(self): self.mock_valid_os_res.side_effect = exception.ImageNotFound("aaa") response = self.patch_json('/baymodels/%s' % self.baymodel.uuid, [{'path': '/image_id', 'value': 'aaa', 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) def test_create_baymodel_with_no_os_distro_image(self): image_exce = exception.OSDistroFieldNotFound('img') self.mock_valid_os_res.side_effect = image_exce response = self.patch_json('/baymodels/%s' % self.baymodel.uuid, [{'path': '/image_id', 'value': 'img', 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) def test_remove_singular(self): response = self.get_json('/baymodels/%s' % self.baymodel.uuid) self.assertIsNotNone(response['dns_nameserver']) response = self.patch_json('/baymodels/%s' % self.baymodel.uuid, [{'path': '/dns_nameserver', 'op': 'remove'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) response = self.get_json('/baymodels/%s' % self.baymodel.uuid) self.assertIsNone(response['dns_nameserver']) # Assert nothing else was changed self.assertEqual(self.baymodel.uuid, response['uuid']) self.assertEqual(self.baymodel.name, response['name']) self.assertEqual(self.baymodel.apiserver_port, response['apiserver_port']) self.assertEqual(self.baymodel.image_id, response['image_id']) self.assertEqual(self.baymodel.fixed_network, response['fixed_network']) 
self.assertEqual(self.baymodel.network_driver, response['network_driver']) self.assertEqual(self.baymodel.volume_driver, response['volume_driver']) self.assertEqual(self.baymodel.docker_volume_size, response['docker_volume_size']) self.assertEqual(self.baymodel.coe, response['coe']) self.assertEqual(self.baymodel.http_proxy, response['http_proxy']) self.assertEqual(self.baymodel.https_proxy, response['https_proxy']) self.assertEqual(self.baymodel.no_proxy, response['no_proxy']) self.assertEqual(self.baymodel.labels, response['labels']) def test_remove_non_existent_property_fail(self): response = self.patch_json('/baymodels/%s' % self.baymodel.uuid, [{'path': '/non-existent', 'op': 'remove'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) def test_remove_mandatory_property_fail(self): mandatory_properties = ('/image_id', '/keypair_id', '/coe', '/external_network_id', '/server_type', '/tls_disabled', '/public', '/registry_enabled', '/cluster_distro', '/network_driver') for p in mandatory_properties: response = self.patch_json('/baymodels/%s' % self.baymodel.uuid, [{'path': p, 'op': 'remove'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) def test_add_root_non_existent(self): response = self.patch_json( '/baymodels/%s' % self.baymodel.uuid, [{'path': '/foo', 'value': 'bar', 'op': 'add'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_remove_uuid(self): response = self.patch_json('/baymodels/%s' % self.baymodel.uuid, [{'path': '/uuid', 'op': 'remove'}], expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) 
self.assertTrue(response.json['errors']) class TestPost(api_base.FunctionalTest): def setUp(self): super(TestPost, self).setUp() p = mock.patch.object(attr_validator, 'validate_os_resources') self.mock_valid_os_res = p.start() self.addCleanup(p.stop) @mock.patch('magnum.api.attr_validator.validate_image') @mock.patch('oslo_utils.timeutils.utcnow') def test_create_baymodel(self, mock_utcnow, mock_image_data): bdict = apiutils.baymodel_post_data() test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} response = self.post_json('/baymodels', bdict) self.assertEqual(201, response.status_int) # Check location header self.assertIsNotNone(response.location) expected_location = '/v1/baymodels/%s' % bdict['uuid'] self.assertEqual(expected_location, urlparse.urlparse(response.location).path) self.assertEqual(bdict['uuid'], response.json['uuid']) self.assertNotIn('updated_at', response.json.keys) return_created_at = timeutils.parse_isotime( response.json['created_at']).replace(tzinfo=None) self.assertEqual(test_time, return_created_at) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_baymodel_set_project_id_and_user_id(self, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.baymodel_post_data() self.post_json('/baymodels', bdict) cc_mock.assert_called_once_with(mock.ANY) self.assertEqual(self.context.project_id, cc_mock.call_args[0][0]['project_id']) self.assertEqual(self.context.user_id, cc_mock.call_args[0][0]['user_id']) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_baymodel_doesnt_contain_id(self, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: 
mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.baymodel_post_data(image_id='my-image') response = self.post_json('/baymodels', bdict) self.assertEqual(bdict['image_id'], response.json['image_id']) cc_mock.assert_called_once_with(mock.ANY) # Check that 'id' is not in first arg of positional args self.assertNotIn('id', cc_mock.call_args[0][0]) def _create_baymodel_raises_app_error(self, **kwargs): # Create mock for db and image data with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock,\ mock.patch('magnum.api.attr_validator.validate_image')\ as mock_image_data: mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.baymodel_post_data(**kwargs) self.assertRaises(AppError, self.post_json, '/baymodels', bdict) self.assertFalse(cc_mock.called) def test_create_baymodel_with_invalid_long_string(self): fields = ["uuid", "name", "image_id", "flavor_id", "master_flavor_id", "dns_nameserver", "keypair_id", "external_network_id", "cluster_distro", "fixed_network", "apiserver_port", "docker_volume_size", "http_proxy", "https_proxy", "no_proxy", "network_driver", "labels", "volume_driver"] for field in fields: self._create_baymodel_raises_app_error(**{field: 'i' * 256}) def test_create_baymodel_with_invalid_empty_string(self): fields = ["uuid", "name", "image_id", "flavor_id", "master_flavor_id", "dns_nameserver", "keypair_id", "external_network_id", "cluster_distro", "fixed_network", "apiserver_port", "docker_volume_size", "labels", "http_proxy", "https_proxy", "no_proxy", "network_driver", "volume_driver", "coe"] for field in fields: self._create_baymodel_raises_app_error(**{field: ''}) def test_create_baymodel_with_invalid_coe(self): self._create_baymodel_raises_app_error(coe='k8s') self._create_baymodel_raises_app_error(coe='storm') self._create_baymodel_raises_app_error(coe='meson') 
self._create_baymodel_raises_app_error(coe='osomatsu') def test_create_baymodel_with_invalid_docker_volume_size(self): self._create_baymodel_raises_app_error(docker_volume_size=-1) self._create_baymodel_raises_app_error( docker_volume_size=1, docker_storage_driver="devicemapper") self._create_baymodel_raises_app_error( docker_volume_size=2, docker_storage_driver="devicemapper") self._create_baymodel_raises_app_error(docker_volume_size='notanint') def test_create_baymodel_with_invalid_dns_nameserver(self): self._create_baymodel_raises_app_error(dns_nameserver='1.1.2') self._create_baymodel_raises_app_error(dns_nameserver='1.1..1') self._create_baymodel_raises_app_error(dns_nameserver='openstack.org') def test_create_baymodel_with_invalid_apiserver_port(self): self._create_baymodel_raises_app_error(apiserver_port=-12) self._create_baymodel_raises_app_error(apiserver_port=65536) self._create_baymodel_raises_app_error(apiserver_port=0) self._create_baymodel_raises_app_error(apiserver_port=1023) self._create_baymodel_raises_app_error(apiserver_port='not an int') @mock.patch('magnum.api.attr_validator.validate_image') def test_create_baymodel_with_labels(self, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.baymodel_post_data(labels={'key1': 'val1', 'key2': 'val2'}) response = self.post_json('/baymodels', bdict) self.assertEqual(bdict['labels'], response.json['labels']) cc_mock.assert_called_once_with(mock.ANY) self.assertNotIn('id', cc_mock.call_args[0][0]) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_baymodel_with_docker_volume_size(self, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict 
= apiutils.baymodel_post_data(docker_volume_size=99) response = self.post_json('/baymodels', bdict) self.assertEqual(bdict['docker_volume_size'], response.json['docker_volume_size']) cc_mock.assert_called_once_with(mock.ANY) self.assertNotIn('id', cc_mock.call_args[0][0]) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_baymodel_with_overlay(self, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.baymodel_post_data( docker_volume_size=1, docker_storage_driver="overlay") response = self.post_json('/baymodels', bdict) self.assertEqual(bdict['docker_volume_size'], response.json['docker_volume_size']) cc_mock.assert_called_once_with(mock.ANY) self.assertNotIn('id', cc_mock.call_args[0][0]) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_baymodel_generate_uuid(self, mock_image_data): # TODO(hongbin): Is this test correct? 
pass @mock.patch('magnum.api.attr_validator.validate_image') def _test_create_baymodel_network_driver_attr(self, baymodel_dict, baymodel_config_dict, expect_errors, mock_image_data): mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} for k, v in baymodel_config_dict.items(): CONF.set_override(k, v, 'cluster_template') with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: bdict = apiutils.baymodel_post_data(**baymodel_dict) response = self.post_json('/baymodels', bdict, expect_errors=expect_errors) if expect_errors: self.assertEqual(400, response.status_int) else: expected_driver = bdict.get('network_driver') if not expected_driver: expected_driver = ( CONF.cluster_template.swarm_default_network_driver) self.assertEqual(expected_driver, response.json['network_driver']) self.assertEqual(bdict['image_id'], response.json['image_id']) cc_mock.assert_called_once_with(mock.ANY) self.assertNotIn('id', cc_mock.call_args[0][0]) self.assertTrue(uuidutils.is_uuid_like(response.json['uuid'])) def test_create_baymodel_with_network_driver(self): baymodel_dict = {'coe': 'kubernetes', 'network_driver': 'flannel'} config_dict = {} # Default config expect_errors_flag = False self._test_create_baymodel_network_driver_attr(baymodel_dict, config_dict, expect_errors_flag) def test_create_baymodel_with_no_network_driver(self): baymodel_dict = {} config_dict = {} expect_errors_flag = False self._test_create_baymodel_network_driver_attr(baymodel_dict, config_dict, expect_errors_flag) def test_create_baymodel_with_network_driver_non_def_config(self): baymodel_dict = {'coe': 'kubernetes', 'network_driver': 'flannel'} config_dict = { 'kubernetes_allowed_network_drivers': ['flannel', 'foo']} expect_errors_flag = False self._test_create_baymodel_network_driver_attr(baymodel_dict, config_dict, expect_errors_flag) def test_create_baymodel_with_invalid_network_driver(self): baymodel_dict = {'coe': 
'kubernetes', 'network_driver': 'bad_driver'} config_dict = { 'kubernetes_allowed_network_drivers': ['flannel', 'good_driver']} expect_errors_flag = True self._test_create_baymodel_network_driver_attr(baymodel_dict, config_dict, expect_errors_flag) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_baymodel_with_volume_driver(self, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.baymodel_post_data(volume_driver='rexray') response = self.post_json('/baymodels', bdict) self.assertEqual(bdict['volume_driver'], response.json['volume_driver']) cc_mock.assert_called_once_with(mock.ANY) self.assertNotIn('id', cc_mock.call_args[0][0]) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_baymodel_with_no_volume_driver(self, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.baymodel_post_data() response = self.post_json('/baymodels', bdict) self.assertEqual(bdict['volume_driver'], response.json['volume_driver']) cc_mock.assert_called_once_with(mock.ANY) self.assertNotIn('id', cc_mock.call_args[0][0]) @mock.patch('magnum.api.attr_validator.validate_image') @mock.patch.object(magnum_policy, 'enforce') def test_create_baymodel_public_success(self, mock_policy, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: mock_policy.return_value = True mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.baymodel_post_data(public=True) response = self.post_json('/baymodels', bdict) self.assertTrue(response.json['public']) 
mock_policy.assert_called_with(mock.ANY, "baymodel:publish", None, do_raise=False) cc_mock.assert_called_once_with(mock.ANY) self.assertNotIn('id', cc_mock.call_args[0][0]) self.assertTrue(cc_mock.call_args[0][0]['public']) @mock.patch('magnum.api.attr_validator.validate_image') @mock.patch.object(magnum_policy, 'enforce') def test_create_baymodel_public_fail(self, mock_policy, mock_image_data): with mock.patch.object(self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template): # make policy enforcement fail mock_policy.return_value = False mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.baymodel_post_data(public=True) self.assertRaises(AppError, self.post_json, '/baymodels', bdict) @mock.patch('magnum.api.attr_validator.validate_image') @mock.patch.object(magnum_policy, 'enforce') def test_create_baymodel_public_not_set(self, mock_policy, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.baymodel_post_data(public=False) response = self.post_json('/baymodels', bdict) self.assertFalse(response.json['public']) # policy enforcement is called only once for enforce_wsgi self.assertEqual(1, mock_policy.call_count) cc_mock.assert_called_once_with(mock.ANY) self.assertNotIn('id', cc_mock.call_args[0][0]) self.assertFalse(cc_mock.call_args[0][0]['public']) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_baymodel_with_no_os_distro_image(self, mock_image_data): mock_image_data.side_effect = exception.OSDistroFieldNotFound('img') bdict = apiutils.baymodel_post_data() del bdict['uuid'] response = self.post_json('/baymodels', bdict, expect_errors=True) self.assertEqual(400, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_baymodel_with_os_distro_image(self, 
mock_image_data): mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.baymodel_post_data() del bdict['uuid'] response = self.post_json('/baymodels', bdict, expect_errors=True) self.assertEqual(201, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_baymodel_with_image_name(self, mock_image_data): mock_image = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} mock_image_data.return_value = mock_image bdict = apiutils.baymodel_post_data() del bdict['uuid'] response = self.post_json('/baymodels', bdict, expect_errors=True) self.assertEqual(201, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_baymodel_with_no_exist_image_name(self, mock_image_data): mock_image_data.side_effect = exception.ResourceNotFound('test-img') bdict = apiutils.baymodel_post_data() del bdict['uuid'] response = self.post_json('/baymodels', bdict, expect_errors=True) self.assertEqual(404, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_baymodel_with_multi_image_name(self, mock_image_data): mock_image_data.side_effect = exception.Conflict('Multiple images') bdict = apiutils.baymodel_post_data() del bdict['uuid'] response = self.post_json('/baymodels', bdict, expect_errors=True) self.assertEqual(409, response.status_int) def test_create_baymodel_without_image_id(self): bdict = apiutils.baymodel_post_data() del bdict['image_id'] response = self.post_json('/baymodels', bdict, expect_errors=True) self.assertEqual(400, response.status_int) def test_create_baymodel_without_keypair_id(self): bdict = apiutils.baymodel_post_data() del bdict['keypair_id'] response = self.post_json('/baymodels', bdict, expect_errors=True) self.assertEqual(400, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_baymodel_with_dns(self, mock_image_data): mock_image_data.return_value = {'name': 'mock_name', 
'os_distro': 'fedora-atomic'} bdict = apiutils.baymodel_post_data() response = self.post_json('/baymodels', bdict) self.assertEqual(201, response.status_int) self.assertEqual(bdict['dns_nameserver'], response.json['dns_nameserver']) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_baymodel_with_no_exist_keypair(self, mock_image_data): self.mock_valid_os_res.side_effect = exception.KeyPairNotFound("Test") mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.baymodel_post_data() response = self.post_json('/baymodels', bdict, expect_errors=True) self.assertEqual(404, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_baymodel_with_flavor(self, mock_image_data): mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.baymodel_post_data() response = self.post_json('/baymodels', bdict) self.assertEqual(201, response.status_int) self.assertEqual(bdict['flavor_id'], response.json['flavor_id']) self.assertEqual(bdict['master_flavor_id'], response.json['master_flavor_id']) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_baymodel_with_no_exist_flavor(self, mock_image_data): self.mock_valid_os_res.side_effect = exception.FlavorNotFound("flavor") mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.baymodel_post_data() response = self.post_json('/baymodels', bdict, expect_errors=True) self.assertEqual(400, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_baymodel_with_external_network(self, mock_image_data): mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.baymodel_post_data() response = self.post_json('/baymodels', bdict) self.assertEqual(201, response.status_int) self.assertEqual(bdict['external_network_id'], response.json['external_network_id']) 
@mock.patch('magnum.api.attr_validator.validate_image') def test_create_baymodel_with_no_exist_external_network(self, mock_image_data): self.mock_valid_os_res.side_effect = exception.ExternalNetworkNotFound( "test") mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.baymodel_post_data() response = self.post_json('/baymodels', bdict, expect_errors=True) self.assertEqual(400, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_baymodel_without_name(self, mock_image_data): with mock.patch.object(self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template): mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.baymodel_post_data() bdict.pop('name') resp = self.post_json('/baymodels', bdict) self.assertEqual(201, resp.status_int) self.assertIsNotNone(resp.json['name']) class TestDelete(api_base.FunctionalTest): def test_delete_baymodel(self): baymodel = obj_utils.create_test_cluster_template(self.context) self.delete('/baymodels/%s' % baymodel.uuid) response = self.get_json('/baymodels/%s' % baymodel.uuid, expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_delete_baymodel_with_bay(self): baymodel = obj_utils.create_test_cluster_template(self.context) obj_utils.create_test_cluster(self.context, cluster_template_id=baymodel.uuid) response = self.delete('/baymodels/%s' % baymodel.uuid, expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) self.assertIn(baymodel.uuid, response.json['errors'][0]['detail']) def test_delete_baymodel_not_found(self): uuid = uuidutils.generate_uuid() response = self.delete('/baymodels/%s' % uuid, expect_errors=True) self.assertEqual(404, response.status_int) 
self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_delete_baymodel_with_name(self): baymodel = obj_utils.create_test_cluster_template(self.context) response = self.delete('/baymodels/%s' % baymodel['name'], expect_errors=True) self.assertEqual(204, response.status_int) def test_delete_baymodel_with_name_not_found(self): response = self.delete('/baymodels/not_found', expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_delete_multiple_baymodel_by_name(self): obj_utils.create_test_cluster_template( self.context, name='test_baymodel', uuid=uuidutils.generate_uuid()) obj_utils.create_test_cluster_template( self.context, name='test_baymodel', uuid=uuidutils.generate_uuid()) response = self.delete('/baymodels/test_baymodel', expect_errors=True) self.assertEqual(409, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) class TestBayModelPolicyEnforcement(api_base.FunctionalTest): def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({rule: "project:non_fake"}) response = func(*arg, **kwarg) self.assertEqual(403, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy doesn't allow %s to be performed." 
% rule, response.json['errors'][0]['detail']) def test_policy_disallow_get_all(self): self._common_policy_check( "baymodel:get_all", self.get_json, '/baymodels', expect_errors=True) def test_policy_disallow_get_one(self): baymodel = obj_utils.create_test_cluster_template(self.context) self._common_policy_check( "baymodel:get", self.get_json, '/baymodels/%s' % baymodel.uuid, expect_errors=True) def test_policy_disallow_detail(self): self._common_policy_check( "baymodel:detail", self.get_json, '/baymodels/%s/detail' % uuidutils.generate_uuid(), expect_errors=True) def test_policy_disallow_update(self): baymodel = obj_utils.create_test_cluster_template( self.context, name='example_A', uuid=uuidutils.generate_uuid()) self._common_policy_check( "baymodel:update", self.patch_json, '/baymodels/%s' % baymodel.name, [{'path': '/name', 'value': "new_name", 'op': 'replace'}], expect_errors=True) def test_policy_disallow_create(self): bdict = apiutils.baymodel_post_data(name='bay_model_example_A') self._common_policy_check( "baymodel:create", self.post_json, '/baymodels', bdict, expect_errors=True) def test_policy_disallow_delete(self): baymodel = obj_utils.create_test_cluster_template(self.context) self._common_policy_check( "baymodel:delete", self.delete, '/baymodels/%s' % baymodel.uuid, expect_errors=True) def _owner_check(self, rule, func, *args, **kwargs): self.policy.set_rules({rule: "user_id:%(user_id)s"}) response = func(*args, **kwargs) self.assertEqual(403, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy doesn't allow %s to be performed." 
% rule, response.json['errors'][0]['detail']) def test_policy_only_owner_get_one(self): baymodel = obj_utils.create_test_cluster_template(self.context, user_id='another') self._owner_check("baymodel:get", self.get_json, '/baymodels/%s' % baymodel.uuid, expect_errors=True) def test_policy_only_owner_update(self): baymodel = obj_utils.create_test_cluster_template(self.context, user_id='another') self._owner_check( "baymodel:update", self.patch_json, '/baymodels/%s' % baymodel.uuid, [{'path': '/name', 'value': "new_name", 'op': 'replace'}], expect_errors=True) def test_policy_only_owner_delete(self): baymodel = obj_utils.create_test_cluster_template(self.context, user_id='another') self._owner_check( "baymodel:delete", self.delete, '/baymodels/%s' % baymodel.uuid, expect_errors=True) magnum-6.1.0/magnum/tests/unit/api/controllers/v1/test_cluster_template.py0000666000175100017510000016405513244017334027103 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import datetime import mock from oslo_config import cfg from oslo_utils import timeutils from oslo_utils import uuidutils from six.moves.urllib import parse as urlparse from webtest.app import AppError from wsme import types as wtypes from magnum.api import attr_validator from magnum.api.controllers.v1 import cluster_template as api_cluster_template from magnum.common import exception from magnum.common import policy as magnum_policy from magnum.tests import base from magnum.tests.unit.api import base as api_base from magnum.tests.unit.api import utils as apiutils from magnum.tests.unit.objects import utils as obj_utils class TestClusterTemplateObject(base.TestCase): def test_cluster_template_init(self): cluster_template_dict = apiutils.cluster_template_post_data() del cluster_template_dict['image_id'] del cluster_template_dict['registry_enabled'] del cluster_template_dict['tls_disabled'] del cluster_template_dict['public'] del cluster_template_dict['server_type'] del cluster_template_dict['master_lb_enabled'] del cluster_template_dict['floating_ip_enabled'] cluster_template = api_cluster_template.ClusterTemplate( **cluster_template_dict) self.assertEqual(wtypes.Unset, cluster_template.image_id) self.assertFalse(cluster_template.registry_enabled) self.assertFalse(cluster_template.tls_disabled) self.assertFalse(cluster_template.public) self.assertEqual('vm', cluster_template.server_type) self.assertFalse(cluster_template.master_lb_enabled) self.assertTrue(cluster_template.floating_ip_enabled) class TestListClusterTemplate(api_base.FunctionalTest): _cluster_template_attrs = ('name', 'apiserver_port', 'network_driver', 'coe', 'flavor_id', 'fixed_network', 'dns_nameserver', 'http_proxy', 'docker_volume_size', 'server_type', 'cluster_distro', 'external_network_id', 'image_id', 'registry_enabled', 'no_proxy', 'keypair_id', 'https_proxy', 'tls_disabled', 'public', 'labels', 'master_flavor_id', 'volume_driver', 'insecure_registry') def test_empty(self): response = 
self.get_json('/clustertemplates') self.assertEqual([], response['clustertemplates']) def test_one(self): cluster_template = obj_utils.create_test_cluster_template(self.context) response = self.get_json('/clustertemplates') self.assertEqual(cluster_template.uuid, response['clustertemplates'][0]["uuid"]) self._verify_attrs(self._cluster_template_attrs, response['clustertemplates'][0]) def test_get_one(self): cluster_template = obj_utils.create_test_cluster_template(self.context) response = self.get_json('/clustertemplates/%s' % cluster_template['uuid']) self.assertEqual(cluster_template.uuid, response['uuid']) self._verify_attrs(self._cluster_template_attrs, response) def test_get_one_by_name(self): cluster_template = obj_utils.create_test_cluster_template(self.context) response = self.get_json('/clustertemplates/%s' % cluster_template['name']) self.assertEqual(cluster_template.uuid, response['uuid']) self._verify_attrs(self._cluster_template_attrs, response) def test_get_one_by_name_not_found(self): response = self.get_json( '/clustertemplates/not_found', expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_get_one_by_uuid(self): temp_uuid = uuidutils.generate_uuid() obj_utils.create_test_cluster_template(self.context, uuid=temp_uuid) response = self.get_json( '/clustertemplates/%s' % temp_uuid) self.assertEqual(temp_uuid, response['uuid']) def test_get_one_by_uuid_not_found(self): temp_uuid = uuidutils.generate_uuid() response = self.get_json( '/clustertemplates/%s' % temp_uuid, expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) @mock.patch("magnum.common.policy.enforce") @mock.patch("magnum.common.context.make_context") def test_get_one_by_uuid_admin(self, mock_context, mock_policy): temp_uuid = uuidutils.generate_uuid() 
obj_utils.create_test_cluster_template(self.context, uuid=temp_uuid, project_id=temp_uuid) self.context.is_admin = True response = self.get_json( '/clustertemplates/%s' % temp_uuid) self.assertEqual(temp_uuid, response['uuid']) def test_get_one_by_name_multiple_cluster_template(self): obj_utils.create_test_cluster_template( self.context, name='test_clustertemplate', uuid=uuidutils.generate_uuid()) obj_utils.create_test_cluster_template( self.context, name='test_clustertemplate', uuid=uuidutils.generate_uuid()) response = self.get_json( '/clustertemplates/test_clustertemplate', expect_errors=True) self.assertEqual(409, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_get_all_with_pagination_marker(self): bm_list = [] for id_ in range(4): cluster_template = obj_utils.create_test_cluster_template( self.context, id=id_, uuid=uuidutils.generate_uuid()) bm_list.append(cluster_template) response = self.get_json('/clustertemplates?limit=3&marker=%s' % bm_list[2].uuid) self.assertEqual(1, len(response['clustertemplates'])) self.assertEqual(bm_list[-1].uuid, response['clustertemplates'][0]['uuid']) @mock.patch("magnum.common.policy.enforce") @mock.patch("magnum.common.context.make_context") def test_get_all_with_all_projects(self, mock_context, mock_policy): for id_ in range(4): obj_utils.create_test_cluster_template( self.context, id=id_, project_id=id_, uuid=uuidutils.generate_uuid()) self.context.is_admin = True response = self.get_json('/clustertemplates') self.assertEqual(4, len(response['clustertemplates'])) def test_detail(self): cluster_template = obj_utils.create_test_cluster_template(self.context) response = self.get_json('/clustertemplates/detail') self.assertEqual(cluster_template.uuid, response['clustertemplates'][0]["uuid"]) self._verify_attrs(self._cluster_template_attrs, response['clustertemplates'][0]) def test_detail_with_pagination_marker(self): bm_list = [] for id_ in 
range(4): cluster_template = obj_utils.create_test_cluster_template( self.context, id=id_, uuid=uuidutils.generate_uuid()) bm_list.append(cluster_template) response = self.get_json('/clustertemplates/detail?limit=3&marker=%s' % bm_list[2].uuid) self.assertEqual(1, len(response['clustertemplates'])) self.assertEqual(bm_list[-1].uuid, response['clustertemplates'][0]['uuid']) self._verify_attrs(self._cluster_template_attrs, response['clustertemplates'][0]) def test_detail_against_single(self): cluster_template = obj_utils.create_test_cluster_template(self.context) response = self.get_json('/clustertemplates/%s/detail' % cluster_template['uuid'], expect_errors=True) self.assertEqual(404, response.status_int) def test_many(self): bm_list = [] for id_ in range(5): cluster_template = obj_utils.create_test_cluster_template( self.context, id=id_, uuid=uuidutils.generate_uuid()) bm_list.append(cluster_template.uuid) response = self.get_json('/clustertemplates') self.assertEqual(len(bm_list), len(response['clustertemplates'])) uuids = [bm['uuid'] for bm in response['clustertemplates']] self.assertEqual(sorted(bm_list), sorted(uuids)) def test_links(self): uuid = uuidutils.generate_uuid() obj_utils.create_test_cluster_template(self.context, id=1, uuid=uuid) response = self.get_json('/clustertemplates/%s' % uuid) self.assertIn('links', response.keys()) self.assertEqual(2, len(response['links'])) self.assertIn(uuid, response['links'][0]['href']) for l in response['links']: bookmark = l['rel'] == 'bookmark' self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) def test_collection_links(self): for id_ in range(5): obj_utils.create_test_cluster_template( self.context, id=id_, uuid=uuidutils.generate_uuid()) response = self.get_json('/clustertemplates/?limit=3') self.assertEqual(3, len(response['clustertemplates'])) next_marker = response['clustertemplates'][-1]['uuid'] self.assertIn(next_marker, response['next']) def test_collection_links_default_limit(self): 
cfg.CONF.set_override('max_limit', 3, 'api') for id_ in range(5): obj_utils.create_test_cluster_template( self.context, id=id_, uuid=uuidutils.generate_uuid()) response = self.get_json('/clustertemplates') self.assertEqual(3, len(response['clustertemplates'])) next_marker = response['clustertemplates'][-1]['uuid'] self.assertIn(next_marker, response['next']) class TestPatch(api_base.FunctionalTest): def setUp(self): super(TestPatch, self).setUp() p = mock.patch.object(attr_validator, 'validate_os_resources') self.mock_valid_os_res = p.start() self.addCleanup(p.stop) self.cluster_template = obj_utils.create_test_cluster_template( self.context, name='cluster_model_example_A', image_id='nerdherd', apiserver_port=8080, fixed_network='private', flavor_id='m1.magnum', master_flavor_id='m1.magnum', external_network_id='public', keypair_id='test', volume_driver='rexray', public=False, docker_volume_size=20, coe='swarm', labels={'key1': 'val1', 'key2': 'val2'} ) def test_update_not_found(self): uuid = uuidutils.generate_uuid() response = self.patch_json('/clustertemplates/%s' % uuid, [{'path': '/name', 'value': 'cluster_model_example_B', 'op': 'add'}], expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_update_cluster_template_with_cluster(self): cluster_template = obj_utils.create_test_cluster_template(self.context) obj_utils.create_test_cluster( self.context, cluster_template_id=cluster_template.uuid) response = self.patch_json('/clustertemplates/%s' % cluster_template.uuid, [{'path': '/name', 'value': 'cluster_model_example_B', 'op': 'replace'}], expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) self.assertIn(cluster_template.uuid, response.json['errors'][0]['detail']) @mock.patch.object(magnum_policy, 'enforce') def 
test_update_public_cluster_template_success(self, mock_policy): mock_policy.return_value = True response = self.patch_json('/clustertemplates/%s' % self.cluster_template.uuid, [{'path': '/public', 'value': True, 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) response = self.get_json('/clustertemplates/%s' % self.cluster_template.uuid) self.assertTrue(response['public']) @mock.patch.object(magnum_policy, 'enforce') def test_update_public_cluster_template_fail(self, mock_policy): mock_policy.return_value = False self.assertRaises(AppError, self.patch_json, '/clustertemplates/%s' % self.cluster_template.uuid, [{'path': '/public', 'value': True, 'op': 'replace'}]) @mock.patch.object(magnum_policy, 'enforce') def test_update_cluster_template_with_cluster_allow_update(self, mock_policy): mock_policy.return_value = True cluster_template = obj_utils.create_test_cluster_template(self.context) obj_utils.create_test_cluster( self.context, cluster_template_id=cluster_template.uuid) response = self.patch_json('/clustertemplates/%s' % cluster_template.uuid, [{'path': '/public', 'value': True, 'op': 'replace'}], expect_errors=True) self.assertEqual(200, response.status_int) response = self.get_json('/clustertemplates/%s' % self.cluster_template.uuid) self.assertEqual(response['public'], True) def test_update_cluster_template_replace_labels_success(self): cluster_template = obj_utils.create_test_cluster_template(self.context) response = self.patch_json('/clustertemplates/%s' % cluster_template.uuid, [{'path': '/labels', 'value': '{\'etcd_volume_size\': \'1\'}', 'op': 'replace'}], expect_errors=True) self.assertEqual(200, response.status_int) response = self.get_json('/clustertemplates/%s' % self.cluster_template.uuid) self.assertEqual(response['labels'], {'etcd_volume_size': '1'}) def test_update_cluster_template_with_cluster_not_allow_update(self): cluster_template = 
obj_utils.create_test_cluster_template(self.context) obj_utils.create_test_cluster( self.context, cluster_template_id=cluster_template.uuid) response = self.patch_json('/clustertemplates/%s' % cluster_template.uuid, [{'path': '/name', 'value': 'new_name', 'op': 'replace'}], expect_errors=True) self.assertEqual(400, response.status_code) @mock.patch('oslo_utils.timeutils.utcnow') def test_replace_singular(self, mock_utcnow): name = 'cluster_model_example_B' test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time response = self.patch_json('/clustertemplates/%s' % self.cluster_template.uuid, [{'path': '/name', 'value': name, 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) response = self.get_json('/clustertemplates/%s' % self.cluster_template.uuid) self.assertEqual(name, response['name']) return_updated_at = timeutils.parse_isotime( response['updated_at']).replace(tzinfo=None) self.assertEqual(test_time, return_updated_at) # Assert nothing else was changed self.assertEqual(self.cluster_template.uuid, response['uuid']) self.assertEqual(self.cluster_template.image_id, response['image_id']) self.assertEqual(self.cluster_template.apiserver_port, response['apiserver_port']) self.assertEqual(self.cluster_template.fixed_network, response['fixed_network']) self.assertEqual(self.cluster_template.network_driver, response['network_driver']) self.assertEqual(self.cluster_template.volume_driver, response['volume_driver']) self.assertEqual(self.cluster_template.docker_volume_size, response['docker_volume_size']) self.assertEqual(self.cluster_template.coe, response['coe']) self.assertEqual(self.cluster_template.http_proxy, response['http_proxy']) self.assertEqual(self.cluster_template.https_proxy, response['https_proxy']) self.assertEqual(self.cluster_template.no_proxy, response['no_proxy']) self.assertEqual(self.cluster_template.labels, response['labels']) def 
test_replace_cluster_template_with_no_exist_flavor_id(self): self.mock_valid_os_res.side_effect = exception.FlavorNotFound("aaa") response = self.patch_json('/clustertemplates/%s' % self.cluster_template.uuid, [{'path': '/flavor_id', 'value': 'aaa', 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) def test_replace_cluster_template_with_no_exist_keypair_id(self): self.mock_valid_os_res.side_effect = exception.KeyPairNotFound("aaa") response = self.patch_json('/clustertemplates/%s' % self.cluster_template.uuid, [{'path': '/keypair_id', 'value': 'aaa', 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(404, response.status_code) self.assertTrue(response.json['errors']) def test_replace_cluster_template_with_no_exist_external_network_id(self): self.mock_valid_os_res.side_effect = exception.ExternalNetworkNotFound( "aaa") response = self.patch_json('/clustertemplates/%s' % self.cluster_template.uuid, [{'path': '/external_network_id', 'value': 'aaa', 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) def test_replace_cluster_template_with_no_exist_image_id(self): self.mock_valid_os_res.side_effect = exception.ImageNotFound("aaa") response = self.patch_json('/clustertemplates/%s' % self.cluster_template.uuid, [{'path': '/image_id', 'value': 'aaa', 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) def test_create_cluster_template_with_no_os_distro_image(self): image_exce = exception.OSDistroFieldNotFound('img') self.mock_valid_os_res.side_effect = image_exce response = self.patch_json('/clustertemplates/%s' % 
self.cluster_template.uuid, [{'path': '/image_id', 'value': 'img', 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) def test_remove_singular(self): response = self.get_json('/clustertemplates/%s' % self.cluster_template.uuid) self.assertIsNotNone(response['dns_nameserver']) response = self.patch_json('/clustertemplates/%s' % self.cluster_template.uuid, [{'path': '/dns_nameserver', 'op': 'remove'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) response = self.get_json('/clustertemplates/%s' % self.cluster_template.uuid) self.assertIsNone(response['dns_nameserver']) # Assert nothing else was changed self.assertEqual(self.cluster_template.uuid, response['uuid']) self.assertEqual(self.cluster_template.name, response['name']) self.assertEqual(self.cluster_template.apiserver_port, response['apiserver_port']) self.assertEqual(self.cluster_template.image_id, response['image_id']) self.assertEqual(self.cluster_template.fixed_network, response['fixed_network']) self.assertEqual(self.cluster_template.network_driver, response['network_driver']) self.assertEqual(self.cluster_template.volume_driver, response['volume_driver']) self.assertEqual(self.cluster_template.docker_volume_size, response['docker_volume_size']) self.assertEqual(self.cluster_template.coe, response['coe']) self.assertEqual(self.cluster_template.http_proxy, response['http_proxy']) self.assertEqual(self.cluster_template.https_proxy, response['https_proxy']) self.assertEqual(self.cluster_template.no_proxy, response['no_proxy']) self.assertEqual(self.cluster_template.labels, response['labels']) def test_remove_non_existent_property_fail(self): response = self.patch_json('/clustertemplates/%s' % self.cluster_template.uuid, [{'path': '/non-existent', 'op': 'remove'}], expect_errors=True) self.assertEqual('application/json', 
response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) def test_remove_mandatory_property_fail(self): mandatory_properties = ('/image_id', '/coe', '/external_network_id', '/server_type', '/tls_disabled', '/public', '/registry_enabled', '/cluster_distro', '/network_driver') for p in mandatory_properties: response = self.patch_json('/clustertemplates/%s' % self.cluster_template.uuid, [{'path': p, 'op': 'remove'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) def test_add_root_non_existent(self): response = self.patch_json( '/clustertemplates/%s' % self.cluster_template.uuid, [{'path': '/foo', 'value': 'bar', 'op': 'add'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_remove_uuid(self): response = self.patch_json('/clustertemplates/%s' % self.cluster_template.uuid, [{'path': '/uuid', 'op': 'remove'}], expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) class TestPost(api_base.FunctionalTest): def setUp(self): super(TestPost, self).setUp() p = mock.patch.object(attr_validator, 'validate_os_resources') self.mock_valid_os_res = p.start() self.addCleanup(p.stop) @mock.patch('magnum.api.attr_validator.validate_image') @mock.patch('oslo_utils.timeutils.utcnow') def test_create_cluster_template(self, mock_utcnow, mock_image_data): bdict = apiutils.cluster_template_post_data() test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} response = self.post_json('/clustertemplates', bdict) self.assertEqual(201, response.status_int) # Check location header 
self.assertIsNotNone(response.location) expected_location = '/v1/clustertemplates/%s' % bdict['uuid'] self.assertEqual(expected_location, urlparse.urlparse(response.location).path) self.assertEqual(bdict['uuid'], response.json['uuid']) self.assertNotIn('updated_at', response.json.keys) return_created_at = timeutils.parse_isotime( response.json['created_at']).replace(tzinfo=None) self.assertEqual(test_time, return_created_at) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_set_project_id_and_user_id( self, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.cluster_template_post_data() self.post_json('/clustertemplates', bdict) cc_mock.assert_called_once_with(mock.ANY) self.assertEqual(self.context.project_id, cc_mock.call_args[0][0]['project_id']) self.assertEqual(self.context.user_id, cc_mock.call_args[0][0]['user_id']) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_doesnt_contain_id(self, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.cluster_template_post_data(image_id='my-image') response = self.post_json('/clustertemplates', bdict) self.assertEqual(bdict['image_id'], response.json['image_id']) cc_mock.assert_called_once_with(mock.ANY) # Check that 'id' is not in first arg of positional args self.assertNotIn('id', cc_mock.call_args[0][0]) def _create_model_raises_app_error(self, **kwargs): # Create mock for db and image data with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock,\ mock.patch('magnum.api.attr_validator.validate_image')\ as mock_image_data: 
mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.cluster_template_post_data(**kwargs) self.assertRaises(AppError, self.post_json, '/clustertemplates', bdict) self.assertFalse(cc_mock.called) def test_create_cluster_template_with_invalid_long_string(self): fields = ["uuid", "name", "image_id", "flavor_id", "master_flavor_id", "dns_nameserver", "keypair_id", "external_network_id", "cluster_distro", "fixed_network", "apiserver_port", "docker_volume_size", "http_proxy", "https_proxy", "no_proxy", "network_driver", "labels", "volume_driver"] for field in fields: self._create_model_raises_app_error(**{field: 'i' * 256}) def test_create_cluster_template_with_invalid_empty_string(self): fields = ["uuid", "name", "image_id", "flavor_id", "master_flavor_id", "dns_nameserver", "keypair_id", "external_network_id", "cluster_distro", "fixed_network", "apiserver_port", "docker_volume_size", "labels", "http_proxy", "https_proxy", "no_proxy", "network_driver", "volume_driver", "coe"] for field in fields: self._create_model_raises_app_error(**{field: ''}) def test_create_cluster_template_with_invalid_coe(self): self._create_model_raises_app_error(coe='k8s') self._create_model_raises_app_error(coe='storm') self._create_model_raises_app_error(coe='meson') self._create_model_raises_app_error(coe='osomatsu') def test_create_cluster_template_with_invalid_docker_volume_size(self): self._create_model_raises_app_error(docker_volume_size=-1) self._create_model_raises_app_error( docker_volume_size=1, docker_storage_driver="devicemapper") self._create_model_raises_app_error( docker_volume_size=2, docker_storage_driver="devicemapper") self._create_model_raises_app_error(docker_volume_size='notanint') def test_create_cluster_template_with_invalid_dns_nameserver(self): self._create_model_raises_app_error(dns_nameserver='1.1.2') self._create_model_raises_app_error(dns_nameserver='1.1..1') 
self._create_model_raises_app_error(dns_nameserver='openstack.org') def test_create_cluster_template_with_invalid_apiserver_port(self): self._create_model_raises_app_error(apiserver_port=-12) self._create_model_raises_app_error(apiserver_port=65536) self._create_model_raises_app_error(apiserver_port=0) self._create_model_raises_app_error(apiserver_port=1023) self._create_model_raises_app_error(apiserver_port='not an int') @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_labels(self, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.cluster_template_post_data( labels={'key1': 'val1', 'key2': 'val2'}) response = self.post_json('/clustertemplates', bdict) self.assertEqual(bdict['labels'], response.json['labels']) cc_mock.assert_called_once_with(mock.ANY) self.assertNotIn('id', cc_mock.call_args[0][0]) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_docker_volume_size(self, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.cluster_template_post_data(docker_volume_size=99) response = self.post_json('/clustertemplates', bdict) self.assertEqual(bdict['docker_volume_size'], response.json['docker_volume_size']) cc_mock.assert_called_once_with(mock.ANY) self.assertNotIn('id', cc_mock.call_args[0][0]) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_overlay(self, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} 
bdict = apiutils.cluster_template_post_data( docker_volume_size=1, docker_storage_driver="overlay") response = self.post_json('/clustertemplates', bdict) self.assertEqual(bdict['docker_volume_size'], response.json['docker_volume_size']) cc_mock.assert_called_once_with(mock.ANY) self.assertNotIn('id', cc_mock.call_args[0][0]) @mock.patch('magnum.api.attr_validator.validate_image') def _test_create_cluster_template_network_driver_attr( self, cluster_template_dict, cluster_template_config_dict, expect_errors, mock_image_data): mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} for k, v in cluster_template_config_dict.items(): cfg.CONF.set_override(k, v, 'cluster_template') with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: bdict = apiutils.cluster_template_post_data( **cluster_template_dict) response = self.post_json('/clustertemplates', bdict, expect_errors=expect_errors) if expect_errors: self.assertEqual(400, response.status_int) else: expected_driver = bdict.get('network_driver') if not expected_driver: expected_driver = ( cfg.CONF.cluster_template.swarm_default_network_driver) self.assertEqual(expected_driver, response.json['network_driver']) self.assertEqual(bdict['image_id'], response.json['image_id']) cc_mock.assert_called_once_with(mock.ANY) self.assertNotIn('id', cc_mock.call_args[0][0]) self.assertTrue(uuidutils.is_uuid_like(response.json['uuid'])) def test_create_cluster_template_with_network_driver(self): cluster_template_dict = {'coe': 'kubernetes', 'network_driver': 'flannel'} config_dict = {} # Default config expect_errors_flag = False self._test_create_cluster_template_network_driver_attr( cluster_template_dict, config_dict, expect_errors_flag) def test_create_cluster_template_with_no_network_driver(self): cluster_template_dict = {} config_dict = {} expect_errors_flag = False self._test_create_cluster_template_network_driver_attr( cluster_template_dict, 
config_dict, expect_errors_flag) def test_create_cluster_template_with_network_driver_non_def_config(self): cluster_template_dict = {'coe': 'kubernetes', 'network_driver': 'flannel'} config_dict = { 'kubernetes_allowed_network_drivers': ['flannel', 'foo']} expect_errors_flag = False self._test_create_cluster_template_network_driver_attr( cluster_template_dict, config_dict, expect_errors_flag) def test_create_cluster_template_with_invalid_network_driver(self): cluster_template_dict = {'coe': 'kubernetes', 'network_driver': 'bad_driver'} config_dict = { 'kubernetes_allowed_network_drivers': ['flannel', 'good_driver']} expect_errors_flag = True self._test_create_cluster_template_network_driver_attr( cluster_template_dict, config_dict, expect_errors_flag) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_volume_driver(self, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.cluster_template_post_data(volume_driver='rexray') response = self.post_json('/clustertemplates', bdict) self.assertEqual(bdict['volume_driver'], response.json['volume_driver']) cc_mock.assert_called_once_with(mock.ANY) self.assertNotIn('id', cc_mock.call_args[0][0]) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_no_volume_driver(self, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.cluster_template_post_data() response = self.post_json('/clustertemplates', bdict) self.assertEqual(bdict['volume_driver'], response.json['volume_driver']) cc_mock.assert_called_once_with(mock.ANY) self.assertNotIn('id', cc_mock.call_args[0][0]) 
@mock.patch('magnum.api.attr_validator.validate_image') @mock.patch.object(magnum_policy, 'enforce') def test_create_cluster_template_public_success(self, mock_policy, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: mock_policy.return_value = True mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.cluster_template_post_data(public=True) response = self.post_json('/clustertemplates', bdict) self.assertTrue(response.json['public']) mock_policy.assert_called_with(mock.ANY, "clustertemplate:publish", None, do_raise=False) cc_mock.assert_called_once_with(mock.ANY) self.assertNotIn('id', cc_mock.call_args[0][0]) self.assertTrue(cc_mock.call_args[0][0]['public']) @mock.patch('magnum.api.attr_validator.validate_image') @mock.patch.object(magnum_policy, 'enforce') def test_create_cluster_template_public_fail(self, mock_policy, mock_image_data): with mock.patch.object(self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template): # make policy enforcement fail mock_policy.return_value = False mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.cluster_template_post_data(public=True) self.assertRaises(AppError, self.post_json, '/clustertemplates', bdict) @mock.patch('magnum.api.attr_validator.validate_image') @mock.patch.object(magnum_policy, 'enforce') def test_create_cluster_template_public_not_set(self, mock_policy, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.cluster_template_post_data(public=False) response = self.post_json('/clustertemplates', bdict) self.assertFalse(response.json['public']) # policy enforcement is called only once for enforce_wsgi self.assertEqual(1, 
mock_policy.call_count) cc_mock.assert_called_once_with(mock.ANY) self.assertNotIn('id', cc_mock.call_args[0][0]) self.assertFalse(cc_mock.call_args[0][0]['public']) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_no_os_distro_image(self, mock_image_data): mock_image_data.side_effect = exception.OSDistroFieldNotFound('img') bdict = apiutils.cluster_template_post_data() del bdict['uuid'] response = self.post_json('/clustertemplates', bdict, expect_errors=True) self.assertEqual(400, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_os_distro_image(self, mock_image_data): mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.cluster_template_post_data() del bdict['uuid'] response = self.post_json('/clustertemplates', bdict, expect_errors=True) self.assertEqual(201, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_image_name(self, mock_image_data): mock_image = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} mock_image_data.return_value = mock_image bdict = apiutils.cluster_template_post_data() del bdict['uuid'] response = self.post_json('/clustertemplates', bdict, expect_errors=True) self.assertEqual(201, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_no_exist_image_name(self, mock_image_data): mock_image_data.side_effect = exception.ResourceNotFound('test-img') bdict = apiutils.cluster_template_post_data() del bdict['uuid'] response = self.post_json('/clustertemplates', bdict, expect_errors=True) self.assertEqual(404, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_multi_image_name(self, mock_image_data): mock_image_data.side_effect = exception.Conflict('Multiple images') bdict = 
apiutils.cluster_template_post_data() del bdict['uuid'] response = self.post_json('/clustertemplates', bdict, expect_errors=True) self.assertEqual(409, response.status_int) def test_create_cluster_template_without_image_id(self): bdict = apiutils.cluster_template_post_data() del bdict['image_id'] response = self.post_json('/clustertemplates', bdict, expect_errors=True) self.assertEqual(400, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_without_keypair_id(self, mock_image_data): mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.cluster_template_post_data() del bdict['keypair_id'] response = self.post_json('/clustertemplates', bdict) self.assertEqual(201, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_dns(self, mock_image_data): mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.cluster_template_post_data() response = self.post_json('/clustertemplates', bdict) self.assertEqual(201, response.status_int) self.assertEqual(bdict['dns_nameserver'], response.json['dns_nameserver']) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_no_exist_keypair(self, mock_image_data): self.mock_valid_os_res.side_effect = exception.KeyPairNotFound("Test") mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.cluster_template_post_data() response = self.post_json('/clustertemplates', bdict, expect_errors=True) self.assertEqual(404, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_flavor(self, mock_image_data): mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.cluster_template_post_data() response = self.post_json('/clustertemplates', bdict) 
self.assertEqual(201, response.status_int) self.assertEqual(bdict['flavor_id'], response.json['flavor_id']) self.assertEqual(bdict['master_flavor_id'], response.json['master_flavor_id']) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_no_exist_flavor(self, mock_image_data): self.mock_valid_os_res.side_effect = exception.FlavorNotFound("flavor") mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.cluster_template_post_data() response = self.post_json('/clustertemplates', bdict, expect_errors=True) self.assertEqual(400, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_external_network(self, mock_image_data): mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.cluster_template_post_data() response = self.post_json('/clustertemplates', bdict) self.assertEqual(201, response.status_int) self.assertEqual(bdict['external_network_id'], response.json['external_network_id']) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_no_exist_external_network( self, mock_image_data): self.mock_valid_os_res.side_effect = exception.ExternalNetworkNotFound( "test") mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.cluster_template_post_data() response = self.post_json('/clustertemplates', bdict, expect_errors=True) self.assertEqual(400, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_without_name(self, mock_image_data): with mock.patch.object(self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template): mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-atomic'} bdict = apiutils.cluster_template_post_data() bdict.pop('name') resp = self.post_json('/clustertemplates', bdict) 
self.assertEqual(201, resp.status_int) self.assertIsNotNone(resp.json['name']) def test_create_cluster_with_disabled_driver(self): cfg.CONF.set_override('disabled_drivers', ['mesos_ubuntu_v1'], group='drivers') bdict = apiutils.cluster_template_post_data(coe="mesos") self.assertRaises(AppError, self.post_json, '/clustertemplates', bdict) class TestDelete(api_base.FunctionalTest): def test_delete_cluster_template(self): cluster_template = obj_utils.create_test_cluster_template(self.context) self.delete('/clustertemplates/%s' % cluster_template.uuid) response = self.get_json('/clustertemplates/%s' % cluster_template.uuid, expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_delete_cluster_template_with_cluster(self): cluster_template = obj_utils.create_test_cluster_template(self.context) obj_utils.create_test_cluster( self.context, cluster_template_id=cluster_template.uuid) response = self.delete('/clustertemplates/%s' % cluster_template.uuid, expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) self.assertIn(cluster_template.uuid, response.json['errors'][0]['detail']) def test_delete_cluster_template_not_found(self): uuid = uuidutils.generate_uuid() response = self.delete('/clustertemplates/%s' % uuid, expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_delete_cluster_template_with_name(self): cluster_template = obj_utils.create_test_cluster_template(self.context) response = self.delete('/clustertemplates/%s' % cluster_template['name'], expect_errors=True) self.assertEqual(204, response.status_int) def test_delete_cluster_template_with_name_not_found(self): response = self.delete('/clustertemplates/not_found', 
expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_delete_multiple_cluster_template_by_name(self): obj_utils.create_test_cluster_template(self.context, name='test_cluster_template', uuid=uuidutils.generate_uuid()) obj_utils.create_test_cluster_template(self.context, name='test_cluster_template', uuid=uuidutils.generate_uuid()) response = self.delete('/clustertemplates/test_cluster_template', expect_errors=True) self.assertEqual(409, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) @mock.patch("magnum.common.policy.enforce") @mock.patch("magnum.common.context.make_context") def test_delete_cluster_template_as_admin(self, mock_context, mock_policy): temp_uuid = uuidutils.generate_uuid() obj_utils.create_test_cluster_template(self.context, uuid=temp_uuid, project_id=temp_uuid) self.context.is_admin = True response = self.delete('/clustertemplates/%s' % temp_uuid, expect_errors=True) self.assertEqual(204, response.status_int) class TestClusterTemplatePolicyEnforcement(api_base.FunctionalTest): def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({rule: "project:non_fake"}) response = func(*arg, **kwarg) self.assertEqual(403, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy doesn't allow %s to be performed." 
% rule, response.json['errors'][0]['detail']) def test_policy_disallow_get_all(self): self._common_policy_check( "cluster_template:get_all", self.get_json, '/clustertemplates', expect_errors=True) def test_policy_disallow_get_one(self): cluster_template = obj_utils.create_test_cluster_template(self.context) self._common_policy_check( "cluster_template:get", self.get_json, '/clustertemplates/%s' % cluster_template.uuid, expect_errors=True) def test_policy_disallow_detail(self): self._common_policy_check( "cluster_template:detail", self.get_json, '/clustertemplates/%s/detail' % uuidutils.generate_uuid(), expect_errors=True) def test_policy_disallow_update(self): cluster_template = obj_utils.create_test_cluster_template( self.context, name='example_A', uuid=uuidutils.generate_uuid()) self._common_policy_check( "cluster_template:update", self.patch_json, '/clustertemplates/%s' % cluster_template.name, [{'path': '/name', 'value': "new_name", 'op': 'replace'}], expect_errors=True) def test_policy_disallow_create(self): bdict = apiutils.cluster_template_post_data( name='cluster_model_example_A') self._common_policy_check( "cluster_template:create", self.post_json, '/clustertemplates', bdict, expect_errors=True) def test_policy_disallow_delete(self): cluster_template = obj_utils.create_test_cluster_template(self.context) self._common_policy_check( "cluster_template:delete", self.delete, '/clustertemplates/%s' % cluster_template.uuid, expect_errors=True) def _owner_check(self, rule, func, *args, **kwargs): self.policy.set_rules({rule: "user_id:%(user_id)s"}) response = func(*args, **kwargs) self.assertEqual(403, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy doesn't allow %s to be performed." 
% rule, response.json['errors'][0]['detail']) def test_policy_only_owner_get_one(self): cluster_template = obj_utils.create_test_cluster_template( self.context, user_id='another') self._owner_check("cluster_template:get", self.get_json, '/clustertemplates/%s' % cluster_template.uuid, expect_errors=True) def test_policy_only_owner_update(self): cluster_template = obj_utils.create_test_cluster_template( self.context, user_id='another') self._owner_check( "cluster_template:update", self.patch_json, '/clustertemplates/%s' % cluster_template.uuid, [{'path': '/name', 'value': "new_name", 'op': 'replace'}], expect_errors=True) def test_policy_only_owner_delete(self): cluster_template = obj_utils.create_test_cluster_template( self.context, user_id='another') self._owner_check( "cluster_template:delete", self.delete, '/clustertemplates/%s' % cluster_template.uuid, expect_errors=True) magnum-6.1.0/magnum/tests/unit/api/controllers/v1/test_cluster.py0000666000175100017510000014114213244017334025200 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import datetime import mock from oslo_config import cfg from oslo_utils import timeutils from oslo_utils import uuidutils from wsme import types as wtypes from magnum.api import attr_validator from magnum.api.controllers.v1 import cluster as api_cluster from magnum.common import exception from magnum.conductor import api as rpcapi import magnum.conf from magnum import objects from magnum.tests import base from magnum.tests.unit.api import base as api_base from magnum.tests.unit.api import utils as apiutils from magnum.tests.unit.objects import utils as obj_utils CONF = magnum.conf.CONF class TestClusterObject(base.TestCase): def test_cluster_init(self): cluster_dict = apiutils.cluster_post_data(cluster_template_id=None) del cluster_dict['node_count'] del cluster_dict['master_count'] del cluster_dict['create_timeout'] cluster = api_cluster.Cluster(**cluster_dict) self.assertEqual(1, cluster.node_count) self.assertEqual(1, cluster.master_count) self.assertEqual(60, cluster.create_timeout) # test unset value for cluster_template_id cluster.cluster_template_id = wtypes.Unset self.assertEqual(wtypes.Unset, cluster.cluster_template_id) # test backwards compatibility of bay fields with new objects cluster_dict['create_timeout'] = 15 cluster = api_cluster.Cluster(**cluster_dict) self.assertEqual(15, cluster.create_timeout) class TestListCluster(api_base.FunctionalTest): _cluster_attrs = ("name", "cluster_template_id", "node_count", "status", "master_count", "stack_id", "create_timeout") _expand_cluster_attrs = ("name", "cluster_template_id", "node_count", "status", "api_address", "discovery_url", "node_addresses", "master_count", "master_addresses", "stack_id", "create_timeout", "status_reason") def setUp(self): super(TestListCluster, self).setUp() obj_utils.create_test_cluster_template(self.context) def test_empty(self): response = self.get_json('/clusters') self.assertEqual([], response['clusters']) def test_one(self): cluster = 
obj_utils.create_test_cluster(self.context) response = self.get_json('/clusters') self.assertEqual(cluster.uuid, response['clusters'][0]["uuid"]) self._verify_attrs(self._cluster_attrs, response['clusters'][0]) # Verify attrs do not appear from cluster's get_all response none_attrs = \ set(self._expand_cluster_attrs) - set(self._cluster_attrs) self._verify_attrs(none_attrs, response['clusters'][0], positive=False) def test_get_one(self): cluster = obj_utils.create_test_cluster(self.context) response = self.get_json('/clusters/%s' % cluster['uuid']) self.assertEqual(cluster.uuid, response['uuid']) self._verify_attrs(self._expand_cluster_attrs, response) @mock.patch('magnum.common.clients.OpenStackClients.heat') def test_get_one_failed_cluster(self, mock_heat): fake_resources = mock.MagicMock() fake_resources.resource_name = 'fake_name' fake_resources.resource_status_reason = 'fake_reason' ht = mock.MagicMock() ht.resources.list.return_value = [fake_resources] mock_heat.return_value = ht cluster = obj_utils.create_test_cluster(self.context, status='CREATE_FAILED') response = self.get_json('/clusters/%s' % cluster['uuid']) self.assertEqual(cluster.uuid, response['uuid']) self.assertEqual({'fake_name': 'fake_reason'}, response['faults']) @mock.patch('magnum.common.clients.OpenStackClients.heat') def test_get_one_failed_cluster_heatclient_exception(self, mock_heat): mock_heat.resources.list.side_effect = Exception('fake') cluster = obj_utils.create_test_cluster(self.context, status='CREATE_FAILED') response = self.get_json('/clusters/%s' % cluster['uuid']) self.assertEqual(cluster.uuid, response['uuid']) self.assertEqual({}, response['faults']) def test_get_one_by_name(self): cluster = obj_utils.create_test_cluster(self.context) response = self.get_json('/clusters/%s' % cluster['name']) self.assertEqual(cluster.uuid, response['uuid']) self._verify_attrs(self._expand_cluster_attrs, response) def test_get_one_by_name_not_found(self): response = self.get_json( 
'/clusters/not_found', expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_get_one_by_uuid(self): temp_uuid = uuidutils.generate_uuid() obj_utils.create_test_cluster(self.context, uuid=temp_uuid) response = self.get_json( '/clusters/%s' % temp_uuid) self.assertEqual(temp_uuid, response['uuid']) def test_get_one_by_uuid_not_found(self): temp_uuid = uuidutils.generate_uuid() response = self.get_json( '/clusters/%s' % temp_uuid, expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) @mock.patch("magnum.common.policy.enforce") @mock.patch("magnum.common.context.make_context") def test_get_one_by_uuid_admin(self, mock_context, mock_policy): temp_uuid = uuidutils.generate_uuid() obj_utils.create_test_cluster(self.context, uuid=temp_uuid, project_id=temp_uuid) self.context.is_admin = True response = self.get_json( '/clusters/%s' % temp_uuid) self.assertEqual(temp_uuid, response['uuid']) def test_get_one_by_name_multiple_cluster(self): obj_utils.create_test_cluster(self.context, name='test_cluster', uuid=uuidutils.generate_uuid()) obj_utils.create_test_cluster(self.context, name='test_cluster', uuid=uuidutils.generate_uuid()) response = self.get_json('/clusters/test_cluster', expect_errors=True) self.assertEqual(409, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_get_all_with_pagination_marker(self): cluster_list = [] for id_ in range(4): temp_uuid = uuidutils.generate_uuid() cluster = obj_utils.create_test_cluster(self.context, id=id_, uuid=temp_uuid) cluster_list.append(cluster) response = self.get_json('/clusters?limit=3&marker=%s' % cluster_list[2].uuid) self.assertEqual(1, len(response['clusters'])) self.assertEqual(cluster_list[-1].uuid, 
response['clusters'][0]['uuid']) @mock.patch("magnum.common.policy.enforce") @mock.patch("magnum.common.context.make_context") def test_get_all_with_all_projects(self, mock_context, mock_policy): for id_ in range(4): temp_uuid = uuidutils.generate_uuid() obj_utils.create_test_cluster(self.context, id=id_, uuid=temp_uuid, project_id=id_) self.context.is_admin = True response = self.get_json('/clusters') self.assertEqual(4, len(response['clusters'])) def test_detail(self): cluster = obj_utils.create_test_cluster(self.context) response = self.get_json('/clusters/detail') self.assertEqual(cluster.uuid, response['clusters'][0]["uuid"]) self._verify_attrs(self._expand_cluster_attrs, response['clusters'][0]) def test_detail_with_pagination_marker(self): cluster_list = [] for id_ in range(4): temp_uuid = uuidutils.generate_uuid() cluster = obj_utils.create_test_cluster(self.context, id=id_, uuid=temp_uuid) cluster_list.append(cluster) response = self.get_json('/clusters/detail?limit=3&marker=%s' % cluster_list[2].uuid) self.assertEqual(1, len(response['clusters'])) self.assertEqual(cluster_list[-1].uuid, response['clusters'][0]['uuid']) self._verify_attrs(self._expand_cluster_attrs, response['clusters'][0]) def test_detail_against_single(self): cluster = obj_utils.create_test_cluster(self.context) response = self.get_json('/clusters/%s/detail' % cluster['uuid'], expect_errors=True) self.assertEqual(404, response.status_int) def test_many(self): bm_list = [] for id_ in range(5): temp_uuid = uuidutils.generate_uuid() cluster = obj_utils.create_test_cluster(self.context, id=id_, uuid=temp_uuid) bm_list.append(cluster.uuid) response = self.get_json('/clusters') self.assertEqual(len(bm_list), len(response['clusters'])) uuids = [b['uuid'] for b in response['clusters']] self.assertEqual(sorted(bm_list), sorted(uuids)) def test_links(self): uuid = uuidutils.generate_uuid() obj_utils.create_test_cluster(self.context, id=1, uuid=uuid) response = self.get_json('/clusters/%s' % uuid) 
self.assertIn('links', response.keys()) self.assertEqual(2, len(response['links'])) self.assertIn(uuid, response['links'][0]['href']) for l in response['links']: bookmark = l['rel'] == 'bookmark' self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) def test_collection_links(self): for id_ in range(5): obj_utils.create_test_cluster(self.context, id=id_, uuid=uuidutils.generate_uuid()) response = self.get_json('/clusters/?limit=3') self.assertEqual(3, len(response['clusters'])) next_marker = response['clusters'][-1]['uuid'] self.assertIn(next_marker, response['next']) def test_collection_links_default_limit(self): cfg.CONF.set_override('max_limit', 3, 'api') for id_ in range(5): obj_utils.create_test_cluster(self.context, id=id_, uuid=uuidutils.generate_uuid()) response = self.get_json('/clusters') self.assertEqual(3, len(response['clusters'])) next_marker = response['clusters'][-1]['uuid'] self.assertIn(next_marker, response['next']) class TestPatch(api_base.FunctionalTest): def setUp(self): super(TestPatch, self).setUp() self.cluster_template_obj = obj_utils.create_test_cluster_template( self.context) self.cluster_obj = obj_utils.create_test_cluster( self.context, name='cluster_example_A', node_count=3) p = mock.patch.object(rpcapi.API, 'cluster_update_async') self.mock_cluster_update = p.start() self.mock_cluster_update.side_effect = self._sim_rpc_cluster_update self.addCleanup(p.stop) def _sim_rpc_cluster_update(self, cluster, rollback=False): cluster.save() return cluster @mock.patch('oslo_utils.timeutils.utcnow') def test_replace_ok(self, mock_utcnow): new_node_count = 4 test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid, [{'path': '/node_count', 'value': new_node_count, 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_code) response = self.get_json('/clusters/%s' % 
self.cluster_obj.uuid) self.assertEqual(new_node_count, response['node_count']) return_updated_at = timeutils.parse_isotime( response['updated_at']).replace(tzinfo=None) self.assertEqual(test_time, return_updated_at) # Assert nothing else was changed self.assertEqual(self.cluster_obj.uuid, response['uuid']) self.assertEqual(self.cluster_obj.cluster_template_id, response['cluster_template_id']) @mock.patch('oslo_utils.timeutils.utcnow') def test_replace_ok_by_name(self, mock_utcnow): new_node_count = 4 test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time response = self.patch_json('/clusters/%s' % self.cluster_obj.name, [{'path': '/node_count', 'value': new_node_count, 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_code) response = self.get_json('/clusters/%s' % self.cluster_obj.uuid) self.assertEqual(new_node_count, response['node_count']) return_updated_at = timeutils.parse_isotime( response['updated_at']).replace(tzinfo=None) self.assertEqual(test_time, return_updated_at) # Assert nothing else was changed self.assertEqual(self.cluster_obj.uuid, response['uuid']) self.assertEqual(self.cluster_obj.cluster_template_id, response['cluster_template_id']) @mock.patch('oslo_utils.timeutils.utcnow') def test_replace_ok_by_name_not_found(self, mock_utcnow): name = 'not_found' test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time response = self.patch_json('/clusters/%s' % name, [{'path': '/name', 'value': name, 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(404, response.status_code) @mock.patch('oslo_utils.timeutils.utcnow') def test_replace_ok_by_uuid_not_found(self, mock_utcnow): uuid = uuidutils.generate_uuid() test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time response = self.patch_json('/clusters/%s' % uuid, [{'path': '/cluster_id', 
'value': uuid, 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(404, response.status_code) def test_replace_cluster_template_id_failed(self): cluster_template = obj_utils.create_test_cluster_template( self.context, uuid=uuidutils.generate_uuid()) response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid, [{'path': '/cluster_template_id', 'value': cluster_template.uuid, 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) @mock.patch('oslo_utils.timeutils.utcnow') def test_replace_ok_by_name_multiple_cluster(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time obj_utils.create_test_cluster(self.context, name='test_cluster', uuid=uuidutils.generate_uuid()) obj_utils.create_test_cluster(self.context, name='test_cluster', uuid=uuidutils.generate_uuid()) response = self.patch_json('/clusters/test_cluster', [{'path': '/name', 'value': 'test_cluster', 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(409, response.status_code) def test_replace_non_existent_cluster_template_id(self): response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid, [{'path': '/cluster_template_id', 'value': uuidutils.generate_uuid(), 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) def test_replace_invalid_node_count(self): response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid, [{'path': '/node_count', 'value': -1, 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) def 
test_replace_non_existent_cluster(self): response = self.patch_json('/clusters/%s' % uuidutils.generate_uuid(), [{'path': '/name', 'value': 'cluster_example_B', 'op': 'replace'}], expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_replace_cluster_name_failed(self): response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid, [{'path': '/name', 'value': 'cluster_example_B', 'op': 'replace'}], expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_add_non_existent_property(self): response = self.patch_json( '/clusters/%s' % self.cluster_obj.uuid, [{'path': '/foo', 'value': 'bar', 'op': 'add'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_update_cluster_with_rollback_enabled(self): response = self.patch_json( '/clusters/%s/?rollback=True' % self.cluster_obj.uuid, [{'path': '/node_count', 'value': 4, 'op': 'replace'}], headers={'OpenStack-API-Version': 'container-infra 1.3'}) self.mock_cluster_update.assert_called_once_with(mock.ANY, True) self.assertEqual(202, response.status_code) def test_update_cluster_with_rollback_disabled(self): response = self.patch_json( '/clusters/%s/?rollback=False' % self.cluster_obj.uuid, [{'path': '/node_count', 'value': 4, 'op': 'replace'}], headers={'OpenStack-API-Version': 'container-infra 1.3'}) self.mock_cluster_update.assert_called_once_with(mock.ANY, False) self.assertEqual(202, response.status_code) def test_remove_ok(self): response = self.get_json('/clusters/%s' % self.cluster_obj.uuid) self.assertIsNotNone(response['name']) response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid, [{'path': '/node_count', 'op': 'remove'}]) 
self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_code) response = self.get_json('/clusters/%s' % self.cluster_obj.uuid) # only allow node_count for cluster, and default value is 1 self.assertEqual(1, response['node_count']) # Assert nothing else was changed self.assertEqual(self.cluster_obj.uuid, response['uuid']) self.assertEqual(self.cluster_obj.cluster_template_id, response['cluster_template_id']) self.assertEqual(self.cluster_obj.name, response['name']) self.assertEqual(self.cluster_obj.master_count, response['master_count']) def test_remove_mandatory_property_fail(self): mandatory_properties = ('/uuid', '/cluster_template_id') for p in mandatory_properties: response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid, [{'path': p, 'op': 'remove'}], expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_remove_non_existent_property(self): response = self.patch_json( '/clusters/%s' % self.cluster_obj.uuid, [{'path': '/non-existent', 'op': 'remove'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) class TestPost(api_base.FunctionalTest): def setUp(self): super(TestPost, self).setUp() self.cluster_template = obj_utils.create_test_cluster_template( self.context) p = mock.patch.object(rpcapi.API, 'cluster_create_async') self.mock_cluster_create = p.start() self.mock_cluster_create.side_effect = self._simulate_cluster_create self.addCleanup(p.stop) p = mock.patch.object(attr_validator, 'validate_os_resources') self.mock_valid_os_res = p.start() self.addCleanup(p.stop) def _simulate_cluster_create(self, cluster, create_timeout): cluster.create() return cluster @mock.patch('oslo_utils.timeutils.utcnow') def test_create_cluster(self, mock_utcnow): bdict = apiutils.cluster_post_data() 
test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time response = self.post_json('/clusters', bdict) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_int) self.assertTrue(uuidutils.is_uuid_like(response.json['uuid'])) @mock.patch('oslo_utils.timeutils.utcnow') def test_create_cluster_resource_limit_reached(self, mock_utcnow): # override max_cluster_per_project to 1 CONF.set_override('max_clusters_per_project', 1, group='quotas') bdict = apiutils.cluster_post_data() test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time # create first cluster response = self.post_json('/clusters', bdict) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_int) self.assertTrue(uuidutils.is_uuid_like(response.json['uuid'])) # now try to create second cluster and make sure it fails response = self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(403, response.status_int) self.assertTrue(response.json['errors']) def test_create_cluster_set_project_id_and_user_id(self): bdict = apiutils.cluster_post_data() def _simulate_rpc_cluster_create(cluster, create_timeout): self.assertEqual(self.context.project_id, cluster.project_id) self.assertEqual(self.context.user_id, cluster.user_id) cluster.create() return cluster self.mock_cluster_create.side_effect = _simulate_rpc_cluster_create self.post_json('/clusters', bdict) def test_create_cluster_doesnt_contain_id(self): with mock.patch.object(self.dbapi, 'create_cluster', wraps=self.dbapi.create_cluster) as cc_mock: bdict = apiutils.cluster_post_data(name='cluster_example_A') response = self.post_json('/clusters', bdict) cc_mock.assert_called_once_with(mock.ANY) # Check that 'id' is not in first arg of positional args self.assertNotIn('id', cc_mock.call_args[0][0]) 
self.assertTrue(uuidutils.is_uuid_like(response.json['uuid'])) def test_create_cluster_generate_uuid(self): bdict = apiutils.cluster_post_data() del bdict['uuid'] response = self.post_json('/clusters', bdict) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_int) self.assertTrue(uuidutils.is_uuid_like(response.json['uuid'])) def test_create_cluster_no_cluster_template_id(self): bdict = apiutils.cluster_post_data() del bdict['cluster_template_id'] response = self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) def test_create_cluster_with_non_existent_cluster_template_id(self): temp_uuid = uuidutils.generate_uuid() bdict = apiutils.cluster_post_data(cluster_template_id=temp_uuid) response = self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_create_cluster_with_non_existent_cluster_template_name(self): modelname = 'notfound' bdict = apiutils.cluster_post_data(cluster_template_id=modelname) response = self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_create_cluster_with_cluster_template_name(self): modelname = self.cluster_template.name bdict = apiutils.cluster_post_data(cluster_template_id=modelname) response = self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_int) def test_create_cluster_with_node_count_zero(self): bdict = apiutils.cluster_post_data() bdict['node_count'] = 0 response = self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) 
self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_create_cluster_with_node_count_negative(self): bdict = apiutils.cluster_post_data() bdict['node_count'] = -1 response = self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_create_cluster_with_no_node_count(self): bdict = apiutils.cluster_post_data() del bdict['node_count'] response = self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_int) def test_create_cluster_with_master_count_zero(self): bdict = apiutils.cluster_post_data() bdict['master_count'] = 0 response = self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_create_cluster_with_no_master_count(self): bdict = apiutils.cluster_post_data() del bdict['master_count'] response = self.post_json('/clusters', bdict) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_int) def test_create_cluster_with_invalid_name(self): invalid_names = ['x' * 243, '123456', '123456test_cluster', '-test_cluster', '.test_cluster', '_test_cluster', ''] for value in invalid_names: bdict = apiutils.cluster_post_data(name=value) response = self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_create_cluster_with_valid_name(self): valid_names = ['test_cluster123456', 'test-cluster', 'test.cluster', 'testcluster.', 'testcluster-', 'testcluster_', 'test.-_cluster', 'Testcluster'] for value in valid_names: bdict = 
apiutils.cluster_post_data(name=value) response = self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_int) def test_create_cluster_without_name(self): bdict = apiutils.cluster_post_data() del bdict['name'] response = self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_int) def test_create_cluster_with_timeout_none(self): bdict = apiutils.cluster_post_data() bdict['create_timeout'] = None response = self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_int) def test_create_cluster_with_no_timeout(self): def _simulate_rpc_cluster_create(cluster, create_timeout): self.assertEqual(60, create_timeout) cluster.create() return cluster self.mock_cluster_create.side_effect = _simulate_rpc_cluster_create bdict = apiutils.cluster_post_data() del bdict['create_timeout'] response = self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_int) def test_create_cluster_with_timeout_negative(self): bdict = apiutils.cluster_post_data() bdict['create_timeout'] = -1 response = self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_create_cluster_with_timeout_zero(self): bdict = apiutils.cluster_post_data() bdict['create_timeout'] = 0 response = self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_int) def test_create_cluster_with_invalid_flavor(self): bdict = apiutils.cluster_post_data() self.mock_valid_os_res.side_effect = 
exception.FlavorNotFound( 'test-flavor') response = self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertTrue(self.mock_valid_os_res.called) self.assertEqual(400, response.status_int) def test_create_cluster_with_invalid_ext_network(self): bdict = apiutils.cluster_post_data() self.mock_valid_os_res.side_effect = \ exception.ExternalNetworkNotFound('test-net') response = self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertTrue(self.mock_valid_os_res.called) self.assertEqual(400, response.status_int) def test_create_cluster_with_invalid_keypair(self): bdict = apiutils.cluster_post_data() self.mock_valid_os_res.side_effect = exception.KeyPairNotFound( 'test-key') response = self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertTrue(self.mock_valid_os_res.called) self.assertEqual(404, response.status_int) def test_create_cluster_with_nonexist_image(self): bdict = apiutils.cluster_post_data() self.mock_valid_os_res.side_effect = exception.ImageNotFound( 'test-img') response = self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertTrue(self.mock_valid_os_res.called) self.assertEqual(400, response.status_int) def test_create_cluster_with_multi_images_same_name(self): bdict = apiutils.cluster_post_data() self.mock_valid_os_res.side_effect = exception.Conflict('test-img') response = self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertTrue(self.mock_valid_os_res.called) self.assertEqual(409, response.status_int) def test_create_cluster_with_on_os_distro_image(self): bdict = apiutils.cluster_post_data() self.mock_valid_os_res.side_effect = \ exception.OSDistroFieldNotFound('img') response = self.post_json('/clusters', 
bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertTrue(self.mock_valid_os_res.called) self.assertEqual(400, response.status_int) def test_create_cluster_with_no_lb_one_node(self): cluster_template = obj_utils.create_test_cluster_template( self.context, name='foo', uuid='foo', master_lb_enabled=False) bdict = apiutils.cluster_post_data( cluster_template_id=cluster_template.name, master_count=1) response = self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_int) def test_create_cluster_with_no_lb_multi_node(self): cluster_template = obj_utils.create_test_cluster_template( self.context, name='foo', uuid='foo', master_lb_enabled=False) bdict = apiutils.cluster_post_data( cluster_template_id=cluster_template.name, master_count=3) response = self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) def test_create_cluster_with_keypair(self): bdict = apiutils.cluster_post_data() bdict['keypair'] = 'keypair2' response = self.post_json('/clusters', bdict) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_int) cluster, timeout = self.mock_cluster_create.call_args self.assertEqual('keypair2', cluster[0].keypair) def test_create_cluster_without_keypair(self): bdict = apiutils.cluster_post_data() response = self.post_json('/clusters', bdict) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_int) cluster, timeout = self.mock_cluster_create.call_args # Verify keypair from ClusterTemplate is used self.assertEqual('keypair1', cluster[0].keypair) def test_create_cluster_with_multi_keypair_same_name(self): bdict = apiutils.cluster_post_data() self.mock_valid_os_res.side_effect = exception.Conflict('keypair2') response = 
self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertTrue(self.mock_valid_os_res.called) self.assertEqual(409, response.status_int) def test_create_cluster_with_docker_volume_size(self): bdict = apiutils.cluster_post_data() bdict['docker_volume_size'] = 3 response = self.post_json('/clusters', bdict) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_int) cluster, timeout = self.mock_cluster_create.call_args self.assertEqual(3, cluster[0].docker_volume_size) def test_create_cluster_with_labels(self): bdict = apiutils.cluster_post_data() bdict['labels'] = {'key': 'value'} response = self.post_json('/clusters', bdict) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_int) cluster, timeout = self.mock_cluster_create.call_args self.assertEqual({'key': 'value'}, cluster[0].labels) def test_create_cluster_without_docker_volume_size(self): bdict = apiutils.cluster_post_data() # Remove the default docker_volume_size from the cluster dict. 
del bdict['docker_volume_size'] response = self.post_json('/clusters', bdict) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_int) cluster, timeout = self.mock_cluster_create.call_args # Verify docker_volume_size from ClusterTemplate is used self.assertEqual(20, cluster[0].docker_volume_size) def test_create_cluster_without_labels(self): bdict = apiutils.cluster_post_data() bdict.pop('labels') response = self.post_json('/clusters', bdict) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_int) cluster, timeout = self.mock_cluster_create.call_args # Verify labels from ClusterTemplate is used self.assertEqual({'key1': u'val1', 'key2': u'val2'}, cluster[0].labels) def test_create_cluster_with_invalid_docker_volume_size(self): invalid_values = [(-1, None), ('notanint', None), (1, 'devicemapper'), (2, 'devicemapper')] for value in invalid_values: bdict = apiutils.cluster_post_data(docker_volume_size=value[0], docker_storage_driver=value[1]) response = self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_create_cluster_with_invalid_labels(self): bdict = apiutils.cluster_post_data(labels='invalid') response = self.post_json('/clusters', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_create_cluster_with_master_flavor_id(self): bdict = apiutils.cluster_post_data() bdict['master_flavor_id'] = 'm2.small' response = self.post_json('/clusters', bdict) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_int) cluster, timeout = self.mock_cluster_create.call_args self.assertEqual('m2.small', cluster[0].master_flavor_id) def 
test_create_cluster_without_master_flavor_id(self): bdict = apiutils.cluster_post_data() response = self.post_json('/clusters', bdict) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_int) cluster, timeout = self.mock_cluster_create.call_args # Verify master_flavor_id from ClusterTemplate is used self.assertEqual('m1.small', cluster[0].master_flavor_id) def test_create_cluster_with_flavor_id(self): bdict = apiutils.cluster_post_data() bdict['flavor_id'] = 'm2.small' response = self.post_json('/clusters', bdict) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_int) cluster, timeout = self.mock_cluster_create.call_args self.assertEqual('m2.small', cluster[0].flavor_id) def test_create_cluster_without_flavor_id(self): bdict = apiutils.cluster_post_data() response = self.post_json('/clusters', bdict) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_int) cluster, timeout = self.mock_cluster_create.call_args # Verify flavor_id from ClusterTemplate is used self.assertEqual('m1.small', cluster[0].flavor_id) class TestDelete(api_base.FunctionalTest): def setUp(self): super(TestDelete, self).setUp() self.cluster_template = obj_utils.create_test_cluster_template( self.context) self.cluster = obj_utils.create_test_cluster(self.context) p = mock.patch.object(rpcapi.API, 'cluster_delete_async') self.mock_cluster_delete = p.start() self.mock_cluster_delete.side_effect = self._simulate_cluster_delete self.addCleanup(p.stop) def _simulate_cluster_delete(self, cluster_uuid): cluster = objects.Cluster.get_by_uuid(self.context, cluster_uuid) cluster.destroy() def test_delete_cluster(self): self.delete('/clusters/%s' % self.cluster.uuid) response = self.get_json('/clusters/%s' % self.cluster.uuid, expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) 
self.assertTrue(response.json['errors']) def test_delete_cluster_not_found(self): uuid = uuidutils.generate_uuid() response = self.delete('/clusters/%s' % uuid, expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_delete_cluster_with_name_not_found(self): response = self.delete('/clusters/not_found', expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_delete_cluster_with_name(self): response = self.delete('/clusters/%s' % self.cluster.name, expect_errors=True) self.assertEqual(204, response.status_int) def test_delete_multiple_cluster_by_name(self): obj_utils.create_test_cluster(self.context, name='test_cluster', uuid=uuidutils.generate_uuid()) obj_utils.create_test_cluster(self.context, name='test_cluster', uuid=uuidutils.generate_uuid()) response = self.delete('/clusters/test_cluster', expect_errors=True) self.assertEqual(409, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) @mock.patch("magnum.common.policy.enforce") @mock.patch("magnum.common.context.make_context") def test_delete_cluster_as_admin(self, mock_context, mock_policy): temp_uuid = uuidutils.generate_uuid() obj_utils.create_test_cluster(self.context, uuid=temp_uuid) self.context.is_admin = True response = self.delete('/clusters/%s' % temp_uuid, expect_errors=True) self.assertEqual(204, response.status_int) class TestClusterPolicyEnforcement(api_base.FunctionalTest): def setUp(self): super(TestClusterPolicyEnforcement, self).setUp() obj_utils.create_test_cluster_template(self.context) def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({rule: "project:non_fake"}) response = func(*arg, **kwarg) self.assertEqual(403, response.status_int) 
self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy doesn't allow %s to be performed." % rule, response.json['errors'][0]['detail']) def test_policy_disallow_get_all(self): self._common_policy_check( "cluster:get_all", self.get_json, '/clusters', expect_errors=True) def test_policy_disallow_get_one(self): self.cluster = obj_utils.create_test_cluster(self.context) self._common_policy_check( "cluster:get", self.get_json, '/clusters/%s' % self.cluster.uuid, expect_errors=True) def test_policy_disallow_detail(self): self._common_policy_check( "cluster:detail", self.get_json, '/clusters/%s/detail' % uuidutils.generate_uuid(), expect_errors=True) def test_policy_disallow_update(self): self.cluster = obj_utils.create_test_cluster(self.context, name='cluster_example_A', node_count=3) self._common_policy_check( "cluster:update", self.patch_json, '/clusters/%s' % self.cluster.name, [{'path': '/name', 'value': "new_name", 'op': 'replace'}], expect_errors=True) def test_policy_disallow_create(self): bdict = apiutils.cluster_post_data(name='cluster_example_A') self._common_policy_check( "cluster:create", self.post_json, '/clusters', bdict, expect_errors=True) def _simulate_cluster_delete(self, cluster_uuid): cluster = objects.Cluster.get_by_uuid(self.context, cluster_uuid) cluster.destroy() def test_policy_disallow_delete(self): p = mock.patch.object(rpcapi.API, 'cluster_delete') self.mock_cluster_delete = p.start() self.mock_cluster_delete.side_effect = self._simulate_cluster_delete self.addCleanup(p.stop) self.cluster = obj_utils.create_test_cluster(self.context) self._common_policy_check( "cluster:delete", self.delete, '/clusters/%s' % self.cluster.uuid, expect_errors=True) def _owner_check(self, rule, func, *args, **kwargs): self.policy.set_rules({rule: "user_id:%(user_id)s"}) response = func(*args, **kwargs) self.assertEqual(403, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy 
doesn't allow %s to be performed." % rule, response.json['errors'][0]['detail']) def test_policy_only_owner_get_one(self): cluster = obj_utils.create_test_cluster(self.context, user_id='another') self._owner_check("cluster:get", self.get_json, '/clusters/%s' % cluster.uuid, expect_errors=True) def test_policy_only_owner_update(self): cluster = obj_utils.create_test_cluster(self.context, user_id='another') self._owner_check( "cluster:update", self.patch_json, '/clusters/%s' % cluster.uuid, [{'path': '/name', 'value': "new_name", 'op': 'replace'}], expect_errors=True) def test_policy_only_owner_delete(self): cluster = obj_utils.create_test_cluster(self.context, user_id='another') self._owner_check("cluster:delete", self.delete, '/clusters/%s' % cluster.uuid, expect_errors=True) magnum-6.1.0/magnum/tests/unit/api/controllers/v1/test_federation.py0000666000175100017510000004311113244017334025634 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime

import mock
from oslo_config import cfg
from oslo_utils import uuidutils

from magnum.api.controllers.v1 import federation as api_federation
from magnum.conductor import api as rpcapi
import magnum.conf
from magnum import objects
from magnum.tests import base
from magnum.tests.unit.api import base as api_base
from magnum.tests.unit.api import utils as apiutils
from magnum.tests.unit.objects import utils as obj_utils

CONF = magnum.conf.CONF


class TestFederationObject(base.TestCase):
    """Tests for the Federation API object itself (no HTTP layer)."""

    def test_federation_init(self):
        fed_dict = apiutils.federation_post_data()
        fed_dict['uuid'] = uuidutils.generate_uuid()
        federation = api_federation.Federation(**fed_dict)
        self.assertEqual(fed_dict['uuid'], federation.uuid)


class TestListFederation(api_base.FunctionalTest):
    """Tests for GET /federations: list, show, detail and pagination."""

    def setUp(self):
        super(TestListFederation, self).setUp()

    def test_empty(self):
        response = self.get_json('/federations')
        self.assertEqual([], response['federations'])

    def test_one(self):
        federation = obj_utils.create_test_federation(
            self.context, uuid=uuidutils.generate_uuid())
        response = self.get_json('/federations')
        self.assertEqual(federation.uuid, response['federations'][0]['uuid'])

    def test_get_one(self):
        federation = obj_utils.create_test_federation(
            self.context, uuid=uuidutils.generate_uuid())
        response = self.get_json('/federations/%s' % federation['uuid'])
        # NOTE: was assertTrue(response['uuid'], federation.uuid), which
        # treats the second argument as the failure *message* and therefore
        # never compares the values; assertEqual performs the real check.
        self.assertEqual(federation.uuid, response['uuid'])

    def test_get_one_by_name(self):
        federation = obj_utils.create_test_federation(
            self.context, uuid=uuidutils.generate_uuid())
        response = self.get_json('/federations/%s' % federation['name'])
        # NOTE: fixed from the always-true assertTrue(a, msg) form.
        self.assertEqual(federation.uuid, response['uuid'])

    def test_get_one_by_name_not_found(self):
        response = self.get_json('/federations/not_found', expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_get_one_by_uuid(self):
        temp_uuid = uuidutils.generate_uuid()
        federation = obj_utils.create_test_federation(self.context,
                                                      uuid=temp_uuid)
        response = self.get_json('/federations/%s' % temp_uuid)
        # NOTE: fixed from the always-true assertTrue(a, msg) form.
        self.assertEqual(federation.uuid, response['uuid'])

    def test_get_one_by_uuid_not_found(self):
        temp_uuid = uuidutils.generate_uuid()
        response = self.get_json('/federations/%s' % temp_uuid,
                                 expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_get_one_by_name_multiple_federation(self):
        # Two federations with the same name: a lookup by name is ambiguous
        # and must yield 409 Conflict.
        obj_utils.create_test_federation(self.context, name='test_federation',
                                         uuid=uuidutils.generate_uuid())
        obj_utils.create_test_federation(self.context, name='test_federation',
                                         uuid=uuidutils.generate_uuid())
        response = self.get_json('/federations/test_federation',
                                 expect_errors=True)
        self.assertEqual(409, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_get_all_with_pagination_marker(self):
        federation_list = []
        for id_ in range(4):
            federation = obj_utils.create_test_federation(
                self.context, id=id_, uuid=uuidutils.generate_uuid())
            federation_list.append(federation)

        # Marker points at the third record, so only the fourth is returned.
        response = self.get_json(
            '/federations?limit=3&marker=%s' % federation_list[2].uuid)
        self.assertEqual(1, len(response['federations']))
        self.assertEqual(federation_list[-1].uuid,
                         response['federations'][0]['uuid'])

    def test_detail(self):
        federation = obj_utils.create_test_federation(
            self.context, uuid=uuidutils.generate_uuid())
        response = self.get_json('/federations/detail')
        self.assertEqual(federation.uuid, response['federations'][0]["uuid"])

    def test_detail_with_pagination_marker(self):
        federation_list = []
        for id_ in range(4):
            federation = obj_utils.create_test_federation(
                self.context, id=id_, uuid=uuidutils.generate_uuid())
            federation_list.append(federation)

        response = self.get_json(
            '/federations/detail?limit=3&marker=%s' % federation_list[2].uuid)
        self.assertEqual(1, len(response['federations']))
        self.assertEqual(federation_list[-1].uuid,
                         response['federations'][0]['uuid'])

    def test_detail_against_single(self):
        # /federations/<uuid>/detail is not a valid route.
        federation = obj_utils.create_test_federation(
            self.context, uuid=uuidutils.generate_uuid())
        response = self.get_json(
            '/federations/%s/detail' % federation['uuid'], expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_many(self):
        federation_list = []
        for id_ in range(5):
            temp_uuid = uuidutils.generate_uuid()
            federation = obj_utils.create_test_federation(
                self.context, id=id_, uuid=temp_uuid)
            federation_list.append(federation.uuid)

        response = self.get_json('/federations')
        self.assertEqual(len(federation_list), len(response['federations']))
        uuids = [f['uuid'] for f in response['federations']]
        self.assertEqual(sorted(federation_list), sorted(uuids))

    def test_links(self):
        uuid = uuidutils.generate_uuid()
        obj_utils.create_test_federation(self.context, id=1, uuid=uuid)
        response = self.get_json('/federations/%s' % uuid)
        self.assertIn('links', response.keys())
        self.assertEqual(2, len(response['links']))
        self.assertIn(uuid, response['links'][0]['href'])
        for l in response['links']:
            bookmark = l['rel'] == 'bookmark'
            self.assertTrue(self.validate_link(l['href'],
                                               bookmark=bookmark))

    def test_collection_links(self):
        for id_ in range(5):
            obj_utils.create_test_federation(self.context, id=id_,
                                             uuid=uuidutils.generate_uuid())
        response = self.get_json('/federations/?limit=3')
        next_marker = response['federations'][-1]['uuid']
        self.assertIn(next_marker, response['next'])

    def test_collection_links_default_limit(self):
        # With api.max_limit = 3 the unpaged listing is truncated and a
        # 'next' link is generated.
        cfg.CONF.set_override('max_limit', 3, 'api')
        for id_ in range(5):
            obj_utils.create_test_federation(self.context, id=id_,
                                             uuid=uuidutils.generate_uuid())
        response = self.get_json('/federations')
        self.assertEqual(3, len(response['federations']))
        next_marker = response['federations'][-1]['uuid']
        self.assertIn(next_marker, response['next'])
class TestPatch(api_base.FunctionalTest): def setUp(self): super(TestPatch, self).setUp() p = mock.patch.object(rpcapi.API, 'federation_update_async') self.mock_federation_update = p.start() self.mock_federation_update.side_effect = \ self._sim_rpc_federation_update self.addCleanup(p.stop) def _sim_rpc_federation_update(self, federation, rollback=False): federation.save() return federation def test_member_join(self): f = obj_utils.create_test_federation( self.context, name='federation-example', uuid=uuidutils.generate_uuid(), member_ids=[]) new_member = obj_utils.create_test_cluster(self.context) response = self.patch_json( '/federations/%s' % f.uuid, [{'path': '/member_ids', 'value': new_member.uuid, 'op': 'add'}]) self.assertEqual(202, response.status_int) # make sure it was added: fed = self.get_json('/federations/%s' % f.uuid) self.assertTrue(new_member.uuid in fed['member_ids']) def test_member_unjoin(self): member = obj_utils.create_test_cluster(self.context) federation = obj_utils.create_test_federation( self.context, name='federation-example', uuid=uuidutils.generate_uuid(), member_ids=[member.uuid]) response = self.patch_json( '/federations/%s' % federation.uuid, [{'path': '/member_ids', 'value': member.uuid, 'op': 'remove'}]) self.assertEqual(202, response.status_int) # make sure it was deleted: fed = self.get_json('/federations/%s' % federation.uuid) self.assertFalse(member.uuid in fed['member_ids']) def test_join_non_existent_cluster(self): foo_uuid = uuidutils.generate_uuid() f = obj_utils.create_test_federation( self.context, name='federation-example', uuid=uuidutils.generate_uuid(), member_ids=[]) response = self.patch_json( '/federations/%s' % f.uuid, [{'path': '/member_ids', 'value': foo_uuid, 'op': 'add'}], expect_errors=True) self.assertEqual(404, response.status_int) def test_unjoin_non_existent_cluster(self): foo_uuid = uuidutils.generate_uuid() f = obj_utils.create_test_federation( self.context, name='federation-example', 
uuid=uuidutils.generate_uuid(), member_ids=[]) response = self.patch_json( '/federations/%s' % f.uuid, [{'path': '/member_ids', 'value': foo_uuid, 'op': 'remove'}], expect_errors=True) self.assertEqual(404, response.status_int) def test_join_cluster_already_member(self): cluster = obj_utils.create_test_cluster(self.context) f = obj_utils.create_test_federation( self.context, name='federation-example', uuid=uuidutils.generate_uuid(), member_ids=[cluster.uuid]) response = self.patch_json( '/federations/%s' % f.uuid, [{'path': '/member_ids', 'value': cluster.uuid, 'op': 'add'}], expect_errors=True) self.assertEqual(409, response.status_int) def test_unjoin_non_member_cluster(self): cluster = obj_utils.create_test_cluster(self.context) f = obj_utils.create_test_federation( self.context, name='federation-example', uuid=uuidutils.generate_uuid(), member_ids=[]) response = self.patch_json( '/federations/%s' % f.uuid, [{'path': '/member_ids', 'value': cluster.uuid, 'op': 'remove'}], expect_errors=True) self.assertEqual(404, response.status_int) class TestPost(api_base.FunctionalTest): def setUp(self): super(TestPost, self).setUp() p = mock.patch.object(rpcapi.API, 'federation_create_async') self.mock_fed_create = p.start() self.mock_fed_create.side_effect = self._simulate_federation_create self.addCleanup(p.stop) self.hostcluster = obj_utils.create_test_cluster(self.context) def _simulate_federation_create(self, federation, create_timeout): federation.create() return federation @mock.patch('oslo_utils.timeutils.utcnow') def test_create_federation(self, mock_utcnow): bdict = apiutils.federation_post_data( uuid=uuidutils.generate_uuid(), hostcluster_id=self.hostcluster.uuid) test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time response = self.post_json('/federations', bdict) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_int) self.assertTrue(uuidutils.is_uuid_like(response.json['uuid'])) def 
test_create_federation_no_hostcluster_id(self): bdict = apiutils.federation_post_data(uuid=uuidutils.generate_uuid()) del bdict['hostcluster_id'] response = self.post_json('/federations', bdict, expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_create_federation_hostcluster_does_not_exist(self): bdict = apiutils.federation_post_data( uuid=uuidutils.generate_uuid(), hostcluster_id=uuidutils.generate_uuid()) response = self.post_json('/federations', bdict, expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_create_federation_no_dns_zone_name(self): bdict = apiutils.federation_post_data( uuid=uuidutils.generate_uuid(), hostcluster_id=self.hostcluster.uuid) del bdict['properties'] response = self.post_json('/federations', bdict, expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_create_federation_generate_uuid(self): bdict = apiutils.federation_post_data( hostcluster_id=self.hostcluster.uuid) del bdict['uuid'] response = self.post_json('/federations', bdict) self.assertEqual(202, response.status_int) def test_create_federation_with_invalid_name(self): invalid_names = [ 'x' * 243, '123456', '123456test_federation', '-test_federation', '.test_federation', '_test_federation', '' ] for value in invalid_names: bdict = apiutils.federation_post_data( uuid=uuidutils.generate_uuid(), name=value, hostcluster_id=self.hostcluster.uuid) response = self.post_json('/federations', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_create_federation_with_valid_name(self): valid_names = [ 
'test_federation123456', 'test-federation', 'test.federation', 'testfederation.', 'testfederation-', 'testfederation_', 'test.-_federation', 'Testfederation' ] for value in valid_names: bdict = apiutils.federation_post_data( name=value, hostcluster_id=self.hostcluster.uuid) bdict['uuid'] = uuidutils.generate_uuid() response = self.post_json('/federations', bdict) self.assertEqual(202, response.status_int) def test_create_federation_without_name(self): bdict = apiutils.federation_post_data( uuid=uuidutils.generate_uuid(), hostcluster_id=self.hostcluster.uuid) del bdict['name'] response = self.post_json('/federations', bdict) self.assertEqual(202, response.status_int) class TestDelete(api_base.FunctionalTest): def setUp(self): super(TestDelete, self).setUp() self.federation = obj_utils.create_test_federation( self.context, name='federation-example', uuid=uuidutils.generate_uuid()) p = mock.patch.object(rpcapi.API, 'federation_delete_async') self.mock_federation_delete = p.start() self.mock_federation_delete.side_effect = \ self._simulate_federation_delete self.addCleanup(p.stop) def _simulate_federation_delete(self, federation_uuid): federation = objects.Federation.get_by_uuid(self.context, federation_uuid) federation.destroy() def test_delete_federation(self): self.delete('/federations/%s' % self.federation.uuid) response = self.get_json('/federations/%s' % self.federation.uuid, expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_delete_federation_not_found(self): delete = self.delete('/federations/%s' % uuidutils.generate_uuid(), expect_errors=True) self.assertEqual(404, delete.status_int) self.assertEqual('application/json', delete.content_type) self.assertTrue(delete.json['errors']) def test_delete_federation_with_name(self): delete = self.delete('/federations/%s' % self.federation.name) self.assertEqual(204, delete.status_int) def 
test_delete_federation_with_name_not_found(self): delete = self.delete('/federations/%s' % 'foo', expect_errors=True) self.assertEqual(404, delete.status_int) self.assertEqual('application/json', delete.content_type) self.assertTrue(delete.json['errors']) magnum-6.1.0/magnum/tests/unit/api/controllers/v1/test_magnum_service.py0000666000175100017510000000730113244017334026521 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from magnum.api.controllers.v1 import magnum_services as mservice from magnum.api import servicegroup from magnum import objects from magnum.tests import base from magnum.tests.unit.api import base as api_base from magnum.tests.unit.api import utils as apiutils class TestMagnumServiceObject(base.TestCase): def setUp(self): super(TestMagnumServiceObject, self).setUp() self.rpc_dict = apiutils.mservice_get_data() def test_msvc_obj_fields_filtering(self): """Test that it does filtering fields """ self.rpc_dict['fake-key'] = 'fake-value' msvco = mservice.MagnumService("up", **self.rpc_dict) self.assertNotIn('fake-key', msvco.fields) class db_rec(object): def __init__(self, d): self.rec_as_dict = d def as_dict(self): return self.rec_as_dict class TestMagnumServiceController(api_base.FunctionalTest): @mock.patch("magnum.common.policy.enforce") def test_empty(self, mock_policy): mock_policy.return_value = True response = self.get_json('/mservices') self.assertEqual([], response['mservices']) def _rpc_api_reply(self, count=1): 
reclist = [] for i in range(count): elem = apiutils.mservice_get_data() elem['id'] = i + 1 rec = db_rec(elem) reclist.append(rec) return reclist @mock.patch("magnum.common.policy.enforce") @mock.patch.object(objects.MagnumService, 'list') @mock.patch.object(servicegroup.ServiceGroup, 'service_is_up') def test_get_one(self, svc_up, rpc_patcher, mock_policy): mock_policy.return_value = True rpc_patcher.return_value = self._rpc_api_reply() svc_up.return_value = "up" response = self.get_json('/mservices') self.assertEqual(1, len(response['mservices'])) self.assertEqual(1, response['mservices'][0]['id']) @mock.patch("magnum.common.policy.enforce") @mock.patch.object(objects.MagnumService, 'list') @mock.patch.object(servicegroup.ServiceGroup, 'service_is_up') def test_get_many(self, svc_up, rpc_patcher, mock_policy): mock_policy.return_value = True svc_num = 5 rpc_patcher.return_value = self._rpc_api_reply(svc_num) svc_up.return_value = "up" response = self.get_json('/mservices') self.assertEqual(svc_num, len(response['mservices'])) for i in range(svc_num): elem = response['mservices'][i] self.assertEqual(i + 1, elem['id']) class TestMagnumServiceEnforcement(api_base.FunctionalTest): def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({rule: 'project:non_fake'}) response = func(*arg, **kwarg) self.assertEqual(403, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy doesn't allow %s to be performed." % rule, response.json['errors'][0]['detail']) def test_policy_disallow_get_all(self): self._common_policy_check( 'magnum-service:get_all', self.get_json, '/mservices', expect_errors=True) magnum-6.1.0/magnum/tests/unit/api/controllers/v1/test_types.py0000666000175100017510000002223313244017334024662 0ustar zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. 
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_utils import uuidutils

import mock
import six
import webtest
import wsme
from wsme import types as wtypes

from magnum.api.controllers.v1 import types
from magnum.common import exception
from magnum.common import utils
from magnum.tests.unit.api import base


class TestMacAddressType(base.FunctionalTest):
    """Unit tests for the MacAddressType wsme type."""

    def test_valid_mac_addr(self):
        test_mac = 'aa:bb:cc:11:22:33'
        # validate() must delegate to the common normalization helper.
        with mock.patch.object(utils, 'validate_and_normalize_mac') as m_mock:
            types.MacAddressType.validate(test_mac)
            m_mock.assert_called_once_with(test_mac)

    def test_invalid_mac_addr(self):
        self.assertRaises(exception.InvalidMAC,
                          types.MacAddressType.validate, 'invalid-mac')

    def test_frombasetype(self):
        test_mac = 'aa:bb:cc:11:22:33'
        # frombasetype() goes through the same normalization path.
        with mock.patch.object(utils, 'validate_and_normalize_mac') as m_mock:
            types.MacAddressType.frombasetype(test_mac)
            m_mock.assert_called_once_with(test_mac)

    def test_frombasetype_no_value(self):
        # None is passed through without validation.
        test_mac = None
        self.assertIsNone(types.MacAddressType.frombasetype(test_mac))


class TestUuidType(base.FunctionalTest):
    """Unit tests for the UuidType wsme type."""

    def test_valid_uuid(self):
        test_uuid = '1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e'
        # validate() must delegate the check to oslo's is_uuid_like().
        with mock.patch.object(uuidutils, 'is_uuid_like') as uuid_mock:
            types.UuidType.validate(test_uuid)
            uuid_mock.assert_called_once_with(test_uuid)

    def test_invalid_uuid(self):
        self.assertRaises(exception.InvalidUUID,
                          types.UuidType.validate, 'invalid-uuid')


class MyBaseType(object):
    """Helper class, patched by objects of type MyPatchType"""

    # Mandatory attribute used to exercise "cannot remove mandatory" checks.
    mandatory = wsme.wsattr(wtypes.text, mandatory=True)


class MyPatchType(types.JsonPatchType):
    """Helper class for TestJsonPatchType tests."""
    _api_base = MyBaseType
    _extra_non_removable_attrs = {'/non_removable'}

    @staticmethod
    def internal_attrs():
        # Paths that must never be patched by API clients.
        return ['/internal']


class MyRoot(wsme.WSRoot):
    """Helper class for TestJsonPatchType tests."""

    @wsme.expose([wsme.types.text], body=[MyPatchType])
    @wsme.validate([MyPatchType])
    def test(self, patch):
        # Echo the validated patch back so the test can inspect it.
        return patch


class TestJsonPatchType(base.FunctionalTest):
    """Round-trip JSON-patch documents through a minimal WSME app."""

    def setUp(self):
        super(TestJsonPatchType, self).setUp()
        self.app = webtest.TestApp(MyRoot(['restjson']).wsgiapp())

    def _patch_json(self, params, expect_errors=False):
        # Helper: PATCH the given patch document to the echo endpoint.
        return self.app.patch_json(
            '/test', params=params,
            headers={'Accept': 'application/json'},
            expect_errors=expect_errors)

    def test_valid_patches(self):
        valid_patches = [{'path': '/extra/foo', 'op': 'remove'},
                         {'path': '/extra/foo', 'op': 'add',
                          'value': 'bar'},
                         {'path': '/foo', 'op': 'replace', 'value': 'bar'}]
        ret = self._patch_json(valid_patches, False)
        self.assertEqual(200, ret.status_int)
        # Order is not guaranteed, so compare after sorting by op.
        self.assertEqual(sorted(valid_patches, key=lambda k: k['op']),
                         sorted(ret.json, key=lambda k: k['op']))

    def test_cannot_update_internal_attr(self):
        patch = [{'path': '/internal', 'op': 'replace', 'value': 'foo'}]
        ret = self._patch_json(patch, True)
        self.assertEqual(400, ret.status_int)

    def test_cannot_remove_internal_attr(self):
        patch = [{'path': '/internal', 'op': 'remove'}]
        ret = self._patch_json(patch, True)
        self.assertEqual(400, ret.status_int)

    def test_cannot_add_internal_attr(self):
        patch = [{'path': '/internal', 'op': 'add', 'value': 'foo'}]
        ret = self._patch_json(patch, True)
        self.assertEqual(400, ret.status_int)

    def test_update_mandatory_attr(self):
        # Replacing a mandatory attribute is allowed; removing it is not.
        patch = [{'path': '/mandatory', 'op': 'replace', 'value': 'foo'}]
        ret = self._patch_json(patch, False)
        self.assertEqual(200, ret.status_int)
        self.assertEqual(patch, ret.json)

    def test_cannot_remove_mandatory_attr(self):
        patch = [{'path': '/mandatory', 'op': 'remove'}]
        ret = self._patch_json(patch, True)
        self.assertEqual(400, ret.status_int)

    def test_cannot_remove_extra_non_removable_attr(self):
        patch = [{'path': '/non_removable', 'op': 'remove'}]
        ret = self._patch_json(patch, True)
        self.assertEqual(400, ret.status_int)
        self.assertTrue(ret.json['faultstring'])

    def test_missing_required_fields_path(self):
        missing_path = [{'op': 'remove'}]
        ret = self._patch_json(missing_path, True)
        self.assertEqual(400, ret.status_int)

    def test_missing_required_fields_op(self):
        missing_op = [{'path': '/foo'}]
        ret = self._patch_json(missing_op, True)
        self.assertEqual(400, ret.status_int)

    def test_invalid_op(self):
        patch = [{'path': '/foo', 'op': 'invalid'}]
        ret = self._patch_json(patch, True)
        self.assertEqual(400, ret.status_int)

    def test_invalid_path(self):
        patch = [{'path': 'invalid-path', 'op': 'remove'}]
        ret = self._patch_json(patch, True)
        self.assertEqual(400, ret.status_int)

    def test_cannot_add_with_no_value(self):
        patch = [{'path': '/extra/foo', 'op': 'add'}]
        ret = self._patch_json(patch, True)
        self.assertEqual(400, ret.status_int)

    def test_cannot_replace_with_no_value(self):
        patch = [{'path': '/foo', 'op': 'replace'}]
        ret = self._patch_json(patch, True)
        self.assertEqual(400, ret.status_int)


class TestMultiType(base.FunctionalTest):
    """Unit tests for the MultiType compound wsme type."""

    def test_valid_values(self):
        vt = types.MultiType(wsme.types.text, six.integer_types)
        value = vt.validate("hello")
        self.assertEqual("hello", value)
        value = vt.validate(10)
        self.assertEqual(10, value)

        vt = types.MultiType(types.UuidType, types.NameType)
        value = vt.validate('name')
        self.assertEqual('name', value)
        uuid = "437319e3-d10f-49ec-84c8-e4abb6118c29"
        value = vt.validate(uuid)
        self.assertEqual(uuid, value)

        vt = types.MultiType(types.UuidType, six.integer_types)
        value = vt.validate(10)
        self.assertEqual(10, value)
        value = vt.validate(uuid)
        self.assertEqual(uuid, value)

    def test_invalid_values(self):
        # A value matching none of the member types raises ValueError.
        vt = types.MultiType(wsme.types.text, six.integer_types)
        self.assertRaises(ValueError, vt.validate, 0.10)
        self.assertRaises(ValueError, vt.validate, object())

        vt = types.MultiType(types.UuidType, six.integer_types)
        self.assertRaises(ValueError, vt.validate, 'abc')
        self.assertRaises(ValueError, vt.validate, 0.10)

    def test_multitype_tostring(self):
        # str() of a MultiType mentions each member type.
        vt = types.MultiType(str, int)
        vts = str(vt)
        self.assertIn(str(str), vts)
        self.assertIn(str(int), vts)


class TestBooleanType(base.FunctionalTest):
    """Unit tests for the BooleanType wsme type."""

    def test_valid_true_values(self):
        v = types.BooleanType()
        self.assertTrue(v.validate("true"))
        self.assertTrue(v.validate("TRUE"))
        self.assertTrue(v.validate("True"))
        self.assertTrue(v.validate("t"))
        self.assertTrue(v.validate("1"))
        self.assertTrue(v.validate("y"))
        self.assertTrue(v.validate("yes"))
        self.assertTrue(v.validate("on"))

    def test_valid_false_values(self):
        v = types.BooleanType()
        self.assertFalse(v.validate("false"))
        self.assertFalse(v.validate("FALSE"))
        self.assertFalse(v.validate("False"))
        self.assertFalse(v.validate("f"))
        self.assertFalse(v.validate("0"))
        self.assertFalse(v.validate("n"))
        self.assertFalse(v.validate("no"))
        self.assertFalse(v.validate("off"))

    def test_invalid_value(self):
        v = types.BooleanType()
        self.assertRaises(exception.Invalid, v.validate, "invalid-value")
        self.assertRaises(exception.Invalid, v.validate, "01")

    def test_frombasetype_no_value(self):
        v = types.BooleanType()
        self.assertIsNone(v.frombasetype(None))


class TestNameType(base.FunctionalTest):
    """Unit tests for the NameType wsme type."""

    def test_valid_name(self):
        self.assertEqual('name', types.NameType.validate('name'))
        self.assertEqual(1234, types.NameType.validate(1234))

    def test_invalid_name(self):
        self.assertRaises(exception.InvalidName, types.NameType.validate, None)
        self.assertRaises(exception.InvalidName, types.NameType.validate, '')

    # NOTE(review): the two test names below appear swapped relative to
    # their bodies (this one checks real values, the next checks None);
    # kept as-is here since a doc-only pass must not rename tests.
    def test_frombasetype_no_value(self):
        self.assertEqual('name', types.NameType.frombasetype('name'))
        self.assertEqual(1234, types.NameType.frombasetype(1234))

    def test_frombasetype(self):
        self.assertIsNone(types.NameType.frombasetype(None))
magnum-6.1.0/magnum/tests/unit/api/controllers/v1/test_bay.py0000666000175100017510000012516513244017334024301 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import mock from oslo_config import cfg from oslo_utils import timeutils from oslo_utils import uuidutils from wsme import types as wtypes from magnum.api import attr_validator from magnum.api.controllers.v1 import bay as api_bay from magnum.common import exception from magnum.conductor import api as rpcapi from magnum import objects from magnum.tests import base from magnum.tests.unit.api import base as api_base from magnum.tests.unit.api import utils as apiutils from magnum.tests.unit.objects import utils as obj_utils class TestBayObject(base.TestCase): def test_bay_init(self): bay_dict = apiutils.bay_post_data(baymodel_id=None) del bay_dict['node_count'] del bay_dict['master_count'] del bay_dict['bay_create_timeout'] bay = api_bay.Bay(**bay_dict) self.assertEqual(1, bay.node_count) self.assertEqual(1, bay.master_count) self.assertEqual(60, bay.bay_create_timeout) # test unset value for baymodel_id bay.baymodel_id = wtypes.Unset self.assertEqual(wtypes.Unset, bay.baymodel_id) # test backwards compatibility of bay fields with new objects bay_dict['bay_create_timeout'] = 15 bay_dict['bay_faults'] = {'testfault': 'fault'} bay = api_bay.Bay(**bay_dict) self.assertEqual(15, bay.bay_create_timeout) self.assertEqual(15, bay.create_timeout) self.assertIn('testfault', bay.bay_faults) 
self.assertIn('testfault', bay.faults) def test_as_dict_faults(self): bay_dict = apiutils.bay_post_data(baymodel_id=None) del bay_dict['node_count'] del bay_dict['master_count'] del bay_dict['bay_create_timeout'] bay = api_bay.Bay(**bay_dict) bay.bay_faults = {'testfault': 'fault'} dict = bay.as_dict() self.assertEqual({'testfault': 'fault'}, dict['faults']) class TestListBay(api_base.FunctionalTest): _bay_attrs = ("name", "baymodel_id", "node_count", "status", "master_count", "stack_id", "bay_create_timeout") _expand_bay_attrs = ("name", "baymodel_id", "node_count", "status", "api_address", "discovery_url", "node_addresses", "master_count", "master_addresses", "stack_id", "bay_create_timeout", "status_reason") def setUp(self): super(TestListBay, self).setUp() obj_utils.create_test_cluster_template(self.context) def test_empty(self): response = self.get_json('/bays') self.assertEqual([], response['bays']) def test_one(self): bay = obj_utils.create_test_cluster(self.context) response = self.get_json('/bays') self.assertEqual(bay.uuid, response['bays'][0]["uuid"]) self._verify_attrs(self._bay_attrs, response['bays'][0]) # Verify atts that should not appear from bay's get_all response none_attrs = set(self._expand_bay_attrs) - set(self._bay_attrs) self._verify_attrs(none_attrs, response['bays'][0], positive=False) def test_get_one(self): bay = obj_utils.create_test_cluster(self.context) response = self.get_json('/bays/%s' % bay['uuid']) self.assertEqual(bay.uuid, response['uuid']) self._verify_attrs(self._expand_bay_attrs, response) @mock.patch('magnum.common.clients.OpenStackClients.heat') def test_get_one_failed_bay(self, mock_heat): fake_resources = mock.MagicMock() fake_resources.resource_name = 'fake_name' fake_resources.resource_status_reason = 'fake_reason' ht = mock.MagicMock() ht.resources.list.return_value = [fake_resources] mock_heat.return_value = ht bay = obj_utils.create_test_cluster(self.context, status='CREATE_FAILED') response = 
self.get_json('/bays/%s' % bay['uuid']) self.assertEqual(bay.uuid, response['uuid']) self.assertEqual({'fake_name': 'fake_reason'}, response['bay_faults']) @mock.patch('magnum.common.clients.OpenStackClients.heat') def test_get_one_failed_bay_heatclient_exception(self, mock_heat): mock_heat.resources.list.side_effect = Exception('fake') bay = obj_utils.create_test_cluster(self.context, status='CREATE_FAILED') response = self.get_json('/bays/%s' % bay['uuid']) self.assertEqual(bay.uuid, response['uuid']) self.assertEqual({}, response['bay_faults']) def test_get_one_by_name(self): bay = obj_utils.create_test_cluster(self.context) response = self.get_json('/bays/%s' % bay['name']) self.assertEqual(bay.uuid, response['uuid']) self._verify_attrs(self._expand_bay_attrs, response) def test_get_one_by_name_not_found(self): response = self.get_json( '/bays/not_found', expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_get_one_by_name_multiple_bay(self): obj_utils.create_test_cluster(self.context, name='test_bay', uuid=uuidutils.generate_uuid()) obj_utils.create_test_cluster(self.context, name='test_bay', uuid=uuidutils.generate_uuid()) response = self.get_json('/bays/test_bay', expect_errors=True) self.assertEqual(409, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_get_all_with_pagination_marker(self): bay_list = [] for id_ in range(4): bay = obj_utils.create_test_cluster(self.context, id=id_, uuid=uuidutils.generate_uuid()) bay_list.append(bay) response = self.get_json('/bays?limit=3&marker=%s' % bay_list[2].uuid) self.assertEqual(1, len(response['bays'])) self.assertEqual(bay_list[-1].uuid, response['bays'][0]['uuid']) def test_detail(self): bay = obj_utils.create_test_cluster(self.context) response = self.get_json('/bays/detail') self.assertEqual(bay.uuid, 
response['bays'][0]["uuid"]) self._verify_attrs(self._expand_bay_attrs, response['bays'][0]) def test_detail_with_pagination_marker(self): bay_list = [] for id_ in range(4): bay = obj_utils.create_test_cluster(self.context, id=id_, uuid=uuidutils.generate_uuid()) bay_list.append(bay) response = self.get_json('/bays/detail?limit=3&marker=%s' % bay_list[2].uuid) self.assertEqual(1, len(response['bays'])) self.assertEqual(bay_list[-1].uuid, response['bays'][0]['uuid']) self._verify_attrs(self._expand_bay_attrs, response['bays'][0]) def test_detail_against_single(self): bay = obj_utils.create_test_cluster(self.context) response = self.get_json('/bays/%s/detail' % bay['uuid'], expect_errors=True) self.assertEqual(404, response.status_int) def test_many(self): bm_list = [] for id_ in range(5): bay = obj_utils.create_test_cluster(self.context, id=id_, uuid=uuidutils.generate_uuid()) bm_list.append(bay.uuid) response = self.get_json('/bays') self.assertEqual(len(bm_list), len(response['bays'])) uuids = [b['uuid'] for b in response['bays']] self.assertEqual(sorted(bm_list), sorted(uuids)) def test_links(self): uuid = uuidutils.generate_uuid() obj_utils.create_test_cluster(self.context, id=1, uuid=uuid) response = self.get_json('/bays/%s' % uuid) self.assertIn('links', response.keys()) self.assertEqual(2, len(response['links'])) self.assertIn(uuid, response['links'][0]['href']) for l in response['links']: bookmark = l['rel'] == 'bookmark' self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) def test_collection_links(self): for id_ in range(5): obj_utils.create_test_cluster(self.context, id=id_, uuid=uuidutils.generate_uuid()) response = self.get_json('/bays/?limit=3') self.assertEqual(3, len(response['bays'])) next_marker = response['bays'][-1]['uuid'] self.assertIn(next_marker, response['next']) def test_collection_links_default_limit(self): cfg.CONF.set_override('max_limit', 3, 'api') for id_ in range(5): obj_utils.create_test_cluster(self.context, id=id_, 
uuid=uuidutils.generate_uuid()) response = self.get_json('/bays') self.assertEqual(3, len(response['bays'])) next_marker = response['bays'][-1]['uuid'] self.assertIn(next_marker, response['next']) class TestPatch(api_base.FunctionalTest): def setUp(self): super(TestPatch, self).setUp() self.cluster_template = obj_utils.create_test_cluster_template( self.context) self.bay = obj_utils.create_test_cluster(self.context, name='bay_example_A', node_count=3) p = mock.patch.object(rpcapi.API, 'cluster_update') self.mock_bay_update = p.start() self.mock_bay_update.side_effect = self._simulate_rpc_bay_update self.addCleanup(p.stop) def _simulate_rpc_bay_update(self, bay, rollback=False): bay.save() return bay @mock.patch('oslo_utils.timeutils.utcnow') def test_replace_ok(self, mock_utcnow): new_node_count = 4 test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time response = self.patch_json('/bays/%s' % self.bay.uuid, [{'path': '/node_count', 'value': new_node_count, 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) response = self.get_json('/bays/%s' % self.bay.uuid) self.assertEqual(new_node_count, response['node_count']) return_updated_at = timeutils.parse_isotime( response['updated_at']).replace(tzinfo=None) self.assertEqual(test_time, return_updated_at) # Assert nothing else was changed self.assertEqual(self.bay.uuid, response['uuid']) self.assertEqual(self.bay.cluster_template_id, response['baymodel_id']) @mock.patch('oslo_utils.timeutils.utcnow') def test_replace_ok_by_name(self, mock_utcnow): new_node_count = 4 test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time response = self.patch_json('/bays/%s' % self.bay.name, [{'path': '/node_count', 'value': new_node_count, 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) response = self.get_json('/bays/%s' % 
self.bay.uuid) self.assertEqual(new_node_count, response['node_count']) return_updated_at = timeutils.parse_isotime( response['updated_at']).replace(tzinfo=None) self.assertEqual(test_time, return_updated_at) # Assert nothing else was changed self.assertEqual(self.bay.uuid, response['uuid']) self.assertEqual(self.bay.cluster_template_id, response['baymodel_id']) @mock.patch('oslo_utils.timeutils.utcnow') def test_replace_ok_by_name_not_found(self, mock_utcnow): name = 'not_found' test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time response = self.patch_json('/bays/%s' % name, [{'path': '/name', 'value': name, 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(404, response.status_code) def test_replace_baymodel_id_failed(self): cluster_template = obj_utils.create_test_cluster_template( self.context, uuid=uuidutils.generate_uuid()) response = self.patch_json('/bays/%s' % self.bay.uuid, [{'path': '/baymodel_id', 'value': cluster_template.uuid, 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) @mock.patch('oslo_utils.timeutils.utcnow') def test_replace_ok_by_name_multiple_bay(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time obj_utils.create_test_cluster(self.context, name='test_bay', uuid=uuidutils.generate_uuid()) obj_utils.create_test_cluster(self.context, name='test_bay', uuid=uuidutils.generate_uuid()) response = self.patch_json('/bays/test_bay', [{'path': '/name', 'value': 'test_bay', 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(409, response.status_code) def test_replace_non_existent_baymodel_id(self): response = self.patch_json('/bays/%s' % self.bay.uuid, [{'path': '/baymodel_id', 'value': 
uuidutils.generate_uuid(), 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) def test_replace_invalid_node_count(self): response = self.patch_json('/bays/%s' % self.bay.uuid, [{'path': '/node_count', 'value': -1, 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) def test_replace_non_existent_bay(self): response = self.patch_json('/bays/%s' % uuidutils.generate_uuid(), [{'path': '/name', 'value': 'bay_example_B', 'op': 'replace'}], expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_replace_bay_name_failed(self): response = self.patch_json('/bays/%s' % self.bay.uuid, [{'path': '/name', 'value': 'bay_example_B', 'op': 'replace'}], expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_add_non_existent_property(self): response = self.patch_json( '/bays/%s' % self.bay.uuid, [{'path': '/foo', 'value': 'bar', 'op': 'add'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) @mock.patch.object(rpcapi.API, 'cluster_update_async') def test_update_bay_async(self, mock_update): response = self.patch_json( '/bays/%s' % self.bay.name, [{'path': '/node_count', 'value': 4, 'op': 'replace'}], headers={'OpenStack-API-Version': 'container-infra 1.2'}) self.assertEqual(202, response.status_code) @mock.patch.object(rpcapi.API, 'cluster_update_async') def test_update_bay_with_rollback_enabled(self, mock_update): response = self.patch_json( '/bays/%s/?rollback=True' % 
self.bay.name, [{'path': '/node_count', 'value': 4, 'op': 'replace'}], headers={'OpenStack-API-Version': 'container-infra 1.3'}) mock_update.assert_called_once_with(mock.ANY, rollback=True) self.assertEqual(202, response.status_code) def test_remove_ok(self): response = self.get_json('/bays/%s' % self.bay.uuid) self.assertIsNotNone(response['name']) response = self.patch_json('/bays/%s' % self.bay.uuid, [{'path': '/node_count', 'op': 'remove'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) response = self.get_json('/bays/%s' % self.bay.uuid) # only allow node_count for bay, and default value is 1 self.assertEqual(1, response['node_count']) # Assert nothing else was changed self.assertEqual(self.bay.uuid, response['uuid']) self.assertEqual(self.bay.cluster_template_id, response['baymodel_id']) self.assertEqual(self.bay.name, response['name']) self.assertEqual(self.bay.master_count, response['master_count']) def test_remove_mandatory_property_fail(self): mandatory_properties = ('/uuid', '/baymodel_id') for p in mandatory_properties: response = self.patch_json('/bays/%s' % self.bay.uuid, [{'path': p, 'op': 'remove'}], expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_remove_non_existent_property(self): response = self.patch_json( '/bays/%s' % self.bay.uuid, [{'path': '/non-existent', 'op': 'remove'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) class TestPost(api_base.FunctionalTest): def setUp(self): super(TestPost, self).setUp() self.cluster_template = obj_utils.create_test_cluster_template( self.context) p = mock.patch.object(rpcapi.API, 'cluster_create') self.mock_bay_create = p.start() self.mock_bay_create.side_effect = self._simulate_rpc_bay_create 
self.addCleanup(p.stop) p = mock.patch.object(attr_validator, 'validate_os_resources') self.mock_valid_os_res = p.start() self.addCleanup(p.stop) def _simulate_rpc_bay_create(self, bay, bay_create_timeout): bay.create() return bay @mock.patch('oslo_utils.timeutils.utcnow') def test_create_bay(self, mock_utcnow): bdict = apiutils.bay_post_data() test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time response = self.post_json('/bays', bdict) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) # Check location header self.assertIsNotNone(response.location) self.assertTrue(uuidutils.is_uuid_like(response.json['uuid'])) self.assertNotIn('updated_at', response.json.keys) return_created_at = timeutils.parse_isotime( response.json['created_at']).replace(tzinfo=None) self.assertEqual(test_time, return_created_at) self.assertEqual(bdict['bay_create_timeout'], response.json['bay_create_timeout']) def test_create_bay_set_project_id_and_user_id(self): bdict = apiutils.bay_post_data() def _simulate_rpc_bay_create(bay, bay_create_timeout): self.assertEqual(self.context.project_id, bay.project_id) self.assertEqual(self.context.user_id, bay.user_id) bay.create() return bay self.mock_bay_create.side_effect = _simulate_rpc_bay_create self.post_json('/bays', bdict) def test_create_bay_doesnt_contain_id(self): with mock.patch.object(self.dbapi, 'create_cluster', wraps=self.dbapi.create_cluster) as cc_mock: bdict = apiutils.bay_post_data(name='bay_example_A') response = self.post_json('/bays', bdict) self.assertEqual(bdict['name'], response.json['name']) cc_mock.assert_called_once_with(mock.ANY) # Check that 'id' is not in first arg of positional args self.assertNotIn('id', cc_mock.call_args[0][0]) def test_create_bay_generate_uuid(self): bdict = apiutils.bay_post_data() del bdict['uuid'] response = self.post_json('/bays', bdict) self.assertEqual('application/json', response.content_type) 
self.assertEqual(201, response.status_int) self.assertEqual(bdict['name'], response.json['name']) self.assertTrue(uuidutils.is_uuid_like(response.json['uuid'])) def test_create_bay_no_baymodel_id(self): bdict = apiutils.bay_post_data() del bdict['baymodel_id'] response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) def test_create_bay_with_non_existent_baymodel_id(self): bdict = apiutils.bay_post_data(baymodel_id=uuidutils.generate_uuid()) response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_create_bay_with_baymodel_name(self): bdict = apiutils.bay_post_data(baymodel_id=self.cluster_template.name) response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) def test_create_bay_with_node_count_zero(self): bdict = apiutils.bay_post_data() bdict['node_count'] = 0 response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_create_bay_with_node_count_negative(self): bdict = apiutils.bay_post_data() bdict['node_count'] = -1 response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_create_bay_with_no_node_count(self): bdict = apiutils.bay_post_data() del bdict['node_count'] response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertEqual(1, response.json['node_count']) def 
test_create_bay_with_master_count_zero(self): bdict = apiutils.bay_post_data() bdict['master_count'] = 0 response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_create_bay_with_no_master_count(self): bdict = apiutils.bay_post_data() del bdict['master_count'] response = self.post_json('/bays', bdict) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertEqual(1, response.json['master_count']) def test_create_bay_with_invalid_long_name(self): bdict = apiutils.bay_post_data(name='x' * 243) response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_create_bay_with_invalid_integer_name(self): bdict = apiutils.bay_post_data(name='123456') response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_create_bay_with_invalid_integer_str_name(self): bdict = apiutils.bay_post_data(name='123456test_bay') response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_create_bay_with_hyphen_invalid_at_start_name(self): bdict = apiutils.bay_post_data(name='-test_bay') response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_create_bay_with_period_invalid_at_start_name(self): bdict = apiutils.bay_post_data(name='.test_bay') response = self.post_json('/bays', 
bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_create_bay_with_underscore_invalid_at_start_name(self): bdict = apiutils.bay_post_data(name='_test_bay') response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_create_bay_with_valid_str_int_name(self): bdict = apiutils.bay_post_data(name='test_bay123456') response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertEqual(response.json['name'], bdict['name']) def test_create_bay_with_hyphen_valid_name(self): bdict = apiutils.bay_post_data(name='test-bay') response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertEqual(response.json['name'], bdict['name']) def test_create_bay_with_period_valid_name(self): bdict = apiutils.bay_post_data(name='test.bay') response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertEqual(response.json['name'], bdict['name']) def test_create_bay_with_period_at_end_valid_name(self): bdict = apiutils.bay_post_data(name='testbay.') response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertEqual(response.json['name'], bdict['name']) def test_create_bay_with_hyphen_at_end_valid_name(self): bdict = apiutils.bay_post_data(name='testbay-') response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', 
response.content_type) self.assertEqual(201, response.status_int) self.assertEqual(response.json['name'], bdict['name']) def test_create_bay_with_underscore_at_end_valid_name(self): bdict = apiutils.bay_post_data(name='testbay_') response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertEqual(response.json['name'], bdict['name']) def test_create_bay_with_mix_special_char_valid_name(self): bdict = apiutils.bay_post_data(name='test.-_bay') response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertEqual(response.json['name'], bdict['name']) def test_create_bay_with_capital_letter_start_valid_name(self): bdict = apiutils.bay_post_data(name='Testbay') response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertEqual(response.json['name'], bdict['name']) def test_create_bay_with_invalid_empty_name(self): bdict = apiutils.bay_post_data(name='') response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_create_bay_without_name(self): bdict = apiutils.bay_post_data() del bdict['name'] response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertIsNotNone(response.json['name']) def test_create_bay_with_timeout_none(self): bdict = apiutils.bay_post_data() bdict['bay_create_timeout'] = None response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) def 
test_create_bay_with_no_timeout(self): def _simulate_rpc_bay_create(bay, bay_create_timeout): self.assertEqual(60, bay_create_timeout) bay.create() return bay self.mock_bay_create.side_effect = _simulate_rpc_bay_create bdict = apiutils.bay_post_data() del bdict['bay_create_timeout'] response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) def test_create_bay_with_timeout_negative(self): bdict = apiutils.bay_post_data() bdict['bay_create_timeout'] = -1 response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_create_bay_with_timeout_zero(self): bdict = apiutils.bay_post_data() bdict['bay_create_timeout'] = 0 response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) def test_create_bay_with_invalid_flavor(self): bdict = apiutils.bay_post_data() self.mock_valid_os_res.side_effect = exception.FlavorNotFound( 'test-flavor') response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertTrue(self.mock_valid_os_res.called) self.assertEqual(400, response.status_int) def test_create_bay_with_invalid_ext_network(self): bdict = apiutils.bay_post_data() self.mock_valid_os_res.side_effect = exception.ExternalNetworkNotFound( 'test-net') response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertTrue(self.mock_valid_os_res.called) self.assertEqual(400, response.status_int) def test_create_bay_with_invalid_keypair(self): bdict = apiutils.bay_post_data() self.mock_valid_os_res.side_effect = exception.KeyPairNotFound( 'test-key') response = self.post_json('/bays', bdict, 
expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertTrue(self.mock_valid_os_res.called) self.assertEqual(404, response.status_int) def test_create_bay_with_nonexist_image(self): bdict = apiutils.bay_post_data() self.mock_valid_os_res.side_effect = exception.ImageNotFound( 'test-img') response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertTrue(self.mock_valid_os_res.called) self.assertEqual(400, response.status_int) def test_create_bay_with_multi_images_same_name(self): bdict = apiutils.bay_post_data() self.mock_valid_os_res.side_effect = exception.Conflict('test-img') response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertTrue(self.mock_valid_os_res.called) self.assertEqual(409, response.status_int) def test_create_bay_with_on_os_distro_image(self): bdict = apiutils.bay_post_data() self.mock_valid_os_res.side_effect = exception.OSDistroFieldNotFound( 'img') response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertTrue(self.mock_valid_os_res.called) self.assertEqual(400, response.status_int) def test_create_bay_with_no_lb_one_node(self): cluster_template = obj_utils.create_test_cluster_template( self.context, name='foo', uuid='foo', master_lb_enabled=False) bdict = apiutils.bay_post_data(baymodel_id=cluster_template.name, master_count=1) response = self.post_json('/bays', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) def test_create_bay_with_no_lb_multi_node(self): cluster_template = obj_utils.create_test_cluster_template( self.context, name='foo', uuid='foo', master_lb_enabled=False) bdict = apiutils.bay_post_data(baymodel_id=cluster_template.name, master_count=3) response = self.post_json('/bays', bdict, 
expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) def test_create_bay_with_docker_volume_size(self): bdict = apiutils.bay_post_data() bdict['docker_volume_size'] = 3 response = self.post_json('/bays', bdict) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) bay, timeout = self.mock_bay_create.call_args self.assertEqual(3, bay[0].docker_volume_size) def test_create_bay_without_docker_volume_size(self): bdict = apiutils.bay_post_data() # Remove the default docker_volume_size from the bay dict. del bdict['docker_volume_size'] response = self.post_json('/bays', bdict) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) bay, timeout = self.mock_bay_create.call_args # Verify docker_volume_size from BayModel is used self.assertEqual(20, bay[0].docker_volume_size) class TestDelete(api_base.FunctionalTest): def setUp(self): super(TestDelete, self).setUp() self.cluster_template = obj_utils.create_test_cluster_template( self.context) self.bay = obj_utils.create_test_cluster(self.context) p = mock.patch.object(rpcapi.API, 'cluster_delete') self.mock_bay_delete = p.start() self.mock_bay_delete.side_effect = self._simulate_rpc_bay_delete self.addCleanup(p.stop) def _simulate_rpc_bay_delete(self, bay_uuid): bay = objects.Cluster.get_by_uuid(self.context, bay_uuid) bay.destroy() def test_delete_bay(self): self.delete('/bays/%s' % self.bay.uuid) response = self.get_json('/bays/%s' % self.bay.uuid, expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_delete_bay_not_found(self): uuid = uuidutils.generate_uuid() response = self.delete('/bays/%s' % uuid, expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) 
self.assertTrue(response.json['errors']) def test_delete_bay_with_name_not_found(self): response = self.delete('/bays/not_found', expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_delete_bay_with_name(self): response = self.delete('/bays/%s' % self.bay.name, expect_errors=True) self.assertEqual(204, response.status_int) def test_delete_multiple_bay_by_name(self): obj_utils.create_test_cluster(self.context, name='test_bay', uuid=uuidutils.generate_uuid()) obj_utils.create_test_cluster(self.context, name='test_bay', uuid=uuidutils.generate_uuid()) response = self.delete('/bays/test_bay', expect_errors=True) self.assertEqual(409, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) class TestBayPolicyEnforcement(api_base.FunctionalTest): def setUp(self): super(TestBayPolicyEnforcement, self).setUp() obj_utils.create_test_cluster_template(self.context) def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({rule: "project:non_fake"}) response = func(*arg, **kwarg) self.assertEqual(403, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy doesn't allow %s to be performed." 
% rule, response.json['errors'][0]['detail']) def test_policy_disallow_get_all(self): self._common_policy_check( "bay:get_all", self.get_json, '/bays', expect_errors=True) def test_policy_disallow_get_one(self): self.bay = obj_utils.create_test_cluster(self.context) self._common_policy_check( "bay:get", self.get_json, '/bays/%s' % self.bay.uuid, expect_errors=True) def test_policy_disallow_detail(self): self._common_policy_check( "bay:detail", self.get_json, '/bays/%s/detail' % uuidutils.generate_uuid(), expect_errors=True) def test_policy_disallow_update(self): self.bay = obj_utils.create_test_cluster(self.context, name='bay_example_A', node_count=3) self._common_policy_check( "bay:update", self.patch_json, '/bays/%s' % self.bay.name, [{'path': '/name', 'value': "new_name", 'op': 'replace'}], expect_errors=True) def test_policy_disallow_create(self): bdict = apiutils.bay_post_data(name='bay_example_A') self._common_policy_check( "bay:create", self.post_json, '/bays', bdict, expect_errors=True) def _simulate_rpc_bay_delete(self, bay_uuid): bay = objects.Cluster.get_by_uuid(self.context, bay_uuid) bay.destroy() def test_policy_disallow_delete(self): p = mock.patch.object(rpcapi.API, 'cluster_delete') self.mock_bay_delete = p.start() self.mock_bay_delete.side_effect = self._simulate_rpc_bay_delete self.addCleanup(p.stop) self.bay = obj_utils.create_test_cluster(self.context) self._common_policy_check( "bay:delete", self.delete, '/bays/%s' % self.bay.uuid, expect_errors=True) def _owner_check(self, rule, func, *args, **kwargs): self.policy.set_rules({rule: "user_id:%(user_id)s"}) response = func(*args, **kwargs) self.assertEqual(403, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy doesn't allow %s to be performed." 
% rule, response.json['errors'][0]['detail']) def test_policy_only_owner_get_one(self): bay = obj_utils.create_test_cluster(self.context, user_id='another') self._owner_check("bay:get", self.get_json, '/bays/%s' % bay.uuid, expect_errors=True) def test_policy_only_owner_update(self): bay = obj_utils.create_test_cluster(self.context, user_id='another') self._owner_check( "bay:update", self.patch_json, '/bays/%s' % bay.uuid, [{'path': '/name', 'value': "new_name", 'op': 'replace'}], expect_errors=True) def test_policy_only_owner_delete(self): bay = obj_utils.create_test_cluster(self.context, user_id='another') self._owner_check("bay:delete", self.delete, '/bays/%s' % bay.uuid, expect_errors=True) magnum-6.1.0/magnum/tests/unit/api/controllers/v1/__init__.py0000666000175100017510000000000013244017334024202 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/api/controllers/v1/test_certificate.py0000666000175100017510000002700113244017334025776 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from oslo_utils import uuidutils from magnum.api.controllers.v1 import certificate as api_cert from magnum.tests import base from magnum.tests.unit.api import base as api_base from magnum.tests.unit.api import utils as api_utils from magnum.tests.unit.objects import utils as obj_utils HEADERS = {'OpenStack-API-Version': 'container-infra latest'} class TestCertObject(base.TestCase): @mock.patch('magnum.api.utils.get_resource') def test_cert_init(self, mock_get_resource): cert_dict = api_utils.cert_post_data() mock_cluster = mock.MagicMock() mock_cluster.uuid = cert_dict['cluster_uuid'] mock_get_resource.return_value = mock_cluster cert = api_cert.Certificate(**cert_dict) self.assertEqual(cert_dict['cluster_uuid'], cert.cluster_uuid) self.assertEqual(cert_dict['csr'], cert.csr) self.assertEqual(cert_dict['pem'], cert.pem) class TestGetCaCertificate(api_base.FunctionalTest): def setUp(self): super(TestGetCaCertificate, self).setUp() self.cluster = obj_utils.create_test_cluster(self.context) conductor_api_patcher = mock.patch('magnum.conductor.api.API') self.conductor_api_class = conductor_api_patcher.start() self.conductor_api = mock.MagicMock() self.conductor_api_class.return_value = self.conductor_api self.addCleanup(conductor_api_patcher.stop) def test_get_one(self): fake_cert = api_utils.cert_post_data() mock_cert = mock.MagicMock() mock_cert.as_dict.return_value = fake_cert self.conductor_api.get_ca_certificate.return_value = mock_cert response = self.get_json('/certificates/%s' % self.cluster.uuid, headers=HEADERS) self.assertEqual(self.cluster.uuid, response['cluster_uuid']) # check that bay is still valid as well self.assertEqual(self.cluster.uuid, response['bay_uuid']) self.assertEqual(fake_cert['csr'], response['csr']) self.assertEqual(fake_cert['pem'], response['pem']) def test_get_one_by_name(self): fake_cert = api_utils.cert_post_data() mock_cert = mock.MagicMock() mock_cert.as_dict.return_value = fake_cert 
self.conductor_api.get_ca_certificate.return_value = mock_cert response = self.get_json('/certificates/%s' % self.cluster.name, headers=HEADERS) self.assertEqual(self.cluster.uuid, response['cluster_uuid']) # check that bay is still valid as well self.assertEqual(self.cluster.uuid, response['bay_uuid']) self.assertEqual(fake_cert['csr'], response['csr']) self.assertEqual(fake_cert['pem'], response['pem']) def test_get_one_by_name_not_found(self): response = self.get_json('/certificates/not_found', expect_errors=True, headers=HEADERS) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_get_one_by_name_multiple_cluster(self): obj_utils.create_test_cluster(self.context, name='test_cluster', uuid=uuidutils.generate_uuid()) obj_utils.create_test_cluster(self.context, name='test_cluster', uuid=uuidutils.generate_uuid()) response = self.get_json('/certificates/test_cluster', expect_errors=True, headers=HEADERS) self.assertEqual(409, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_links(self): fake_cert = api_utils.cert_post_data() mock_cert = mock.MagicMock() mock_cert.as_dict.return_value = fake_cert self.conductor_api.get_ca_certificate.return_value = mock_cert response = self.get_json('/certificates/%s' % self.cluster.uuid, headers=HEADERS) self.assertIn('links', response.keys()) self.assertEqual(2, len(response['links'])) self.assertIn(self.cluster.uuid, response['links'][0]['href']) for l in response['links']: bookmark = l['rel'] == 'bookmark' self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) class TestPost(api_base.FunctionalTest): def setUp(self): super(TestPost, self).setUp() self.cluster = obj_utils.create_test_cluster(self.context) conductor_api_patcher = mock.patch('magnum.conductor.api.API') self.conductor_api_class = conductor_api_patcher.start() 
self.conductor_api = mock.MagicMock() self.conductor_api_class.return_value = self.conductor_api self.addCleanup(conductor_api_patcher.stop) self.conductor_api.sign_certificate.side_effect = self._fake_sign @staticmethod def _fake_sign(cluster, cert): cert.pem = 'fake-pem' return cert def test_create_cert(self, ): new_cert = api_utils.cert_post_data(cluster_uuid=self.cluster.uuid) del new_cert['pem'] response = self.post_json('/certificates', new_cert, headers=HEADERS) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertEqual(new_cert['cluster_uuid'], response.json['cluster_uuid']) # verify bay_uuid is still valid as well self.assertEqual(new_cert['cluster_uuid'], response.json['bay_uuid']) self.assertEqual('fake-pem', response.json['pem']) # Test that bay_uuid is still backward compatible def test_create_cert_by_bay_name(self, ): new_cert = api_utils.cert_post_data(cluster_uuid=self.cluster.uuid) del new_cert['pem'] new_cert['bay_uuid'] = new_cert['cluster_uuid'] del new_cert['cluster_uuid'] response = self.post_json('/certificates', new_cert, headers=HEADERS) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertEqual(self.cluster.uuid, response.json['cluster_uuid']) # verify bay_uuid is still valid as well self.assertEqual(self.cluster.uuid, response.json['bay_uuid']) self.assertEqual('fake-pem', response.json['pem']) def test_create_cert_by_cluster_name(self, ): new_cert = api_utils.cert_post_data(cluster_uuid=self.cluster.name) del new_cert['pem'] response = self.post_json('/certificates', new_cert, headers=HEADERS) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertEqual(self.cluster.uuid, response.json['cluster_uuid']) self.assertEqual('fake-pem', response.json['pem']) def test_create_cert_cluster_not_found(self, ): new_cert = api_utils.cert_post_data(cluster_uuid='not_found') 
del new_cert['pem'] response = self.post_json('/certificates', new_cert, expect_errors=True, headers=HEADERS) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) class TestRotateCaCertificate(api_base.FunctionalTest): def setUp(self): super(TestRotateCaCertificate, self).setUp() self.cluster = obj_utils.create_test_cluster(self.context) conductor_api_patcher = mock.patch('magnum.conductor.api.API') self.conductor_api_class = conductor_api_patcher.start() self.conductor_api = mock.MagicMock() self.conductor_api_class.return_value = self.conductor_api self.addCleanup(conductor_api_patcher.stop) @mock.patch("magnum.common.policy.enforce") def test_rotate_ca_cert(self, mock_policy): mock_policy.return_value = True fake_cert = api_utils.cert_post_data() mock_cert = mock.MagicMock() mock_cert.as_dict.return_value = fake_cert self.conductor_api.rotate_ca_certificate.return_value = mock_cert response = self.patch_json('/certificates/%s' % self.cluster.uuid, params={}, headers=HEADERS) self.assertEqual(202, response.status_code) class TestRotateCaCertificateNonTls(api_base.FunctionalTest): def setUp(self): super(TestRotateCaCertificateNonTls, self).setUp() self.cluster_template = obj_utils.create_test_cluster_template( self.context, tls_disabled=True) self.cluster = obj_utils.create_test_cluster(self.context) conductor_api_patcher = mock.patch('magnum.conductor.api.API') self.conductor_api_class = conductor_api_patcher.start() self.conductor_api = mock.MagicMock() self.conductor_api_class.return_value = self.conductor_api self.addCleanup(conductor_api_patcher.stop) @mock.patch("magnum.common.policy.enforce") def test_rotate_ca_cert_non_tls(self, mock_policy): mock_policy.return_value = True fake_cert = api_utils.cert_post_data() mock_cert = mock.MagicMock() mock_cert.as_dict.return_value = fake_cert self.conductor_api.rotate_ca_certificate.return_value = mock_cert response = 
self.patch_json('/certificates/%s' % self.cluster.uuid, params={}, headers=HEADERS, expect_errors=True) self.assertEqual(400, response.status_code) self.assertIn("Rotating the CA certificate on a non-TLS cluster", response.json['errors'][0]['detail']) class TestCertPolicyEnforcement(api_base.FunctionalTest): def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({rule: "project_id:non_fake"}) response = func(*arg, **kwarg) self.assertEqual(403, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy doesn't allow %s to be performed." % rule, response.json['errors'][0]['detail']) def test_policy_disallow_get_one(self): cluster = obj_utils.create_test_cluster(self.context) self._common_policy_check( "certificate:get", self.get_json, '/certificates/%s' % cluster.uuid, expect_errors=True, headers=HEADERS) def test_policy_disallow_create(self): cluster = obj_utils.create_test_cluster(self.context) cert = api_utils.cert_post_data(cluster_uuid=cluster.uuid) self._common_policy_check( "certificate:create", self.post_json, '/certificates', cert, expect_errors=True, headers=HEADERS) def test_policy_disallow_rotate(self): cluster = obj_utils.create_test_cluster(self.context) self._common_policy_check( "certificate:rotate_ca", self.patch_json, '/certificates/%s' % cluster.uuid, params={}, expect_errors=True, headers=HEADERS) magnum-6.1.0/magnum/tests/unit/api/controllers/v1/test_quota.py0000666000175100017510000003323213244017334024650 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import mock from keystoneauth1 import exceptions as ka_exception from magnum.api.controllers.v1 import quota as api_quota from magnum.common import clients from magnum.tests import base from magnum.tests.unit.api import base as api_base from magnum.tests.unit.api import utils as apiutils from magnum.tests.unit.objects import utils as obj_utils class TestQuotaObject(base.TestCase): def test_quota_init(self): quota_dict = apiutils.quota_post_data() del quota_dict['hard_limit'] quota = api_quota.Quota(**quota_dict) self.assertEqual(1, quota.hard_limit) class TestQuota(api_base.FunctionalTest): _quota_attrs = ("project_id", "resource", "hard_limit") def setUp(self): super(TestQuota, self).setUp() @mock.patch("magnum.common.policy.enforce") def test_empty(self, mock_policy): mock_policy.return_value = True response = self.get_json('/quotas') self.assertEqual([], response['quotas']) @mock.patch("magnum.common.policy.enforce") def test_one(self, mock_policy): mock_policy.return_value = True quota = obj_utils.create_test_quota(self.context) response = self.get_json('/quotas') self.assertEqual(quota.project_id, response['quotas'][0]["project_id"]) self._verify_attrs(self._quota_attrs, response['quotas'][0]) @mock.patch("magnum.common.policy.enforce") def test_get_one(self, mock_policy): mock_policy.return_value = True quota = obj_utils.create_test_quota(self.context) response = self.get_json('/quotas/%s/%s' % (quota['project_id'], quota['resource'])) self.assertEqual(quota.project_id, response['project_id']) self.assertEqual(quota.resource, response['resource']) @mock.patch("magnum.common.policy.enforce") def test_get_one_not_found(self, mock_policy): mock_policy.return_value = True response = self.get_json( '/quotas/fake_project/invalid_res', expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) 
self.assertTrue(response.json['errors']) def test_get_one_not_authorized(self): obj_utils.create_test_quota(self.context) response = self.get_json( '/quotas/invalid_proj/invalid_res', expect_errors=True) self.assertEqual(403, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) @mock.patch("magnum.common.policy.enforce") @mock.patch("magnum.common.context.make_context") def test_get_all_admin_all_tenants(self, mock_context, mock_policy): mock_context.return_value = self.context quota_list = [] for i in range(4): quota = obj_utils.create_test_quota(self.context, project_id="proj-id-"+str(i)) quota_list.append(quota) self.context.is_admin = True response = self.get_json('/quotas?all_tenants=True') self.assertEqual(4, len(response['quotas'])) expected = [r.project_id for r in quota_list] res_proj_ids = [r['project_id'] for r in response['quotas']] self.assertEqual(sorted(expected), sorted(res_proj_ids)) @mock.patch("magnum.common.policy.enforce") @mock.patch("magnum.common.context.make_context") def test_get_all_with_non_admin_context(self, mock_context, mock_policy): mock_context.return_value = self.context quota_list = [] for i in range(4): quota = obj_utils.create_test_quota(self.context, project_id="proj-id-"+str(i)) quota_list.append(quota) self.context.is_admin = False response = self.get_json('/quotas?all_tenants=True') self.assertEqual(0, len(response['quotas'])) @mock.patch("magnum.common.policy.enforce") @mock.patch("magnum.common.context.make_context") def test_get_all_admin_not_all_tenants(self, mock_context, mock_policy): mock_context.return_value = self.context quota_list = [] for i in range(4): quota = obj_utils.create_test_quota(self.context, project_id="proj-id-"+str(i)) quota_list.append(quota) self.context.is_admin = True self.context.project_id = 'proj-id-1' response = self.get_json('/quotas') self.assertEqual(1, len(response['quotas'])) self.assertEqual('proj-id-1', 
response['quotas'][0]['project_id']) @mock.patch("magnum.common.policy.enforce") @mock.patch("magnum.common.context.make_context") def test_get_all_with_pagination_limit(self, mock_context, mock_policy): mock_context.return_value = self.context quota_list = [] for i in range(4): quota = obj_utils.create_test_quota(self.context, project_id="proj-id-"+str(i)) quota_list.append(quota) self.context.is_admin = True response = self.get_json('/quotas?limit=2&all_tenants=True') self.assertEqual(2, len(response['quotas'])) expected = [r.project_id for r in quota_list[:2]] res_proj_ids = [r['project_id'] for r in response['quotas']] self.assertEqual(sorted(expected), sorted(res_proj_ids)) self.assertTrue('http://localhost/v1/quotas?' in response['next']) self.assertTrue('sort_key=id' in response['next']) self.assertTrue('sort_dir=asc' in response['next']) self.assertTrue('limit=2' in response['next']) self.assertTrue('marker=%s' % quota_list[1].id in response['next']) @mock.patch("magnum.common.policy.enforce") @mock.patch("magnum.common.context.make_context") def test_get_all_admin_all_with_pagination_marker(self, mock_context, mock_policy): mock_context.return_value = self.context quota_list = [] for i in range(4): quota = obj_utils.create_test_quota(self.context, project_id="proj-id-"+str(i)) quota_list.append(quota) self.context.is_admin = True response = self.get_json('/quotas?limit=3&marker=%s&all_tenants=True' % quota_list[2].id) self.assertEqual(1, len(response['quotas'])) self.assertEqual(quota_list[-1].project_id, response['quotas'][0]['project_id']) @mock.patch("magnum.common.policy.enforce") @mock.patch("magnum.common.context.make_context") def test_get_all_admin_all_tenants_false(self, mock_context, mock_policy): mock_context.return_value = self.context quota_list = [] for i in range(4): quota = obj_utils.create_test_quota(self.context, project_id="proj-id-"+str(i)) quota_list.append(quota) self.context.is_admin = True self.context.project_id = 'proj-id-1' 
response = self.get_json('/quotas?all_tenants=False') self.assertEqual(1, len(response['quotas'])) self.assertEqual('proj-id-1', response['quotas'][0]['project_id']) @mock.patch("magnum.common.policy.enforce") def test_get_all_non_admin(self, mock_policy): mock_policy.return_value = True quota_list = [] for i in range(4): quota = obj_utils.create_test_quota(self.context, project_id="proj-id-"+str(i)) quota_list.append(quota) headers = {'X-Project-Id': 'proj-id-2'} response = self.get_json('/quotas', headers=headers) self.assertEqual(1, len(response['quotas'])) self.assertEqual('proj-id-2', response['quotas'][0]['project_id']) @mock.patch("magnum.common.policy.enforce") @mock.patch.object(clients.OpenStackClients, 'keystone') def test_create_quota(self, mock_keystone, mock_policy): mock_policy.return_value = True quota_dict = apiutils.quota_post_data() response = self.post_json('/quotas', quota_dict) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertEqual(quota_dict['project_id'], response.json['project_id']) @mock.patch.object(clients.OpenStackClients, 'keystone') def test_create_quota_project_id_not_found(self, mock_keystone): keystone = mock.MagicMock() exp = ka_exception.http.NotFound() keystone.domain_admin_client.projects .get.side_effect = exp mock_keystone.return_value = keystone quota_dict = apiutils.quota_post_data() response = self.post_json('/quotas', quota_dict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(404, response.status_int) self.assertTrue(response.json['errors']) @mock.patch.object(clients.OpenStackClients, 'keystone') def test_create_quota_invalid_resource(self, mock_keystone): quota_dict = apiutils.quota_post_data() quota_dict['resource'] = 'invalid-res' response = self.post_json('/quotas', quota_dict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) 
self.assertTrue(response.json['errors']) @mock.patch.object(clients.OpenStackClients, 'keystone') def test_create_quota_invalid_hard_limit(self, mock_keystone): quota_dict = apiutils.quota_post_data() quota_dict['hard_limit'] = -10 response = self.post_json('/quotas', quota_dict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) @mock.patch("magnum.common.policy.enforce") @mock.patch.object(clients.OpenStackClients, 'keystone') def test_create_quota_no_project_id(self, mock_keystone, mock_policy): mock_policy.return_value = True quota_dict = apiutils.quota_post_data() del quota_dict['project_id'] response = self.post_json('/quotas', quota_dict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) @mock.patch("magnum.common.policy.enforce") @mock.patch.object(clients.OpenStackClients, 'keystone') def test_patch_quota(self, mock_keystone, mock_policy): mock_policy.return_value = True quota_dict = apiutils.quota_post_data(hard_limit=5) response = self.post_json('/quotas', quota_dict) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertEqual(quota_dict['project_id'], response.json['project_id']) self.assertEqual(5, response.json['hard_limit']) quota_dict['hard_limit'] = 20 response = self.patch_json('/quotas', quota_dict) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_int) self.assertEqual(20, response.json['hard_limit']) @mock.patch("magnum.common.policy.enforce") @mock.patch.object(clients.OpenStackClients, 'keystone') def test_patch_quota_not_found(self, mock_keystone, mock_policy): mock_policy.return_value = True quota_dict = apiutils.quota_post_data() response = self.post_json('/quotas', quota_dict) 
self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) # update quota with non-existing project id update_dict = {'project_id': 'not-found', 'hard_limit': 20, 'resource': 'Cluster'} response = self.patch_json('/quotas', update_dict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(404, response.status_int) self.assertTrue(response.json['errors']) @mock.patch("magnum.common.policy.enforce") @mock.patch.object(clients.OpenStackClients, 'keystone') def test_delete_quota(self, mock_keystone, mock_policy): mock_policy.return_value = True quota_dict = apiutils.quota_post_data() response = self.post_json('/quotas', quota_dict) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) project_id = quota_dict['project_id'] resource = quota_dict['resource'] # delete quota self.delete('/quotas/%s/%s' % (project_id, resource)) # now check that quota does not exist response = self.get_json( '/quotas/%s/%s' % (project_id, resource), expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) magnum-6.1.0/magnum/tests/unit/api/controllers/v1/test_stats.py0000666000175100017510000001332713244017334024660 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from webtest.app import AppError from magnum.tests.unit.api import base as api_base from magnum.tests.unit.objects import utils as obj_utils class TestStatsController(api_base.FunctionalTest): def setUp(self): self.base_headers = {'OpenStack-API-Version': 'container-infra 1.4'} super(TestStatsController, self).setUp() obj_utils.create_test_cluster_template(self.context) def test_empty(self): response = self.get_json('/stats', headers=self.base_headers) expected = {u'clusters': 0, u'nodes': 0} self.assertEqual(expected, response) @mock.patch("magnum.common.policy.enforce") @mock.patch("magnum.common.context.make_context") def test_admin_get_all_stats(self, mock_context, mock_policy): obj_utils.create_test_cluster(self.context, project_id=123, uuid='uuid1') obj_utils.create_test_cluster(self.context, project_id=234, uuid='uuid2') response = self.get_json('/stats', headers=self.base_headers) expected = {u'clusters': 2, u'nodes': 12} self.assertEqual(expected, response) @mock.patch("magnum.common.policy.enforce") @mock.patch("magnum.common.context.make_context") def test_admin_get_tenant_stats(self, mock_context, mock_policy): obj_utils.create_test_cluster(self.context, project_id=123, uuid='uuid1') obj_utils.create_test_cluster(self.context, project_id=234, uuid='uuid2') self.context.is_admin = True response = self.get_json('/stats?project_id=234', headers=self.base_headers) expected = {u'clusters': 1, u'nodes': 6} self.assertEqual(expected, response) @mock.patch("magnum.common.policy.enforce") @mock.patch("magnum.common.context.make_context") def test_admin_get_invalid_tenant_stats(self, mock_context, mock_policy): obj_utils.create_test_cluster(self.context, project_id=123, uuid='uuid1') obj_utils.create_test_cluster(self.context, project_id=234, uuid='uuid2') self.context.is_admin = True response = self.get_json('/stats?project_id=34', headers=self.base_headers) expected = {u'clusters': 0, u'nodes': 0} self.assertEqual(expected, response) def 
test_get_self_stats(self): obj_utils.create_test_cluster(self.context, project_id=123, uuid='uuid1') obj_utils.create_test_cluster(self.context, project_id=234, uuid='uuid2', node_count=5, master_count=1) headers = self.base_headers.copy() headers['X-Project-Id'] = '234' response = self.get_json('/stats', headers=headers) expected = {u'clusters': 1, u'nodes': 6} self.assertEqual(expected, response) def test_get_self_stats_without_param(self): obj_utils.create_test_cluster(self.context, project_id=123, uuid='uuid1') obj_utils.create_test_cluster(self.context, project_id=234, uuid='uuid2', node_count=5, master_count=1) headers = self.base_headers.copy() headers['X-Project-Id'] = '234' response = self.get_json('/stats', headers=headers) expected = {u'clusters': 1, u'nodes': 6} self.assertEqual(expected, response) def test_get_some_other_user_stats(self): obj_utils.create_test_cluster(self.context, project_id=123, uuid='uuid1') obj_utils.create_test_cluster(self.context, project_id=234, uuid='uuid2', node_count=5) headers = self.base_headers.copy() headers['X-Project-Id'] = '234' self.assertRaises(AppError, self.get_json, '/stats?project_id=123', headers=headers) def test_get_invalid_type_stats(self): obj_utils.create_test_cluster(self.context, project_id=123, uuid='uuid1') self.assertRaises(AppError, self.get_json, '/stats?project_id=123&type=invalid', headers=self.base_headers) magnum-6.1.0/magnum/tests/unit/api/controllers/noauth-paste.ini0000666000175100017510000000064213244017334024670 0ustar zuulzuul00000000000000[pipeline:main] pipeline = cors request_id api_v1 [app:api_v1] paste.app_factory = magnum.api.app:app_factory [filter:authtoken] acl_public_routes = / paste.filter_factory = magnum.api.middleware.auth_token:AuthTokenMiddleware.factory [filter:request_id] paste.filter_factory = oslo_middleware:RequestId.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = magnum 
magnum-6.1.0/magnum/tests/unit/api/controllers/auth-paste.ini0000666000175100017510000000110513244017334024326 0ustar zuulzuul00000000000000[pipeline:main] pipeline = cors healthcheck request_id authtoken api_v1 [app:api_v1] paste.app_factory = magnum.api.app:app_factory [filter:authtoken] paste.filter_factory = magnum.api.middleware.auth_token:AuthTokenMiddleware.factory [filter:request_id] paste.filter_factory = oslo_middleware:RequestId.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = magnum [filter:healthcheck] paste.filter_factory = oslo_middleware:Healthcheck.factory backends = disable_by_file disable_by_file_path = /tmp/magnum_healthcheck_disable magnum-6.1.0/magnum/tests/unit/api/controllers/test_root.py0000666000175100017510000002506513244017334024161 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import fixtures import mock from oslo_config import cfg from webob import exc as webob_exc try: import configparser as ConfigParser except ImportError: import ConfigParser import shutil import webtest from magnum.api import app from magnum.api.controllers import v1 as v1_api from magnum.tests import base as test_base from magnum.tests.unit.api import base as api_base class TestRootController(api_base.FunctionalTest): def setUp(self): super(TestRootController, self).setUp() self.root_expected = { u'description': u'Magnum is an OpenStack project which ' 'aims to provide container cluster management.', u'name': u'OpenStack Magnum API', u'versions': [{u'id': u'v1', u'links': [{u'href': u'http://localhost/v1/', u'rel': u'self'}], u'status': u'CURRENT', u'max_version': u'1.6', u'min_version': u'1.1'}]} self.v1_expected = { u'media_types': [{u'base': u'application/json', u'type': u'application/vnd.openstack.magnum.v1+json'}], u'links': [{u'href': u'http://localhost/v1/', u'rel': u'self'}, {u'href': u'http://docs.openstack.org/developer' '/magnum/dev/api-spec-v1.html', u'type': u'text/html', u'rel': u'describedby'}], u'stats': [{u'href': u'http://localhost/v1/stats/', u'rel': u'self'}, {u'href': u'http://localhost/stats/', u'rel': u'bookmark'}], u'bays': [{u'href': u'http://localhost/v1/bays/', u'rel': u'self'}, {u'href': u'http://localhost/bays/', u'rel': u'bookmark'}], u'baymodels': [{u'href': u'http://localhost/v1/baymodels/', u'rel': u'self'}, {u'href': u'http://localhost/baymodels/', u'rel': u'bookmark'}], u'clusters': [{u'href': u'http://localhost/v1/clusters/', u'rel': u'self'}, {u'href': u'http://localhost/clusters/', u'rel': u'bookmark'}], u'quotas': [{u'href': u'http://localhost/v1/quotas/', u'rel': u'self'}, {u'href': u'http://localhost/quotas/', u'rel': u'bookmark'}], u'clustertemplates': [{u'href': u'http://localhost/v1/clustertemplates/', u'rel': u'self'}, {u'href': u'http://localhost/clustertemplates/', u'rel': u'bookmark'}], u'id': u'v1', u'certificates': 
[{u'href': u'http://localhost/v1/certificates/', u'rel': u'self'}, {u'href': u'http://localhost/certificates/', u'rel': u'bookmark'}], u'mservices': [{u'href': u'http://localhost/v1/mservices/', u'rel': u'self'}, {u'href': u'http://localhost/mservices/', u'rel': u'bookmark'}], u'federations': [{u'href': u'http://localhost/v1/federations/', u'rel': u'self'}, {u'href': u'http://localhost/federations/', u'rel': u'bookmark'}]} def make_app(self, paste_file): file_name = self.get_path(paste_file) cfg.CONF.set_override("api_paste_config", file_name, group="api") return webtest.TestApp(app.load_app()) def test_version(self): response = self.app.get('/') self.assertEqual(self.root_expected, response.json) def test_v1_controller(self): response = self.app.get('/v1/') self.assertEqual(self.v1_expected, response.json) def test_get_not_found(self): response = self.app.get('/a/bogus/url', expect_errors=True) assert response.status_int == 404 def test_api_paste_file_not_exist(self): cfg.CONF.set_override('api_paste_config', 'non-existent-file', group='api') with mock.patch.object(cfg.CONF, 'find_file') as ff: ff.return_value = None self.assertRaises(cfg.ConfigFilesNotFoundError, app.load_app) @mock.patch('magnum.api.app.deploy') def test_api_paste_file_not_exist_not_abs(self, mock_deploy): path = self.get_path(cfg.CONF['api']['api_paste_config'] + 'test') cfg.CONF.set_override('api_paste_config', path, group='api') self.assertRaises(cfg.ConfigFilesNotFoundError, app.load_app) def test_noauth(self): # Don't need to auth paste_file = "magnum/tests/unit/api/controllers/noauth-paste.ini" app = self.make_app(paste_file) response = app.get('/') self.assertEqual(self.root_expected, response.json) response = app.get('/v1/') self.assertEqual(self.v1_expected, response.json) response = app.get('/v1/clustertemplates') self.assertEqual(200, response.status_int) def test_auth_with_no_public_routes(self): # All apis need auth when access paste_file = 
"magnum/tests/unit/api/controllers/auth-paste.ini" app = self.make_app(paste_file) response = app.get('/', expect_errors=True) self.assertEqual(401, response.status_int) response = app.get('/v1/', expect_errors=True) self.assertEqual(401, response.status_int) def test_auth_with_root_access(self): # Only / can access without auth paste_file = "magnum/tests/unit/api/controllers/auth-root-access.ini" app = self.make_app(paste_file) response = app.get('/') self.assertEqual(self.root_expected, response.json) response = app.get('/v1/', expect_errors=True) self.assertEqual(401, response.status_int) response = app.get('/v1/clustermodels', expect_errors=True) self.assertEqual(401, response.status_int) def test_auth_with_v1_access(self): # Only /v1 can access without auth paste_file = "magnum/tests/unit/api/controllers/auth-v1-access.ini" app = self.make_app(paste_file) response = app.get('/', expect_errors=True) self.assertEqual(401, response.status_int) response = app.get('/v1/') self.assertEqual(self.v1_expected, response.json) response = app.get('/v1/clustertemplates', expect_errors=True) self.assertEqual(401, response.status_int) class TestHeathcheck(api_base.FunctionalTest): def setUp(self): self.addCleanup(self.remove_files) super(TestHeathcheck, self).setUp() # Create Temporary file self.tempdir = self.useFixture(fixtures.TempDir()).path paste_ini = "magnum/tests/unit/api/controllers/auth-paste.ini" # Read current file and create new one config = ConfigParser.RawConfigParser() config.read(self.get_path(paste_ini)) config.set('filter:healthcheck', 'disable_by_file_path', self.tempdir + "/disable") with open(self.tempdir + "/paste.ini", 'wt') as configfile: config.write(configfile) # Set config and create app cfg.CONF.set_override("api_paste_config", self.tempdir + "/paste.ini", group="api") self.app = webtest.TestApp(app.load_app()) def remove_files(self): shutil.rmtree(self.tempdir, ignore_errors=True) def test_healthcheck_enabled(self): # Check the healthcheck works 
response = self.app.get('/healthcheck') self.assertEqual(200, response.status_int) self.assertEqual(b"OK", response.body) def test_healthcheck_disable_file(self): # Create the file that disables healthcheck fo = open(self.tempdir + "/disable", 'a') fo.close() response = self.app.get('/healthcheck', expect_errors=True) self.assertEqual(503, response.status_int) self.assertEqual(b"DISABLED BY FILE", response.body) class TestV1Routing(api_base.FunctionalTest): def test_route_checks_version(self): self.get_json('/') self._check_version.assert_called_once_with(mock.ANY, mock.ANY) class TestCheckVersions(test_base.TestCase): def setUp(self): super(TestCheckVersions, self).setUp() class ver(object): major = None minor = None self.version = ver() def test_check_version_invalid_major_version(self): self.version.major = v1_api.BASE_VERSION + 1 self.version.minor = v1_api.MIN_VER.minor self.assertRaises(webob_exc.HTTPNotAcceptable, v1_api.Controller()._check_version, self.version) def test_check_version_too_low(self): self.version.major = v1_api.BASE_VERSION self.version.minor = v1_api.MIN_VER.minor - 1 self.assertRaises(webob_exc.HTTPNotAcceptable, v1_api.Controller()._check_version, self.version) def test_check_version_too_high(self): self.version.major = v1_api.BASE_VERSION self.version.minor = v1_api.MAX_VER.minor + 1 e = self.assertRaises(webob_exc.HTTPNotAcceptable, v1_api.Controller()._check_version, self.version, {'fake-headers': v1_api.MAX_VER.minor}) self.assertEqual(v1_api.MAX_VER.minor, e.headers['fake-headers']) def test_check_version_ok(self): self.version.major = v1_api.BASE_VERSION self.version.minor = v1_api.MIN_VER.minor v1_api.Controller()._check_version(self.version) magnum-6.1.0/magnum/tests/unit/api/controllers/__init__.py0000666000175100017510000000000013244017334023654 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/api/controllers/test_base.py0000666000175100017510000003723213244017334024107 0ustar zuulzuul00000000000000# Licensed under 
the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from webob import exc from magnum.api.controllers import base from magnum.api.controllers import versions from magnum.api import versioned_method from magnum.tests import base as test_base class TestVersion(test_base.TestCase): def setUp(self): super(TestVersion, self).setUp() self.a = versions.Version( {versions.Version.string: "container-infra 2.0"}, "container-infra 2.0", "container-infra 2.1") self.b = versions.Version( {versions.Version.string: "container-infra 2.0"}, "container-infra 2.0", "container-infra 2.1") self.c = versions.Version( {versions.Version.string: "container-infra 2.2"}, "container-infra 2.0", "container-infra 2.2") def test_is_null_true(self): self.a.major = 0 self.a.minor = 0 self.assertEqual(0 == 0, self.a.is_null()) def test_is_null_false(self): self.assertEqual(2 == 0, self.a.is_null()) def test__eq__with_equal(self): self.assertEqual(2 == 2, self.a == self.b) def test__eq__with_unequal(self): self.a.major = 1 self.assertEqual(1 == 2, self.a == self.b) def test__ne__with_equal(self): self.assertEqual(2 != 2, self.a != self.b) def test__ne__with_unequal(self): self.a.major = 1 self.assertEqual(1 != 2, self.a != self.b) def test__lt__with_higher_major_version(self): self.a.major = 2 self.b.major = 1 self.assertEqual(2 < 1, self.a < self.b) def test__lt__with_lower_major_version(self): self.a.major = 1 self.b.major = 2 self.assertEqual(1 < 2, self.a < self.b) def test__lt__with_higher_minor_version(self): 
self.a.minor = 2 self.b.minor = 1 self.assertEqual(self.a.major, self.b.major) self.assertEqual(2 < 1, self.a < self.b) def test__lt__with_lower_minor_version(self): self.a.minor = 1 self.b.minor = 2 self.assertEqual(self.a.major, self.b.major) self.assertEqual(1 < 2, self.a < self.b) def test__gt__with_higher_major_version(self): self.a.major = 2 self.b.major = 1 self.assertEqual(2 > 1, self.a > self.b) def test__gt__with_lower_major_version(self): self.a.major = 1 self.b.major = 2 self.assertEqual(1 > 2, self.a > self.b) def test__gt__with_higher_minor_version(self): self.a.minor = 2 self.b.minor = 1 self.assertEqual(self.a.major, self.b.major) self.assertEqual(2 > 1, self.a > self.b) def test__gt__with_lower_minor_version(self): self.a.minor = 1 self.b.minor = 2 self.assertEqual(self.a.major, self.b.major) self.assertEqual(1 > 2, self.a > self.b) def test__le__with_equal(self): self.assertEqual(2 == 2, self.a <= self.b) def test__le__with_higher_version(self): self.a.major = 3 self.assertEqual(3 <= 2, self.a <= self.b) def test__le__with_lower_version(self): self.a.major = 1 self.assertEqual(1 <= 2, self.a <= self.b) def test__ge__with_equal(self): self.assertEqual(2 >= 2, self.a >= self.b) def test__ge__with_higher_version(self): self.a.major = 3 self.assertEqual(3 >= 2, self.a >= self.b) def test__ge__with_lower_version(self): self.a.major = 1 self.assertEqual(1 >= 2, self.a >= self.b) def test_matches_start_version(self): self.assertEqual(0 >= 0, self.a.matches(self.b, self.c)) def test_matches_end_version(self): self.a.minor = 2 self.assertEqual(2 <= 2, self.a.matches(self.b, self.c)) def test_matches_valid_version(self): self.a.minor = 1 self.assertEqual(0 <= 1 <= 2, self.a.matches(self.b, self.c)) def test_matches_version_too_high(self): self.a.minor = 3 self.assertEqual(0 <= 3 <= 2, self.a.matches(self.b, self.c)) def test_matches_version_too_low(self): self.a.major = 1 self.assertEqual(2 <= 1 <= 2, self.a.matches(self.b, self.c)) def 
test_matches_null_version(self): self.a.major = 0 self.a.minor = 0 self.assertRaises(ValueError, self.a.matches, self.b, self.c) @mock.patch('magnum.api.controllers.versions.Version.parse_headers') def test_init(self, mock_parse): a = mock.Mock() b = mock.Mock() mock_parse.return_value = (a, b) v = versions.Version('test', 'foo', 'bar') mock_parse.assert_called_with('test', 'foo', 'bar') self.assertEqual(a, v.major) self.assertEqual(b, v.minor) @mock.patch('magnum.api.controllers.versions.Version.parse_headers') def test_repr(self, mock_parse): mock_parse.return_value = (123, 456) v = versions.Version('test', mock.ANY, mock.ANY) result = "%s" % v self.assertEqual('123.456', result) @mock.patch('magnum.api.controllers.versions.Version.parse_headers') def test_repr_with_strings(self, mock_parse): mock_parse.return_value = ('abc', 'def') v = versions.Version('test', mock.ANY, mock.ANY) result = "%s" % v self.assertEqual('abc.def', result) def test_parse_headers_ok(self): version = versions.Version.parse_headers( {versions.Version.string: 'container-infra 123.456'}, mock.ANY, mock.ANY) self.assertEqual((123, 456), version) def test_parse_headers_latest(self): for s in ['magnum latest', 'magnum LATEST']: version = versions.Version.parse_headers( {versions.Version.string: s}, mock.ANY, 'container-infra 1.9') self.assertEqual((1, 9), version) def test_parse_headers_bad_length(self): self.assertRaises( exc.HTTPNotAcceptable, versions.Version.parse_headers, {versions.Version.string: 'container-infra 1'}, mock.ANY, mock.ANY) self.assertRaises( exc.HTTPNotAcceptable, versions.Version.parse_headers, {versions.Version.string: 'container-infra 1.2.3'}, mock.ANY, mock.ANY) def test_parse_no_header(self): # this asserts that the minimum version string is applied version = versions.Version.parse_headers({}, 'container-infra 1.1', 'container-infra 1.5') self.assertEqual((1, 1), version) def test_parse_incorrect_service_type(self): self.assertRaises( exc.HTTPNotAcceptable, 
versions.Version.parse_headers, {versions.Version.string: '1.1'}, 'container-infra 1.1', 'container-infra 1.1') self.assertRaises( exc.HTTPNotAcceptable, versions.Version.parse_headers, {versions.Version.string: 'nova 1.1'}, 'container-infra 1.1', 'container-infra 1.1') class TestController(test_base.TestCase): def test_check_for_versions_intersection_negative(self): func_list = \ [versioned_method.VersionedMethod('foo', versions.Version('', '', '', '2.1'), versions.Version('', '', '', '2.4'), None), versioned_method.VersionedMethod('foo', versions.Version('', '', '', '2.11'), versions.Version('', '', '', '3.1'), None), versioned_method.VersionedMethod('foo', versions.Version('', '', '', '2.8'), versions.Version('', '', '', '2.9'), None), ] result = base.Controller.check_for_versions_intersection( func_list=func_list) self.assertFalse(result) func_list = \ [versioned_method.VersionedMethod('foo', versions.Version('', '', '', '2.12'), versions.Version('', '', '', '2.14'), None), versioned_method.VersionedMethod('foo', versions.Version('', '', '', '3.0'), versions.Version('', '', '', '3.4'), None) ] result = base.Controller.check_for_versions_intersection( func_list=func_list) self.assertFalse(result) def test_check_for_versions_intersection_positive(self): func_list = \ [versioned_method.VersionedMethod('foo', versions.Version('', '', '', '2.1'), versions.Version('', '', '', '2.4'), None), versioned_method.VersionedMethod('foo', versions.Version('', '', '', '2.3'), versions.Version('', '', '', '3.1'), None), versioned_method.VersionedMethod('foo', versions.Version('', '', '', '2.9'), versions.Version('', '', '', '3.4'), None) ] result = base.Controller.check_for_versions_intersection( func_list=func_list) self.assertTrue(result) def test_check_for_versions_intersection_shared_start_end(self): func_list = \ [versioned_method.VersionedMethod('foo', versions.Version('', '', '', '1.1'), versions.Version('', '', '', '1.1'), None), versioned_method.VersionedMethod('foo', 
versions.Version('', '', '', '1.1'), versions.Version('', '', '', '1.2'), None) ] result = base.Controller.check_for_versions_intersection( func_list=func_list) self.assertTrue(result) def test_api_version_decorator(self): class MyController(base.Controller): @base.Controller.api_version('1.0', '1.1') def testapi1(self): return 'API1_1.0_1.1' @base.Controller.api_version('1.2', '1.3') # noqa def testapi1(self): return 'API1_1.2_1.3' @base.Controller.api_version('2.1', '2.2') def testapi2(self): return 'API2_2.1_2.2' @base.Controller.api_version('1.0', '2.0') # noqa def testapi2(self): return 'API2_1.0_2.0' controller = MyController() # verify list was added to controller self.assertIsNotNone(controller.versioned_methods) api1_list = controller.versioned_methods['testapi1'] api2_list = controller.versioned_methods['testapi2'] # verify versioned_methods reordered correctly self.assertEqual('1.2', str(api1_list[0].start_version)) self.assertEqual('1.3', str(api1_list[0].end_version)) self.assertEqual('1.0', str(api1_list[1].start_version)) self.assertEqual('1.1', str(api1_list[1].end_version)) # verify stored methods can be called result = api1_list[0].func(controller) self.assertEqual('API1_1.2_1.3', result) result = api1_list[1].func(controller) self.assertEqual('API1_1.0_1.1', result) # verify versioned_methods reordered correctly self.assertEqual('2.1', str(api2_list[0].start_version)) self.assertEqual('2.2', str(api2_list[0].end_version)) self.assertEqual('1.0', str(api2_list[1].start_version)) self.assertEqual('2.0', str(api2_list[1].end_version)) # Verify stored methods can be called result = api2_list[0].func(controller) self.assertEqual('API2_2.1_2.2', result) result = api2_list[1].func(controller) self.assertEqual('API2_1.0_2.0', result) @mock.patch('pecan.request') def test_controller_get_attribute(self, mock_pecan_request): class MyController(base.Controller): @base.Controller.api_version('1.0', '1.1') def testapi1(self): return 'API1_1.0_1.1' 
@base.Controller.api_version('1.2', '1.3') # noqa def testapi1(self): return 'API1_1.2_1.3' controller = MyController() mock_pecan_request.version = versions.Version("", "", "", "1.2") controller.request = mock_pecan_request method = controller.__getattribute__('testapi1') result = method() self.assertEqual('API1_1.2_1.3', result) @mock.patch('pecan.request') def test_controller_get_attr_version_not_found(self, mock_pecan_request): class MyController(base.Controller): @base.Controller.api_version('1.0', '1.1') def testapi1(self): return 'API1_1.0_1.1' @base.Controller.api_version('1.3', '1.4') # noqa def testapi1(self): return 'API1_1.3_1.4' controller = MyController() mock_pecan_request.version = versions.Version("", "", "", "1.2") controller.request = mock_pecan_request self.assertRaises(exc.HTTPNotAcceptable, controller.__getattribute__, 'testapi1') magnum-6.1.0/magnum/tests/unit/api/__init__.py0000666000175100017510000000000013244017334021306 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/api/test_hooks.py0000666000175100017510000001355413244017334021753 0ustar zuulzuul00000000000000# Copyright 2014 # The Cloudscaling Group, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import six import mock from oslo_config import cfg import oslo_messaging as messaging from magnum.api.controllers import root from magnum.api import hooks from magnum.common import context as magnum_context from magnum.tests import base from magnum.tests import fakes from magnum.tests.unit.api import base as api_base class TestContextHook(base.BaseTestCase): def setUp(self): super(TestContextHook, self).setUp() self.app = fakes.FakeApp() def test_context_hook_before_method(self): state = mock.Mock(request=fakes.FakePecanRequest()) hook = hooks.ContextHook() hook.before(state) ctx = state.request.context self.assertIsInstance(ctx, magnum_context.RequestContext) self.assertEqual(fakes.fakeAuthTokenHeaders['X-Auth-Token'], ctx.auth_token) self.assertEqual(fakes.fakeAuthTokenHeaders['X-Project-Id'], ctx.project_id) self.assertEqual(fakes.fakeAuthTokenHeaders['X-User-Name'], ctx.user_name) self.assertEqual(fakes.fakeAuthTokenHeaders['X-User-Id'], ctx.user_id) self.assertEqual(fakes.fakeAuthTokenHeaders['X-Roles'], ','.join(ctx.roles)) self.assertEqual(fakes.fakeAuthTokenHeaders['X-User-Domain-Name'], ctx.domain_name) self.assertEqual(fakes.fakeAuthTokenHeaders['X-User-Domain-Id'], ctx.domain_id) self.assertIsNone(ctx.auth_token_info) def test_context_hook_before_method_auth_info(self): state = mock.Mock(request=fakes.FakePecanRequest()) state.request.environ['keystone.token_info'] = 'assert_this' hook = hooks.ContextHook() hook.before(state) ctx = state.request.context self.assertIsInstance(ctx, magnum_context.RequestContext) self.assertEqual(fakes.fakeAuthTokenHeaders['X-Auth-Token'], ctx.auth_token) self.assertEqual('assert_this', ctx.auth_token_info) class TestNoExceptionTracebackHook(api_base.FunctionalTest): TRACE = [u'Traceback (most recent call last):', u' File "/opt/stack/magnum/magnum/openstack/common/rpc/amqp.py",' ' line 434, in _process_data\\n **args)', u' File "/opt/stack/magnum/magnum/openstack/common/rpc/' 'dispatcher.py", line 172, in dispatch\\n result 
=' ' getattr(proxyobj, method)(context, **kwargs)'] MSG_WITHOUT_TRACE = "Test exception message." MSG_WITH_TRACE = MSG_WITHOUT_TRACE + "\n" + "\n".join(TRACE) def setUp(self): super(TestNoExceptionTracebackHook, self).setUp() p = mock.patch.object(root.Root, 'convert') self.root_convert_mock = p.start() self.addCleanup(p.stop) def test_hook_exception_success(self): self.root_convert_mock.side_effect = Exception(self.MSG_WITH_TRACE) response = self.get_json('/', path_prefix='', expect_errors=True) actual_msg = response.json['errors'][0]['detail'] self.assertEqual(self.MSG_WITHOUT_TRACE, actual_msg) def test_hook_remote_error_success(self): test_exc_type = 'TestException' self.root_convert_mock.side_effect = messaging.rpc.RemoteError( test_exc_type, self.MSG_WITHOUT_TRACE, self.TRACE) response = self.get_json('/', path_prefix='', expect_errors=True) # NOTE(max_lobur): For RemoteError the client message will still have # some garbage because in RemoteError traceback is serialized as a list # instead of'\n'.join(trace). But since RemoteError is kind of very # rare thing (happens due to wrong deserialization settings etc.) # we don't care about this garbage. 
if six.PY2: expected_msg = ("Remote error: %s %s" % (test_exc_type, self.MSG_WITHOUT_TRACE) + "\n[u'") else: expected_msg = ("Remote error: %s %s" % (test_exc_type, self.MSG_WITHOUT_TRACE) + "\n['") actual_msg = response.json['errors'][0]['detail'] self.assertEqual(expected_msg, actual_msg) def test_hook_without_traceback(self): msg = "Error message without traceback \n but \n multiline" self.root_convert_mock.side_effect = Exception(msg) response = self.get_json('/', path_prefix='', expect_errors=True) actual_msg = response.json['errors'][0]['detail'] self.assertEqual(msg, actual_msg) def test_hook_server_debug_on_serverfault(self): cfg.CONF.set_override('debug', True) self.root_convert_mock.side_effect = Exception(self.MSG_WITH_TRACE) response = self.get_json('/', path_prefix='', expect_errors=True) actual_msg = response.json['errors'][0]['detail'] self.assertEqual(self.MSG_WITHOUT_TRACE, actual_msg) def test_hook_server_debug_on_clientfault(self): cfg.CONF.set_override('debug', True) client_error = Exception(self.MSG_WITH_TRACE) client_error.code = 400 self.root_convert_mock.side_effect = client_error response = self.get_json('/', path_prefix='', expect_errors=True) actual_msg = response.json['errors'][0]['detail'] self.assertEqual(self.MSG_WITH_TRACE, actual_msg) magnum-6.1.0/magnum/tests/unit/api/utils.py0000666000175100017510000000672713244017334020735 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utils for testing the API service. 
""" import datetime import pytz from magnum.api.controllers.v1 import bay as bay_controller from magnum.api.controllers.v1 import baymodel as baymodel_controller from magnum.api.controllers.v1 import cluster as cluster_controller from magnum.api.controllers.v1 import cluster_template as cluster_tmp_ctrl from magnum.api.controllers.v1 import federation as federation_controller from magnum.tests.unit.db import utils def remove_internal(values, internal): # NOTE(yuriyz): internal attributes should not be posted, except uuid int_attr = [attr.lstrip('/') for attr in internal if attr != '/uuid'] return {k: v for (k, v) in values.items() if k not in int_attr} def baymodel_post_data(**kw): baymodel = utils.get_test_cluster_template(**kw) internal = baymodel_controller.BayModelPatchType.internal_attrs() return remove_internal(baymodel, internal) def cluster_template_post_data(**kw): cluster_template = utils.get_test_cluster_template(**kw) internal = cluster_tmp_ctrl.ClusterTemplatePatchType.internal_attrs() return remove_internal(cluster_template, internal) def bay_post_data(**kw): bay = utils.get_test_cluster(**kw) bay['baymodel_id'] = kw.get('baymodel_id', bay['cluster_template_id']) bay['bay_create_timeout'] = kw.get('bay_create_timeout', 15) del bay['cluster_template_id'] del bay['create_timeout'] internal = bay_controller.BayPatchType.internal_attrs() return remove_internal(bay, internal) def cluster_post_data(**kw): cluster = utils.get_test_cluster(**kw) cluster['create_timeout'] = kw.get('create_timeout', 15) internal = cluster_controller.ClusterPatchType.internal_attrs() return remove_internal(cluster, internal) def cert_post_data(**kw): return { 'cluster_uuid': kw.get('cluster_uuid', '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'), 'csr': kw.get('csr', 'fake-csr'), 'pem': kw.get('pem', 'fake-pem') } def quota_post_data(**kw): return utils.get_test_quota(**kw) def mservice_get_data(**kw): """Simulate what the RPC layer will get from DB """ faketime = datetime.datetime(2001, 
1, 1, tzinfo=pytz.UTC) return { 'binary': kw.get('binary', 'magnum-conductor'), 'host': kw.get('host', 'fake-host'), 'id': kw.get('id', 13), 'report_count': kw.get('report_count', 13), 'disabled': kw.get('disabled', False), 'disabled_reason': kw.get('disabled_reason', None), 'forced_down': kw.get('forced_down', False), 'last_seen_at': kw.get('last_seen_at', faketime), 'created_at': kw.get('created_at', faketime), 'updated_at': kw.get('updated_at', faketime), } def federation_post_data(**kw): federation = utils.get_test_federation(**kw) internal = federation_controller.FederationPatchType.internal_attrs() return remove_internal(federation, internal) magnum-6.1.0/magnum/tests/unit/conf/0000775000175100017510000000000013244017675017371 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/conf/test_conf.py0000666000175100017510000000541413244017334021725 0ustar zuulzuul00000000000000# Copyright 2016 Fujitsu Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections import mock from oslo_config import cfg import six from magnum.conf import opts from magnum.tests import base class ConfTestCase(base.TestCase): def test_list_opts(self): for group, opt_list in opts.list_opts(): if isinstance(group, six.string_types): self.assertEqual(group, 'DEFAULT') else: self.assertIsInstance(group, cfg.OptGroup) for opt in opt_list: self.assertIsInstance(opt, cfg.Opt) def test_list_module_name_invalid_mods(self): with mock.patch('pkgutil.iter_modules') as mock_mods: mock_mods.return_value = [(None, 'foo', True), (None, 'opts', False)] self.assertEqual([], opts._list_module_names()) def test_list_module_name_valid_mods(self): with mock.patch('pkgutil.iter_modules') as mock_mods: mock_mods.return_value = [(None, 'foo', False)] self.assertEqual(['foo'], opts._list_module_names()) def test_import_mods_no_func(self): modules = ['foo', 'bar'] with mock.patch('importlib.import_module') as mock_import: mock_import.return_value = mock.sentinel.mods self.assertRaises(AttributeError, opts._import_modules, modules) mock_import.assert_called_once_with('magnum.conf.foo') def test_import_mods_valid_func(self): modules = ['foo', 'bar'] with mock.patch('importlib.import_module') as mock_import: mock_mod = mock.MagicMock() mock_import.return_value = mock_mod self.assertEqual([mock_mod, mock_mod], opts._import_modules(modules)) mock_import.assert_has_calls([mock.call('magnum.conf.foo'), mock.call('magnum.conf.bar')]) def test_append_config(self): opt = collections.defaultdict(list) mock_module = mock.MagicMock() mock_conf = mock.MagicMock() mock_module.list_opts.return_value = mock_conf mock_conf.items.return_value = [('foo', 'bar')] opts._append_config_options([mock_module], opt) self.assertEqual({'foo': ['b', 'a', 'r']}, opt) magnum-6.1.0/magnum/tests/unit/conf/__init__.py0000666000175100017510000000000013244017334021462 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/drivers/0000775000175100017510000000000013244017675020122 
5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/drivers/test_template_definition.py0000666000175100017510000013603713244017334025562 0ustar zuulzuul00000000000000# Copyright 2015 Rackspace Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import mock from neutronclient.common import exceptions as n_exception import six from magnum.common import exception import magnum.conf from magnum.drivers.common import driver from magnum.drivers.heat import template_def as cmn_tdef from magnum.drivers.k8s_coreos_v1 import driver as k8s_coreos_dr from magnum.drivers.k8s_coreos_v1 import template_def as k8s_coreos_tdef from magnum.drivers.k8s_fedora_atomic_v1 import driver as k8sa_dr from magnum.drivers.k8s_fedora_atomic_v1 import template_def as k8sa_tdef from magnum.drivers.k8s_fedora_ironic_v1 import driver as k8s_i_dr from magnum.drivers.k8s_fedora_ironic_v1 import template_def as k8si_tdef from magnum.drivers.mesos_ubuntu_v1 import driver as mesos_dr from magnum.drivers.mesos_ubuntu_v1 import template_def as mesos_tdef from magnum.drivers.swarm_fedora_atomic_v1 import driver as swarm_dr from magnum.drivers.swarm_fedora_atomic_v1 import template_def as swarm_tdef from magnum.tests import base from requests import exceptions as req_exceptions CONF = magnum.conf.CONF class TemplateDefinitionTestCase(base.TestCase): @mock.patch.object(driver, 'iter_entry_points') def test_load_entry_points(self, mock_iter_entry_points): mock_entry_point = 
mock.MagicMock() mock_entry_points = [mock_entry_point] mock_iter_entry_points.return_value = mock_entry_points.__iter__() entry_points = driver.Driver.load_entry_points() for (expected_entry_point, (actual_entry_point, loaded_cls)) in zip(mock_entry_points, entry_points): self.assertEqual(expected_entry_point, actual_entry_point) expected_entry_point.load.assert_called_once_with(require=False) @mock.patch('magnum.drivers.common.driver.Driver.get_driver') def test_get_vm_atomic_kubernetes_definition(self, mock_driver): mock_driver.return_value = k8sa_dr.Driver() cluster_driver = driver.Driver.get_driver('vm', 'fedora-atomic', 'kubernetes') definition = cluster_driver.get_template_definition() self.assertIsInstance(definition, k8sa_tdef.AtomicK8sTemplateDefinition) @mock.patch('magnum.drivers.common.driver.Driver.get_driver') def test_get_bm_fedora_kubernetes_ironic_definition(self, mock_driver): mock_driver.return_value = k8s_i_dr.Driver() cluster_driver = driver.Driver.get_driver('bm', 'fedora', 'kubernetes') definition = cluster_driver.get_template_definition() self.assertIsInstance(definition, k8si_tdef.FedoraK8sIronicTemplateDefinition) @mock.patch('magnum.drivers.common.driver.Driver.get_driver') def test_get_vm_coreos_kubernetes_definition(self, mock_driver): mock_driver.return_value = k8s_coreos_dr.Driver() cluster_driver = driver.Driver.get_driver('vm', 'coreos', 'kubernetes') definition = cluster_driver.get_template_definition() self.assertIsInstance(definition, k8s_coreos_tdef.CoreOSK8sTemplateDefinition) @mock.patch('magnum.drivers.common.driver.Driver.get_driver') def test_get_vm_atomic_swarm_definition(self, mock_driver): mock_driver.return_value = swarm_dr.Driver() cluster_driver = driver.Driver.get_driver('vm', 'fedora-atomic', 'swarm') definition = cluster_driver.get_template_definition() self.assertIsInstance(definition, swarm_tdef.AtomicSwarmTemplateDefinition) @mock.patch('magnum.drivers.common.driver.Driver.get_driver') def 
test_get_vm_ubuntu_mesos_definition(self, mock_driver): mock_driver.return_value = mesos_dr.Driver() cluster_driver = driver.Driver.get_driver('vm', 'ubuntu', 'mesos') definition = cluster_driver.get_template_definition() self.assertIsInstance(definition, mesos_tdef.UbuntuMesosTemplateDefinition) def test_get_driver_not_supported(self): self.assertRaises(exception.ClusterTypeNotSupported, driver.Driver.get_driver, 'vm', 'not_supported', 'kubernetes') def test_required_param_not_set(self): param = cmn_tdef.ParameterMapping('test', cluster_template_attr='test', required=True) mock_cluster_template = mock.MagicMock() mock_cluster_template.test = None self.assertRaises(exception.RequiredParameterNotProvided, param.set_param, {}, mock_cluster_template, None) def test_output_mapping(self): heat_outputs = [ { "output_value": "value1", "description": "No description given", "output_key": "key1" }, { "output_value": ["value2", "value3"], "description": "No description given", "output_key": "key2" } ] mock_stack = mock.MagicMock() mock_stack.to_dict.return_value = {'outputs': heat_outputs} output = cmn_tdef.OutputMapping('key1') value = output.get_output_value(mock_stack) self.assertEqual('value1', value) output = cmn_tdef.OutputMapping('key2') value = output.get_output_value(mock_stack) self.assertEqual(["value2", "value3"], value) output = cmn_tdef.OutputMapping('key3') value = output.get_output_value(mock_stack) self.assertIsNone(value) # verify stack with no 'outputs' attribute mock_stack.to_dict.return_value = {} output = cmn_tdef.OutputMapping('key1') value = output.get_output_value(mock_stack) self.assertIsNone(value) def test_add_output_with_mapping_type(self): definition = k8sa_dr.Driver().get_template_definition() mock_args = [1, 3, 4] mock_kwargs = {'test': 'test'} mock_mapping_type = mock.MagicMock() mock_mapping_type.return_value = mock.MagicMock() definition.add_output(mapping_type=mock_mapping_type, *mock_args, **mock_kwargs) 
mock_mapping_type.assert_called_once_with(*mock_args, **mock_kwargs) self.assertIn(mock_mapping_type.return_value, definition.output_mappings) @six.add_metaclass(abc.ABCMeta) class BaseTemplateDefinitionTestCase(base.TestCase): @abc.abstractmethod def get_definition(self): """Returns the template definition.""" pass def _test_update_outputs_server_addrtess( self, floating_ip_enabled=True, public_ip_output_key='kube_masters', private_ip_output_key='kube_masters_private', cluster_attr='master_addresses', ): definition = self.get_definition() expected_address = expected_public_address = ['public'] expected_private_address = ['private'] if not floating_ip_enabled: expected_address = expected_private_address outputs = [ {"output_value": expected_public_address, "description": "No description given", "output_key": public_ip_output_key}, {"output_value": expected_private_address, "description": "No description given", "output_key": private_ip_output_key}, ] mock_stack = mock.MagicMock() mock_stack.to_dict.return_value = {'outputs': outputs} mock_cluster = mock.MagicMock() mock_cluster_template = mock.MagicMock() mock_cluster_template.floating_ip_enabled = floating_ip_enabled definition.update_outputs(mock_stack, mock_cluster_template, mock_cluster) self.assertEqual(expected_address, getattr(mock_cluster, cluster_attr)) class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase): def get_definition(self): return k8sa_dr.Driver().get_template_definition() @mock.patch('magnum.common.clients.OpenStackClients') @mock.patch('magnum.drivers.k8s_fedora_atomic_v1.template_def' '.AtomicK8sTemplateDefinition.get_discovery_url') @mock.patch('magnum.drivers.heat.template_def.BaseTemplateDefinition' '.get_params') @mock.patch('magnum.drivers.heat.template_def.TemplateDefinition' '.get_output') def test_k8s_get_params(self, mock_get_output, mock_get_params, mock_get_discovery_url, mock_osc_class): mock_context = mock.MagicMock() mock_context.auth_token = 'AUTH_TOKEN' 
mock_cluster_template = mock.MagicMock() mock_cluster_template.tls_disabled = False mock_cluster_template.registry_enabled = False mock_cluster_template.network_driver = 'flannel' mock_cluster = mock.MagicMock() mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52' del mock_cluster.stack_id mock_scale_manager = mock.MagicMock() mock_osc = mock.MagicMock() mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1' mock_osc.cinder_region_name.return_value = 'RegionOne' mock_osc_class.return_value = mock_osc removal_nodes = ['node1', 'node2'] mock_scale_manager.get_removal_nodes.return_value = removal_nodes mock_get_discovery_url.return_value = 'fake_discovery_url' mock_context.auth_url = 'http://192.168.10.10:5000/v3' mock_context.user_name = 'fake_user' flannel_cidr = mock_cluster.labels.get('flannel_network_cidr') flannel_subnet = mock_cluster.labels.get( 'flannel_network_subnetlen') flannel_backend = mock_cluster.labels.get('flannel_backend') system_pods_initial_delay = mock_cluster.labels.get( 'system_pods_initial_delay') system_pods_timeout = mock_cluster.labels.get( 'system_pods_timeout') admission_control_list = mock_cluster.labels.get( 'admission_control_list') prometheus_monitoring = mock_cluster.labels.get( 'prometheus_monitoring') grafana_admin_passwd = mock_cluster.labels.get( 'grafana_admin_passwd') kube_dashboard_enabled = mock_cluster.labels.get( 'kube_dashboard_enabled') influx_grafana_dashboard_enabled = mock_cluster.labels.get( 'influx_grafana_dashboard_enabled') docker_volume_type = mock_cluster.labels.get( 'docker_volume_type') etcd_volume_size = mock_cluster.labels.get( 'etcd_volume_size') kube_tag = mock_cluster.labels.get('kube_tag') etcd_tag = mock_cluster.labels.get('etcd_tag') flannel_tag = mock_cluster.labels.get('flannel_tag') container_infra_prefix = mock_cluster.labels.get( 'container_infra_prefix') availability_zone = mock_cluster.labels.get( 'availability_zone') cert_manager_api = mock_cluster.labels.get('cert_manager_api') 
calico_tag = mock_cluster.labels.get( 'calico_tag') calico_cni_tag = mock_cluster.labels.get( 'calico_cni_tag') calico_kube_controllers_tag = mock_cluster.labels.get( 'calico_kube_controllers_tag') calico_ipv4pool = mock_cluster.labels.get( 'calico_ipv4pool') if mock_cluster_template.network_driver == 'flannel': pods_network_cidr = flannel_cidr elif mock_cluster_template.network_driver == 'calico': pods_network_cidr = calico_ipv4pool ingress_controller = mock_cluster.labels.get( 'ingress_controller') ingress_controller_role = mock_cluster.labels.get( 'ingress_controller_role') kubelet_options = mock_cluster.labels.get( 'kubelet_options') kubeapi_options = mock_cluster.labels.get( 'kubeapi_options') kubecontroller_options = mock_cluster.labels.get( 'kubecontroller_options') kubescheduler_options = mock_cluster.labels.get( 'kubescheduler_options') kubeproxy_options = mock_cluster.labels.get( 'kubeproxy_options') k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() k8s_def.get_params(mock_context, mock_cluster_template, mock_cluster, scale_manager=mock_scale_manager) expected_kwargs = {'extra_params': { 'minions_to_remove': removal_nodes, 'discovery_url': 'fake_discovery_url', 'flannel_network_cidr': flannel_cidr, 'flannel_network_subnetlen': flannel_subnet, 'flannel_backend': flannel_backend, 'system_pods_initial_delay': system_pods_initial_delay, 'system_pods_timeout': system_pods_timeout, 'admission_control_list': admission_control_list, 'prometheus_monitoring': prometheus_monitoring, 'grafana_admin_passwd': grafana_admin_passwd, 'kube_dashboard_enabled': kube_dashboard_enabled, 'influx_grafana_dashboard_enabled': influx_grafana_dashboard_enabled, 'docker_volume_type': docker_volume_type, 'etcd_volume_size': etcd_volume_size, 'kubelet_options': kubelet_options, 'kubeapi_options': kubeapi_options, 'kubecontroller_options': kubecontroller_options, 'kubescheduler_options': kubescheduler_options, 'kubeproxy_options': kubeproxy_options, 'username': 'fake_user', 
'magnum_url': mock_osc.magnum_url.return_value, 'region_name': mock_osc.cinder_region_name.return_value, 'kube_tag': kube_tag, 'etcd_tag': etcd_tag, 'flannel_tag': flannel_tag, 'container_infra_prefix': container_infra_prefix, 'nodes_affinity_policy': 'soft-anti-affinity', 'availability_zone': availability_zone, 'cert_manager_api': cert_manager_api, 'calico_tag': calico_tag, 'calico_cni_tag': calico_cni_tag, 'calico_kube_controllers_tag': calico_kube_controllers_tag, 'calico_ipv4pool': calico_ipv4pool, 'pods_network_cidr': pods_network_cidr, 'ingress_controller': ingress_controller, 'ingress_controller_role': ingress_controller_role}} mock_get_params.assert_called_once_with(mock_context, mock_cluster_template, mock_cluster, **expected_kwargs) @mock.patch('magnum.common.clients.OpenStackClients') @mock.patch('magnum.drivers.heat.template_def' '.BaseTemplateDefinition.get_discovery_url') @mock.patch('magnum.drivers.heat.template_def.BaseTemplateDefinition' '.get_params') @mock.patch('magnum.drivers.heat.template_def.TemplateDefinition' '.get_output') def test_k8s_get_params_insecure(self, mock_get_output, mock_get_params, mock_get_discovery_url, mock_osc_class): mock_context = mock.MagicMock() mock_context.auth_token = 'AUTH_TOKEN' mock_cluster_template = mock.MagicMock() mock_cluster_template.tls_disabled = True mock_cluster_template.registry_enabled = False mock_cluster_template.network_driver = 'calico' mock_cluster = mock.MagicMock() mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52' del mock_cluster.stack_id mock_scale_manager = mock.MagicMock() mock_osc = mock.MagicMock() mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1' mock_osc.cinder_region_name.return_value mock_osc_class.return_value = mock_osc removal_nodes = ['node1', 'node2'] mock_scale_manager.get_removal_nodes.return_value = removal_nodes mock_get_discovery_url.return_value = 'fake_discovery_url' mock_context.auth_url = 'http://192.168.10.10:5000/v3' mock_context.user_name = 
'fake_user' flannel_cidr = mock_cluster.labels.get('flannel_network_cidr') flannel_subnet = mock_cluster.labels.get( 'flannel_network_subnetlen') flannel_backend = mock_cluster.labels.get('flannel_backend') system_pods_initial_delay = mock_cluster.labels.get( 'system_pods_initial_delay') system_pods_timeout = mock_cluster.labels.get( 'system_pods_timeout') admission_control_list = mock_cluster.labels.get( 'admission_control_list') prometheus_monitoring = mock_cluster.labels.get( 'prometheus_monitoring') grafana_admin_passwd = mock_cluster.labels.get( 'grafana_admin_passwd') kube_dashboard_enabled = mock_cluster.labels.get( 'kube_dashboard_enabled') influx_grafana_dashboard_enabled = mock_cluster.labels.get( 'influx_grafana_dashboard_enabled') docker_volume_type = mock_cluster.labels.get( 'docker_volume_type') etcd_volume_size = mock_cluster.labels.get( 'etcd_volume_size') kube_tag = mock_cluster.labels.get('kube_tag') etcd_tag = mock_cluster.labels.get('etcd_tag') flannel_tag = mock_cluster.labels.get('flannel_tag') container_infra_prefix = mock_cluster.labels.get( 'container_infra_prefix') availability_zone = mock_cluster.labels.get( 'availability_zone') cert_manager_api = mock_cluster.labels.get('cert_manager_api') calico_tag = mock_cluster.labels.get( 'calico_tag') calico_cni_tag = mock_cluster.labels.get( 'calico_cni_tag') calico_kube_controllers_tag = mock_cluster.labels.get( 'calico_kube_controllers_tag') calico_ipv4pool = mock_cluster.labels.get( 'calico_ipv4pool') if mock_cluster_template.network_driver == 'flannel': pods_network_cidr = flannel_cidr elif mock_cluster_template.network_driver == 'calico': pods_network_cidr = calico_ipv4pool ingress_controller = mock_cluster.labels.get( 'ingress_controller') ingress_controller_role = mock_cluster.labels.get( 'ingress_controller_role') kubelet_options = mock_cluster.labels.get( 'kubelet_options') kubeapi_options = mock_cluster.labels.get( 'kubeapi_options') kubecontroller_options = mock_cluster.labels.get( 
'kubecontroller_options') kubescheduler_options = mock_cluster.labels.get( 'kubescheduler_options') kubeproxy_options = mock_cluster.labels.get( 'kubeproxy_options') k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() k8s_def.get_params(mock_context, mock_cluster_template, mock_cluster, scale_manager=mock_scale_manager) expected_kwargs = {'extra_params': { 'minions_to_remove': removal_nodes, 'discovery_url': 'fake_discovery_url', 'flannel_network_cidr': flannel_cidr, 'flannel_network_subnetlen': flannel_subnet, 'flannel_backend': flannel_backend, 'system_pods_initial_delay': system_pods_initial_delay, 'system_pods_timeout': system_pods_timeout, 'admission_control_list': admission_control_list, 'prometheus_monitoring': prometheus_monitoring, 'grafana_admin_passwd': grafana_admin_passwd, 'kube_dashboard_enabled': kube_dashboard_enabled, 'influx_grafana_dashboard_enabled': influx_grafana_dashboard_enabled, 'docker_volume_type': docker_volume_type, 'etcd_volume_size': etcd_volume_size, 'kubelet_options': kubelet_options, 'kubeapi_options': kubeapi_options, 'kubecontroller_options': kubecontroller_options, 'kubescheduler_options': kubescheduler_options, 'kubeproxy_options': kubeproxy_options, 'username': 'fake_user', 'magnum_url': mock_osc.magnum_url.return_value, 'region_name': mock_osc.cinder_region_name.return_value, 'loadbalancing_protocol': 'HTTP', 'kubernetes_port': 8080, 'kube_tag': kube_tag, 'etcd_tag': etcd_tag, 'flannel_tag': flannel_tag, 'container_infra_prefix': container_infra_prefix, 'nodes_affinity_policy': 'soft-anti-affinity', 'availability_zone': availability_zone, 'cert_manager_api': cert_manager_api, 'calico_tag': calico_tag, 'calico_cni_tag': calico_cni_tag, 'calico_kube_controllers_tag': calico_kube_controllers_tag, 'calico_ipv4pool': calico_ipv4pool, 'pods_network_cidr': pods_network_cidr, 'ingress_controller': ingress_controller, 'ingress_controller_role': ingress_controller_role}} mock_get_params.assert_called_once_with(mock_context, 
mock_cluster_template, mock_cluster, **expected_kwargs) @mock.patch('requests.get') def test_k8s_validate_discovery_url(self, mock_get): expected_result = str('{"action":"get","node":{"key":"test","value":' '"1","modifiedIndex":10,"createdIndex":10}}') mock_resp = mock.MagicMock() mock_resp.text = expected_result mock_get.return_value = mock_resp k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() k8s_def.validate_discovery_url('http://etcd/test', 1) @mock.patch('requests.get') def test_k8s_validate_discovery_url_fail(self, mock_get): mock_get.side_effect = req_exceptions.RequestException() k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() self.assertRaises(exception.GetClusterSizeFailed, k8s_def.validate_discovery_url, 'http://etcd/test', 1) @mock.patch('requests.get') def test_k8s_validate_discovery_url_invalid(self, mock_get): mock_resp = mock.MagicMock() mock_resp.text = str('{"action":"get"}') mock_get.return_value = mock_resp k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() self.assertRaises(exception.InvalidClusterDiscoveryURL, k8s_def.validate_discovery_url, 'http://etcd/test', 1) @mock.patch('requests.get') def test_k8s_validate_discovery_url_unexpect_size(self, mock_get): expected_result = str('{"action":"get","node":{"key":"test","value":' '"1","modifiedIndex":10,"createdIndex":10}}') mock_resp = mock.MagicMock() mock_resp.text = expected_result mock_get.return_value = mock_resp k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() self.assertRaises(exception.InvalidClusterSize, k8s_def.validate_discovery_url, 'http://etcd/test', 5) @mock.patch('requests.get') def test_k8s_get_discovery_url(self, mock_get): CONF.set_override('etcd_discovery_service_endpoint_format', 'http://etcd/test?size=%(size)d', group='cluster') expected_discovery_url = 'http://etcd/token' mock_resp = mock.MagicMock() mock_resp.text = expected_discovery_url mock_get.return_value = mock_resp mock_cluster = mock.MagicMock() mock_cluster.master_count = 10 mock_cluster.discovery_url = None 
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() discovery_url = k8s_def.get_discovery_url(mock_cluster) mock_get.assert_called_once_with('http://etcd/test?size=10') self.assertEqual(expected_discovery_url, mock_cluster.discovery_url) self.assertEqual(expected_discovery_url, discovery_url) @mock.patch('requests.get') def test_k8s_get_discovery_url_fail(self, mock_get): CONF.set_override('etcd_discovery_service_endpoint_format', 'http://etcd/test?size=%(size)d', group='cluster') mock_get.side_effect = req_exceptions.RequestException() mock_cluster = mock.MagicMock() mock_cluster.master_count = 10 mock_cluster.discovery_url = None k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() self.assertRaises(exception.GetDiscoveryUrlFailed, k8s_def.get_discovery_url, mock_cluster) def test_k8s_get_heat_param(self): k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() heat_param = k8s_def.get_heat_param(cluster_attr='node_count') self.assertEqual('number_of_minions', heat_param) @mock.patch('requests.get') def test_k8s_get_discovery_url_not_found(self, mock_get): mock_resp = mock.MagicMock() mock_resp.text = '' mock_get.return_value = mock_resp fake_cluster = mock.MagicMock() fake_cluster.discovery_url = None self.assertRaises( exception.InvalidDiscoveryURL, k8sa_tdef.AtomicK8sTemplateDefinition().get_discovery_url, fake_cluster) def _test_update_outputs_api_address(self, template_definition, params, tls=True): expected_api_address = '%(protocol)s://%(address)s:%(port)s' % params outputs = [ {"output_value": params['address'], "description": "No description given", "output_key": 'api_address'}, ] mock_stack = mock.MagicMock() mock_stack.to_dict.return_value = {'outputs': outputs} mock_cluster = mock.MagicMock() mock_cluster_template = mock.MagicMock() mock_cluster_template.tls_disabled = tls template_definition.update_outputs(mock_stack, mock_cluster_template, mock_cluster) self.assertEqual(expected_api_address, mock_cluster.api_address) def 
test_update_k8s_outputs_api_address(self): address = 'updated_address' protocol = 'http' port = '8080' params = { 'protocol': protocol, 'address': address, 'port': port, } template_definition = k8sa_tdef.AtomicK8sTemplateDefinition() self._test_update_outputs_api_address(template_definition, params) def test_update_swarm_outputs_api_address(self): address = 'updated_address' protocol = 'tcp' port = '2376' params = { 'protocol': protocol, 'address': address, 'port': port, } template_definition = swarm_tdef.AtomicSwarmTemplateDefinition() self._test_update_outputs_api_address(template_definition, params) def test_update_k8s_outputs_if_cluster_template_is_secure(self): address = 'updated_address' protocol = 'https' port = '6443' params = { 'protocol': protocol, 'address': address, 'port': port, } template_definition = k8sa_tdef.AtomicK8sTemplateDefinition() self._test_update_outputs_api_address(template_definition, params, tls=False) def test_update_swarm_outputs_if_cluster_template_is_secure(self): address = 'updated_address' protocol = 'tcp' port = '2376' params = { 'protocol': protocol, 'address': address, 'port': port, } template_definition = swarm_tdef.AtomicSwarmTemplateDefinition() self._test_update_outputs_api_address(template_definition, params, tls=False) def _test_update_outputs_none_api_address(self, template_definition, params, tls=True): outputs = [ {"output_value": params['address'], "description": "No description given", "output_key": 'api_address'}, ] mock_stack = mock.MagicMock() mock_stack.to_dict.return_value = {'outputs': outputs} mock_cluster = mock.MagicMock() mock_cluster.api_address = 'none_api_address' mock_cluster_template = mock.MagicMock() mock_cluster_template.tls_disabled = tls template_definition.update_outputs(mock_stack, mock_cluster_template, mock_cluster) self.assertEqual('none_api_address', mock_cluster.api_address) def test_update_k8s_outputs_none_api_address(self): protocol = 'http' port = '8080' params = { 'protocol': protocol, 
'address': None, 'port': port, } template_definition = k8sa_tdef.AtomicK8sTemplateDefinition() self._test_update_outputs_none_api_address(template_definition, params) def test_update_swarm_outputs_none_api_address(self): protocol = 'tcp' port = '2376' params = { 'protocol': protocol, 'address': None, 'port': port, } template_definition = swarm_tdef.AtomicSwarmTemplateDefinition() self._test_update_outputs_none_api_address(template_definition, params) def test_update_outputs_master_address(self): self._test_update_outputs_server_addrtess( public_ip_output_key='kube_masters', private_ip_output_key='kube_masters_private', cluster_attr='master_addresses', ) def test_update_outputs_node_address(self): self._test_update_outputs_server_addrtess( public_ip_output_key='kube_minions', private_ip_output_key='kube_minions_private', cluster_attr='node_addresses', ) def test_update_outputs_master_address_fip_disabled(self): self._test_update_outputs_server_addrtess( floating_ip_enabled=False, public_ip_output_key='kube_masters', private_ip_output_key='kube_masters_private', cluster_attr='master_addresses', ) def test_update_outputs_node_address_fip_disabled(self): self._test_update_outputs_server_addrtess( floating_ip_enabled=False, public_ip_output_key='kube_minions', private_ip_output_key='kube_minions_private', cluster_attr='node_addresses', ) class FedoraK8sIronicTemplateDefinitionTestCase(base.TestCase): def get_definition(self): return k8s_i_dr.Driver().get_template_definition() def assert_neutron_find(self, mock_neutron_v20_find, osc, cluster_template): mock_neutron_v20_find.assert_called_once_with( osc.neutron(), 'subnet', cluster_template.fixed_subnet ) def assert_raises_from_get_fixed_network_id( self, mock_neutron_v20_find, exeption_from_neutron_client, expected_exception_class ): definition = self.get_definition() osc = mock.MagicMock() cluster_template = mock.MagicMock() mock_neutron_v20_find.side_effect = exeption_from_neutron_client self.assertRaises( 
expected_exception_class, definition.get_fixed_network_id, osc, cluster_template ) @mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id') def test_get_fixed_network_id(self, mock_neutron_v20_find): expected_network_id = 'expected_network_id' osc = mock.MagicMock() cluster_template = mock.MagicMock() definition = self.get_definition() mock_neutron_v20_find.return_value = { 'ip_version': 4, 'network_id': expected_network_id, } self.assertEqual( expected_network_id, definition.get_fixed_network_id(osc, cluster_template) ) self.assert_neutron_find(mock_neutron_v20_find, osc, cluster_template) @mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id') def test_get_fixed_network_id_with_invalid_ip_ver(self, mock_neutron_v20_find): osc = mock.MagicMock() cluster_template = mock.MagicMock() definition = self.get_definition() mock_neutron_v20_find.return_value = { 'ip_version': 6, 'network_id': 'expected_network_id', } self.assertRaises( exception.InvalidSubnet, definition.get_fixed_network_id, osc, cluster_template ) @mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id') def test_get_fixed_network_id_with_duplicated_name(self, mock_neutron_v20_find): ex = n_exception.NeutronClientNoUniqueMatch( resource='subnet', name='duplicated-name' ) self.assert_raises_from_get_fixed_network_id( mock_neutron_v20_find, ex, exception.InvalidSubnet, ) @mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id') def test_get_fixed_network_id_with_client_error(self, mock_neutron_v20_find): ex = n_exception.BadRequest() self.assert_raises_from_get_fixed_network_id( mock_neutron_v20_find, ex, exception.InvalidSubnet, ) @mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id') def test_get_fixed_network_id_with_server_error(self, mock_neutron_v20_find): ex = n_exception.ServiceUnavailable() self.assert_raises_from_get_fixed_network_id( mock_neutron_v20_find, ex, n_exception.ServiceUnavailable, ) class 
AtomicSwarmTemplateDefinitionTestCase(base.TestCase): @mock.patch('magnum.common.clients.OpenStackClients') @mock.patch('magnum.drivers.swarm_fedora_atomic_v1.template_def' '.AtomicSwarmTemplateDefinition.get_discovery_url') @mock.patch('magnum.drivers.heat.template_def.BaseTemplateDefinition' '.get_params') @mock.patch('magnum.drivers.heat.template_def.TemplateDefinition' '.get_output') def test_swarm_get_params(self, mock_get_output, mock_get_params, mock_get_discovery_url, mock_osc_class): mock_context = mock.MagicMock() mock_context.auth_token = 'AUTH_TOKEN' mock_cluster_template = mock.MagicMock() mock_cluster_template.tls_disabled = False mock_cluster_template.registry_enabled = False mock_cluster = mock.MagicMock() mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52' del mock_cluster.stack_id mock_osc = mock.MagicMock() mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1' mock_osc_class.return_value = mock_osc mock_get_discovery_url.return_value = 'fake_discovery_url' mock_context.auth_url = 'http://192.168.10.10:5000/v3' mock_context.user_name = 'fake_user' mock_context.tenant = 'fake_tenant' docker_volume_type = mock_cluster.labels.get( 'docker_volume_type') flannel_cidr = mock_cluster.labels.get('flannel_network_cidr') flannel_subnet = mock_cluster.labels.get( 'flannel_network_subnetlen') flannel_backend = mock_cluster.labels.get('flannel_backend') rexray_preempt = mock_cluster.labels.get('rexray_preempt') swarm_strategy = mock_cluster.labels.get('swarm_strategy') swarm_def = swarm_tdef.AtomicSwarmTemplateDefinition() swarm_def.get_params(mock_context, mock_cluster_template, mock_cluster) expected_kwargs = {'extra_params': { 'discovery_url': 'fake_discovery_url', 'magnum_url': mock_osc.magnum_url.return_value, 'flannel_network_cidr': flannel_cidr, 'flannel_backend': flannel_backend, 'flannel_network_subnetlen': flannel_subnet, 'auth_url': 'http://192.168.10.10:5000/v3', 'rexray_preempt': rexray_preempt, 'swarm_strategy': swarm_strategy, 
'docker_volume_type': docker_volume_type, 'nodes_affinity_policy': 'soft-anti-affinity'}} mock_get_params.assert_called_once_with(mock_context, mock_cluster_template, mock_cluster, **expected_kwargs) @mock.patch('requests.get') def test_swarm_validate_discovery_url(self, mock_get): expected_result = str('{"action":"get","node":{"key":"test","value":' '"1","modifiedIndex":10,"createdIndex":10}}') mock_resp = mock.MagicMock() mock_resp.text = expected_result mock_get.return_value = mock_resp k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() k8s_def.validate_discovery_url('http://etcd/test', 1) @mock.patch('requests.get') def test_swarm_validate_discovery_url_fail(self, mock_get): mock_get.side_effect = req_exceptions.RequestException() k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() self.assertRaises(exception.GetClusterSizeFailed, k8s_def.validate_discovery_url, 'http://etcd/test', 1) @mock.patch('requests.get') def test_swarm_validate_discovery_url_invalid(self, mock_get): mock_resp = mock.MagicMock() mock_resp.text = str('{"action":"get"}') mock_get.return_value = mock_resp k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() self.assertRaises(exception.InvalidClusterDiscoveryURL, k8s_def.validate_discovery_url, 'http://etcd/test', 1) @mock.patch('requests.get') def test_swarm_validate_discovery_url_unexpect_size(self, mock_get): expected_result = str('{"action":"get","node":{"key":"test","value":' '"1","modifiedIndex":10,"createdIndex":10}}') mock_resp = mock.MagicMock() mock_resp.text = expected_result mock_get.return_value = mock_resp k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() self.assertRaises(exception.InvalidClusterSize, k8s_def.validate_discovery_url, 'http://etcd/test', 5) @mock.patch('requests.get') def test_swarm_get_discovery_url(self, mock_get): CONF.set_override('etcd_discovery_service_endpoint_format', 'http://etcd/test?size=%(size)d', group='cluster') expected_discovery_url = 'http://etcd/token' mock_resp = mock.MagicMock() mock_resp.text = 
expected_discovery_url mock_get.return_value = mock_resp mock_cluster = mock.MagicMock() mock_cluster.discovery_url = None swarm_def = swarm_tdef.AtomicSwarmTemplateDefinition() discovery_url = swarm_def.get_discovery_url(mock_cluster) mock_get.assert_called_once_with('http://etcd/test?size=1') self.assertEqual(mock_cluster.discovery_url, expected_discovery_url) self.assertEqual(discovery_url, expected_discovery_url) @mock.patch('requests.get') def test_swarm_get_discovery_url_not_found(self, mock_get): mock_resp = mock.MagicMock() mock_resp.text = '' mock_get.return_value = mock_resp fake_cluster = mock.MagicMock() fake_cluster.discovery_url = None self.assertRaises( exception.InvalidDiscoveryURL, k8sa_tdef.AtomicK8sTemplateDefinition().get_discovery_url, fake_cluster) def test_swarm_get_heat_param(self): swarm_def = swarm_tdef.AtomicSwarmTemplateDefinition() heat_param = swarm_def.get_heat_param(cluster_attr='node_count') self.assertEqual('number_of_nodes', heat_param) def test_update_outputs(self): swarm_def = swarm_tdef.AtomicSwarmTemplateDefinition() expected_api_address = 'updated_address' expected_node_addresses = ['ex_minion', 'address'] outputs = [ {"output_value": expected_api_address, "description": "No description given", "output_key": "api_address"}, {"output_value": ['any', 'output'], "description": "No description given", "output_key": "swarm_master_private"}, {"output_value": ['any', 'output'], "description": "No description given", "output_key": "swarm_master"}, {"output_value": ['any', 'output'], "description": "No description given", "output_key": "swarm_nodes_private"}, {"output_value": expected_node_addresses, "description": "No description given", "output_key": "swarm_nodes"}, ] mock_stack = mock.MagicMock() mock_stack.to_dict.return_value = {'outputs': outputs} mock_cluster = mock.MagicMock() mock_cluster_template = mock.MagicMock() swarm_def.update_outputs(mock_stack, mock_cluster_template, mock_cluster) expected_api_address = 
"tcp://%s:2376" % expected_api_address self.assertEqual(expected_api_address, mock_cluster.api_address) self.assertEqual(expected_node_addresses, mock_cluster.node_addresses) class UbuntuMesosTemplateDefinitionTestCase(base.TestCase): @mock.patch('magnum.common.clients.OpenStackClients') @mock.patch('magnum.drivers.heat.template_def.BaseTemplateDefinition' '.get_params') @mock.patch('magnum.drivers.heat.template_def.TemplateDefinition' '.get_output') def test_mesos_get_params(self, mock_get_output, mock_get_params, mock_osc_class): mock_context = mock.MagicMock() mock_context.auth_url = 'http://192.168.10.10:5000/v3' mock_context.user_name = 'mesos_user' mock_context.project_id = 'admin' mock_context.domain_name = 'domainname' mock_cluster_template = mock.MagicMock() mock_cluster_template.tls_disabled = False mock_cluster = mock.MagicMock() mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52' del mock_cluster.stack_id rexray_preempt = mock_cluster.labels.get('rexray_preempt') mesos_slave_isolation = mock_cluster.labels.get( 'mesos_slave_isolation') mesos_slave_work_dir = mock_cluster.labels.get( 'mesos_slave_work_dir') mesos_slave_image_providers = mock_cluster.labels.get( 'image_providers') mesos_slave_executor_env_variables = mock_cluster.labels.get( 'mesos_slave_executor_env_variables') mock_osc = mock.MagicMock() mock_osc.cinder_region_name.return_value = 'RegionOne' mock_osc_class.return_value = mock_osc removal_nodes = ['node1', 'node2'] mock_scale_manager = mock.MagicMock() mock_scale_manager.get_removal_nodes.return_value = removal_nodes mesos_def = mesos_tdef.UbuntuMesosTemplateDefinition() mesos_def.get_params(mock_context, mock_cluster_template, mock_cluster, scale_manager=mock_scale_manager) expected_kwargs = {'extra_params': { 'region_name': mock_osc.cinder_region_name.return_value, 'auth_url': 'http://192.168.10.10:5000/v3', 'username': 'mesos_user', 'tenant_name': 'admin', 'domain_name': 'domainname', 'rexray_preempt': rexray_preempt, 
'mesos_slave_isolation': mesos_slave_isolation, 'mesos_slave_work_dir': mesos_slave_work_dir, 'mesos_slave_executor_env_variables': mesos_slave_executor_env_variables, 'mesos_slave_image_providers': mesos_slave_image_providers, 'slaves_to_remove': removal_nodes}} mock_get_params.assert_called_once_with(mock_context, mock_cluster_template, mock_cluster, **expected_kwargs) def test_mesos_get_heat_param(self): mesos_def = mesos_tdef.UbuntuMesosTemplateDefinition() heat_param = mesos_def.get_heat_param(cluster_attr='node_count') self.assertEqual('number_of_slaves', heat_param) heat_param = mesos_def.get_heat_param(cluster_attr='master_count') self.assertEqual('number_of_masters', heat_param) def test_update_outputs(self): mesos_def = mesos_tdef.UbuntuMesosTemplateDefinition() expected_api_address = 'updated_address' expected_node_addresses = ['ex_slave', 'address'] expected_master_addresses = ['ex_master', 'address'] outputs = [ {"output_value": expected_api_address, "description": "No description given", "output_key": "api_address"}, {"output_value": ['any', 'output'], "description": "No description given", "output_key": "mesos_master_private"}, {"output_value": expected_master_addresses, "description": "No description given", "output_key": "mesos_master"}, {"output_value": ['any', 'output'], "description": "No description given", "output_key": "mesos_slaves_private"}, {"output_value": expected_node_addresses, "description": "No description given", "output_key": "mesos_slaves"}, ] mock_stack = mock.MagicMock() mock_stack.to_dict.return_value = {'outputs': outputs} mock_cluster = mock.MagicMock() mock_cluster_template = mock.MagicMock() mesos_def.update_outputs(mock_stack, mock_cluster_template, mock_cluster) self.assertEqual(expected_api_address, mock_cluster.api_address) self.assertEqual(expected_node_addresses, mock_cluster.node_addresses) self.assertEqual(expected_master_addresses, mock_cluster.master_addresses) 
magnum-6.1.0/magnum/tests/unit/drivers/test_heat_driver.py0000666000175100017510000002054413244017334024026 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from mock import patch import magnum.conf from magnum.drivers.heat import driver as heat_driver from magnum.drivers.k8s_fedora_atomic_v1 import driver as k8s_atomic_dr from magnum import objects from magnum.objects.fields import ClusterStatus as cluster_status from magnum.tests import base from magnum.tests.unit.db import utils CONF = magnum.conf.CONF class TestHeatPoller(base.TestCase): @patch('magnum.conductor.utils.retrieve_cluster_template') @patch('oslo_config.cfg') @patch('magnum.common.clients.OpenStackClients') @patch('magnum.drivers.common.driver.Driver.get_driver') def setup_poll_test(self, mock_driver, mock_openstack_client, cfg, mock_retrieve_cluster_template): cfg.CONF.cluster_heat.max_attempts = 10 cluster = mock.MagicMock() cluster_template_dict = utils.get_test_cluster_template( coe='kubernetes') mock_heat_stack = mock.MagicMock() mock_heat_client = mock.MagicMock() mock_heat_client.stacks.get.return_value = mock_heat_stack mock_openstack_client.heat.return_value = mock_heat_client cluster_template = objects.ClusterTemplate(self.context, **cluster_template_dict) mock_retrieve_cluster_template.return_value = cluster_template mock_driver.return_value = k8s_atomic_dr.Driver() poller = heat_driver.HeatPoller(mock_openstack_client, mock.MagicMock(), cluster, 
k8s_atomic_dr.Driver()) poller.get_version_info = mock.MagicMock() return (mock_heat_stack, cluster, poller) def test_poll_no_save(self): mock_heat_stack, cluster, poller = self.setup_poll_test() cluster.status = cluster_status.CREATE_IN_PROGRESS mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS poller.poll_and_check() self.assertEqual(0, cluster.save.call_count) def test_poll_save(self): mock_heat_stack, cluster, poller = self.setup_poll_test() cluster.status = cluster_status.CREATE_IN_PROGRESS mock_heat_stack.stack_status = cluster_status.CREATE_FAILED mock_heat_stack.stack_status_reason = 'Create failed' self.assertIsNone(poller.poll_and_check()) self.assertEqual(2, cluster.save.call_count) self.assertEqual(cluster_status.CREATE_FAILED, cluster.status) self.assertEqual('Create failed', cluster.status_reason) def test_poll_done(self): mock_heat_stack, cluster, poller = self.setup_poll_test() mock_heat_stack.stack_status = cluster_status.DELETE_COMPLETE self.assertIsNone(poller.poll_and_check()) mock_heat_stack.stack_status = cluster_status.CREATE_FAILED self.assertIsNone(poller.poll_and_check()) def test_poll_done_by_update(self): mock_heat_stack, cluster, poller = self.setup_poll_test() mock_heat_stack.stack_status = cluster_status.UPDATE_COMPLETE mock_heat_stack.parameters = {'number_of_minions': 2} self.assertIsNone(poller.poll_and_check()) self.assertEqual(1, cluster.save.call_count) self.assertEqual(cluster_status.UPDATE_COMPLETE, cluster.status) self.assertEqual(2, cluster.node_count) def test_poll_done_by_update_failed(self): mock_heat_stack, cluster, poller = self.setup_poll_test() mock_heat_stack.stack_status = cluster_status.UPDATE_FAILED mock_heat_stack.parameters = {'number_of_minions': 2} self.assertIsNone(poller.poll_and_check()) self.assertEqual(2, cluster.save.call_count) self.assertEqual(cluster_status.UPDATE_FAILED, cluster.status) self.assertEqual(2, cluster.node_count) def test_poll_done_by_rollback_complete(self): 
mock_heat_stack, cluster, poller = self.setup_poll_test() mock_heat_stack.stack_status = cluster_status.ROLLBACK_COMPLETE mock_heat_stack.parameters = {'number_of_minions': 1} self.assertIsNone(poller.poll_and_check()) self.assertEqual(2, cluster.save.call_count) self.assertEqual(cluster_status.ROLLBACK_COMPLETE, cluster.status) self.assertEqual(1, cluster.node_count) def test_poll_done_by_rollback_failed(self): mock_heat_stack, cluster, poller = self.setup_poll_test() mock_heat_stack.stack_status = cluster_status.ROLLBACK_FAILED mock_heat_stack.parameters = {'number_of_minions': 1} self.assertIsNone(poller.poll_and_check()) self.assertEqual(2, cluster.save.call_count) self.assertEqual(cluster_status.ROLLBACK_FAILED, cluster.status) self.assertEqual(1, cluster.node_count) def test_poll_destroy(self): mock_heat_stack, cluster, poller = self.setup_poll_test() mock_heat_stack.stack_status = cluster_status.DELETE_FAILED self.assertIsNone(poller.poll_and_check()) # Destroy method is not called when stack delete failed self.assertEqual(0, cluster.destroy.call_count) mock_heat_stack.stack_status = cluster_status.DELETE_IN_PROGRESS poller.poll_and_check() self.assertEqual(0, cluster.destroy.call_count) self.assertEqual(cluster_status.DELETE_IN_PROGRESS, cluster.status) mock_heat_stack.stack_status = cluster_status.DELETE_COMPLETE self.assertIsNone(poller.poll_and_check()) # destroy and notifications are handled up the stack now self.assertEqual(cluster_status.DELETE_COMPLETE, cluster.status) def test_poll_node_count(self): mock_heat_stack, cluster, poller = self.setup_poll_test() mock_heat_stack.parameters = {'number_of_minions': 1} mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS poller.poll_and_check() self.assertEqual(1, cluster.node_count) def test_poll_node_count_by_update(self): mock_heat_stack, cluster, poller = self.setup_poll_test() mock_heat_stack.parameters = {'number_of_minions': 2} mock_heat_stack.stack_status = cluster_status.UPDATE_COMPLETE 
self.assertIsNone(poller.poll_and_check()) self.assertEqual(2, cluster.node_count) @patch('magnum.drivers.heat.driver.trust_manager') @patch('magnum.drivers.heat.driver.cert_manager') def test_delete_complete(self, cert_manager, trust_manager): mock_heat_stack, cluster, poller = self.setup_poll_test() poller._delete_complete() self.assertEqual( 1, cert_manager.delete_certificates_from_cluster.call_count) self.assertEqual(1, trust_manager.delete_trustee_and_trust.call_count) def test_create_or_complete(self): mock_heat_stack, cluster, poller = self.setup_poll_test() mock_heat_stack.stack_status = cluster_status.CREATE_COMPLETE mock_heat_stack.stack_status_reason = 'stack complete' poller._sync_cluster_and_template_status(mock_heat_stack) self.assertEqual('stack complete', cluster.status_reason) self.assertEqual(cluster_status.CREATE_COMPLETE, cluster.status) self.assertEqual(1, cluster.save.call_count) def test_sync_cluster_status(self): mock_heat_stack, cluster, poller = self.setup_poll_test() mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS mock_heat_stack.stack_status_reason = 'stack incomplete' poller._sync_cluster_status(mock_heat_stack) self.assertEqual('stack incomplete', cluster.status_reason) self.assertEqual(cluster_status.CREATE_IN_PROGRESS, cluster.status) @patch('magnum.drivers.heat.driver.LOG') def test_cluster_failed(self, logger): mock_heat_stack, cluster, poller = self.setup_poll_test() poller._sync_cluster_and_template_status(mock_heat_stack) poller._cluster_failed(mock_heat_stack) self.assertEqual(1, logger.error.call_count) magnum-6.1.0/magnum/tests/unit/drivers/__init__.py0000666000175100017510000000000013244017334022213 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/__init__.py0000666000175100017510000000000013244017334020535 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/db/0000775000175100017510000000000013244017675017031 5ustar 
zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/db/sqlalchemy/0000775000175100017510000000000013244017675021173 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/db/sqlalchemy/test_types.py0000666000175100017510000000600513244017334023743 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for custom SQLAlchemy types via Magnum DB.""" from oslo_db import exception as db_exc from oslo_utils import uuidutils import magnum.db.sqlalchemy.api as sa_api from magnum.db.sqlalchemy import models from magnum.tests.unit.db import base class SqlAlchemyCustomTypesTestCase(base.DbTestCase): def test_JSONEncodedDict_default_value(self): # Create ClusterTemplate w/o labels cluster_template1_id = uuidutils.generate_uuid() self.dbapi.create_cluster_template({'uuid': cluster_template1_id}) cluster_template1 = sa_api.model_query( models.ClusterTemplate).filter_by(uuid=cluster_template1_id).one() self.assertEqual({}, cluster_template1.labels) # Create ClusterTemplate with labels cluster_template2_id = uuidutils.generate_uuid() self.dbapi.create_cluster_template( {'uuid': cluster_template2_id, 'labels': {'bar': 'foo'}}) cluster_template2 = sa_api.model_query( models.ClusterTemplate).filter_by(uuid=cluster_template2_id).one() self.assertEqual('foo', cluster_template2.labels['bar']) def test_JSONEncodedDict_type_check(self): self.assertRaises(db_exc.DBError, self.dbapi.create_cluster_template, {'labels': ['this is not a dict']}) def 
test_JSONEncodedList_default_value(self): # Create cluster w/o master_addresses cluster1_id = uuidutils.generate_uuid() self.dbapi.create_cluster({'uuid': cluster1_id}) cluster1 = sa_api.model_query( models.Cluster).filter_by(uuid=cluster1_id).one() self.assertEqual([], cluster1.master_addresses) # Create cluster with master_addresses cluster2_id = uuidutils.generate_uuid() self.dbapi.create_cluster({'uuid': cluster2_id, 'master_addresses': ['mymaster_address1', 'mymaster_address2']}) cluster2 = sa_api.model_query( models.Cluster).filter_by(uuid=cluster2_id).one() self.assertEqual(['mymaster_address1', 'mymaster_address2'], cluster2.master_addresses) def test_JSONEncodedList_type_check(self): self.assertRaises(db_exc.DBError, self.dbapi.create_cluster, {'master_addresses': {'this is not a list': 'test'}}) magnum-6.1.0/magnum/tests/unit/db/sqlalchemy/__init__.py0000666000175100017510000000000013244017334023264 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/db/test_x509keypair.py0000666000175100017510000001046313244017334022532 0ustar zuulzuul00000000000000# Copyright 2015 NEC Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for manipulating X509KeyPairs via the DB API""" from oslo_utils import uuidutils import six from magnum.common import context from magnum.common import exception from magnum.tests.unit.db import base from magnum.tests.unit.db import utils class DbX509KeyPairTestCase(base.DbTestCase): def test_create_x509keypair(self): utils.create_test_x509keypair() def test_create_x509keypair_already_exists(self): utils.create_test_x509keypair() self.assertRaises(exception.X509KeyPairAlreadyExists, utils.create_test_x509keypair) def test_get_x509keypair_by_id(self): x509keypair = utils.create_test_x509keypair() res = self.dbapi.get_x509keypair_by_id(self.context, x509keypair.id) self.assertEqual(x509keypair.id, res.id) self.assertEqual(x509keypair.uuid, res.uuid) def test_get_x509keypair_by_uuid(self): x509keypair = utils.create_test_x509keypair() res = self.dbapi.get_x509keypair_by_uuid(self.context, x509keypair.uuid) self.assertEqual(x509keypair.id, res.id) self.assertEqual(x509keypair.uuid, res.uuid) def test_get_x509keypair_that_does_not_exist(self): self.assertRaises(exception.X509KeyPairNotFound, self.dbapi.get_x509keypair_by_id, self.context, 999) self.assertRaises(exception.X509KeyPairNotFound, self.dbapi.get_x509keypair_by_uuid, self.context, '12345678-9999-0000-aaaa-123456789012') def test_get_x509keypair_list(self): uuids = [] for i in range(1, 6): x509keypair = utils.create_test_x509keypair( uuid=uuidutils.generate_uuid()) uuids.append(six.text_type(x509keypair['uuid'])) res = self.dbapi.get_x509keypair_list(self.context) res_uuids = [r.uuid for r in res] self.assertEqual(sorted(uuids), sorted(res_uuids)) def test_get_x509keypair_list_by_admin_all_tenants(self): uuids = [] for i in range(1, 6): x509keypair = utils.create_test_x509keypair( uuid=uuidutils.generate_uuid(), project_id=uuidutils.generate_uuid(), user_id=uuidutils.generate_uuid()) uuids.append(six.text_type(x509keypair['uuid'])) ctx = context.make_admin_context(all_tenants=True) res = 
self.dbapi.get_x509keypair_list(ctx) res_uuids = [r.uuid for r in res] self.assertEqual(sorted(uuids), sorted(res_uuids)) def test_destroy_x509keypair(self): x509keypair = utils.create_test_x509keypair() self.assertIsNotNone(self.dbapi.get_x509keypair_by_id( self.context, x509keypair.id)) self.dbapi.destroy_x509keypair(x509keypair.id) self.assertRaises(exception.X509KeyPairNotFound, self.dbapi.get_x509keypair_by_id, self.context, x509keypair.id) def test_destroy_x509keypair_by_uuid(self): x509keypair = utils.create_test_x509keypair() self.assertIsNotNone(self.dbapi.get_x509keypair_by_uuid( self.context, x509keypair.uuid)) self.dbapi.destroy_x509keypair(x509keypair.uuid) self.assertRaises(exception.X509KeyPairNotFound, self.dbapi.get_x509keypair_by_uuid, self.context, x509keypair.uuid) def test_destroy_x509keypair_that_does_not_exist(self): self.assertRaises(exception.X509KeyPairNotFound, self.dbapi.destroy_x509keypair, '12345678-9999-0000-aaaa-123456789012') magnum-6.1.0/magnum/tests/unit/db/test_cluster_template.py0000666000175100017510000002103013244017334024004 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for manipulating ClusterTemplate via the DB API""" from oslo_utils import uuidutils import six from magnum.common import exception from magnum.tests.unit.db import base from magnum.tests.unit.db import utils class DbClusterTemplateTestCase(base.DbTestCase): def test_create_cluster_template(self): utils.create_test_cluster_template() def test_get_cluster_template_list(self): uuids = [] for i in range(1, 6): ct = utils.create_test_cluster_template( id=i, uuid=uuidutils.generate_uuid()) uuids.append(six.text_type(ct['uuid'])) res = self.dbapi.get_cluster_template_list(self.context) res_uuids = [r.uuid for r in res] self.assertEqual(sorted(uuids), sorted(res_uuids)) def test_get_cluster_template_list_sorted(self): uuids = [] for _ in range(5): ct = utils.create_test_cluster_template( uuid=uuidutils.generate_uuid()) uuids.append(six.text_type(ct['uuid'])) res = self.dbapi.get_cluster_template_list(self.context, sort_key='uuid') res_uuids = [r.uuid for r in res] self.assertEqual(sorted(uuids), res_uuids) self.assertRaises(exception.InvalidParameterValue, self.dbapi.get_cluster_template_list, self.context, sort_key='foo') def test_get_cluster_template_list_with_filters(self): ct1 = utils.create_test_cluster_template( id=1, name='ct-one', uuid=uuidutils.generate_uuid(), image_id='image1') ct2 = utils.create_test_cluster_template( id=2, name='ct-two', uuid=uuidutils.generate_uuid(), image_id='image2') res = self.dbapi.get_cluster_template_list(self.context, filters={'name': 'ct-one'}) self.assertEqual([ct1['id']], [r.id for r in res]) res = self.dbapi.get_cluster_template_list( self.context, filters={'name': 'bad-name'}) self.assertEqual([], [r.id for r in res]) res = self.dbapi.get_cluster_template_list( self.context, filters={'image_id': 'image1'}) self.assertEqual([ct1['id']], [r.id for r in res]) res = self.dbapi.get_cluster_template_list( self.context, filters={'image_id': 'image2'}) self.assertEqual([ct2['id']], [r.id for r in res]) def 
test_get_cluster_template_by_id(self): ct = utils.create_test_cluster_template() cluster_template = self.dbapi.get_cluster_template_by_id( self.context, ct['id']) self.assertEqual(ct['uuid'], cluster_template.uuid) def test_get_cluster_template_by_id_public(self): ct = utils.create_test_cluster_template(user_id='not_me', public=True) cluster_template = self.dbapi.get_cluster_template_by_id( self.context, ct['id']) self.assertEqual(ct['uuid'], cluster_template.uuid) def test_get_cluster_template_by_uuid(self): ct = utils.create_test_cluster_template() cluster_template = self.dbapi.get_cluster_template_by_uuid( self.context, ct['uuid']) self.assertEqual(ct['id'], cluster_template.id) def test_get_cluster_template_by_uuid_public(self): ct = utils.create_test_cluster_template(user_id='not_me', public=True) cluster_template = self.dbapi.get_cluster_template_by_uuid( self.context, ct['uuid']) self.assertEqual(ct['id'], cluster_template.id) def test_get_cluster_template_that_does_not_exist(self): self.assertRaises(exception.ClusterTemplateNotFound, self.dbapi.get_cluster_template_by_id, self.context, 666) def test_get_cluster_template_by_name(self): ct = utils.create_test_cluster_template() res = self.dbapi.get_cluster_template_by_name(self.context, ct['name']) self.assertEqual(ct['id'], res.id) self.assertEqual(ct['uuid'], res.uuid) def test_get_cluster_template_by_name_public(self): ct = utils.create_test_cluster_template(user_id='not_me', public=True) res = self.dbapi.get_cluster_template_by_name(self.context, ct['name']) self.assertEqual(ct['id'], res.id) self.assertEqual(ct['uuid'], res.uuid) def test_get_cluster_template_by_name_multiple_cluster_template(self): utils.create_test_cluster_template( id=1, name='ct', uuid=uuidutils.generate_uuid(), image_id='image1') utils.create_test_cluster_template( id=2, name='ct', uuid=uuidutils.generate_uuid(), image_id='image2') self.assertRaises(exception.Conflict, self.dbapi.get_cluster_template_by_name, self.context, 'ct') def 
test_get_cluster_template_by_name_not_found(self): self.assertRaises(exception.ClusterTemplateNotFound, self.dbapi.get_cluster_template_by_name, self.context, 'not_found') def test_get_cluster_template_by_uuid_that_does_not_exist(self): self.assertRaises(exception.ClusterTemplateNotFound, self.dbapi.get_cluster_template_by_uuid, self.context, '12345678-9999-0000-aaaa-123456789012') def test_update_cluster_template(self): ct = utils.create_test_cluster_template() res = self.dbapi.update_cluster_template(ct['id'], {'name': 'updated-model'}) self.assertEqual('updated-model', res.name) def test_update_cluster_template_that_does_not_exist(self): self.assertRaises(exception.ClusterTemplateNotFound, self.dbapi.update_cluster_template, 666, {'name': ''}) def test_update_cluster_template_uuid(self): ct = utils.create_test_cluster_template() self.assertRaises(exception.InvalidParameterValue, self.dbapi.update_cluster_template, ct['id'], {'uuid': 'hello'}) def test_destroy_cluster_template(self): ct = utils.create_test_cluster_template() self.dbapi.destroy_cluster_template(ct['id']) self.assertRaises(exception.ClusterTemplateNotFound, self.dbapi.get_cluster_template_by_id, self.context, ct['id']) def test_destroy_cluster_template_by_uuid(self): uuid = uuidutils.generate_uuid() utils.create_test_cluster_template(uuid=uuid) self.assertIsNotNone(self.dbapi.get_cluster_template_by_uuid( self.context, uuid)) self.dbapi.destroy_cluster_template(uuid) self.assertRaises(exception.ClusterTemplateNotFound, self.dbapi.get_cluster_template_by_uuid, self.context, uuid) def test_destroy_cluster_template_that_does_not_exist(self): self.assertRaises(exception.ClusterTemplateNotFound, self.dbapi.destroy_cluster_template, 666) def test_destroy_cluster_template_that_referenced_by_clusters(self): ct = utils.create_test_cluster_template() cluster = utils.create_test_cluster(cluster_template_id=ct['uuid']) self.assertEqual(ct['uuid'], cluster.cluster_template_id) 
self.assertRaises(exception.ClusterTemplateReferenced, self.dbapi.destroy_cluster_template, ct['id']) def test_create_cluster_template_already_exists(self): uuid = uuidutils.generate_uuid() utils.create_test_cluster_template(id=1, uuid=uuid) self.assertRaises(exception.ClusterTemplateAlreadyExists, utils.create_test_cluster_template, id=2, uuid=uuid) magnum-6.1.0/magnum/tests/unit/db/test_cluster.py0000666000175100017510000002514613244017334022125 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for manipulating Clusters via the DB API""" from oslo_utils import uuidutils import six from magnum.common import context from magnum.common import exception from magnum.objects.fields import ClusterStatus as cluster_status from magnum.tests.unit.db import base from magnum.tests.unit.db import utils class DbClusterTestCase(base.DbTestCase): def test_create_cluster(self): utils.create_test_cluster() def test_create_cluster_nullable_cluster_template_id(self): utils.create_test_cluster(cluster_template_id=None) def test_create_cluster_already_exists(self): utils.create_test_cluster() self.assertRaises(exception.ClusterAlreadyExists, utils.create_test_cluster) def test_get_cluster_by_id(self): cluster = utils.create_test_cluster() res = self.dbapi.get_cluster_by_id(self.context, cluster.id) self.assertEqual(cluster.id, res.id) self.assertEqual(cluster.uuid, res.uuid) def test_get_cluster_by_name(self): cluster = utils.create_test_cluster() res = self.dbapi.get_cluster_by_name(self.context, cluster.name) self.assertEqual(cluster.name, res.name) self.assertEqual(cluster.uuid, res.uuid) def test_get_cluster_by_uuid(self): cluster = utils.create_test_cluster() res = self.dbapi.get_cluster_by_uuid(self.context, cluster.uuid) self.assertEqual(cluster.id, res.id) self.assertEqual(cluster.uuid, res.uuid) def test_get_cluster_that_does_not_exist(self): self.assertRaises(exception.ClusterNotFound, self.dbapi.get_cluster_by_id, self.context, 999) self.assertRaises(exception.ClusterNotFound, self.dbapi.get_cluster_by_uuid, self.context, '12345678-9999-0000-aaaa-123456789012') self.assertRaises(exception.ClusterNotFound, self.dbapi.get_cluster_by_name, self.context, 'not_found') def test_get_cluster_by_name_multiple_cluster(self): utils.create_test_cluster( id=1, name='clusterone', uuid=uuidutils.generate_uuid()) utils.create_test_cluster( id=2, name='clusterone', uuid=uuidutils.generate_uuid()) self.assertRaises(exception.Conflict, self.dbapi.get_cluster_by_name, 
self.context, 'clusterone') def test_get_all_cluster_stats(self): utils.create_test_cluster( id=1, name='clusterone', uuid=uuidutils.generate_uuid()) utils.create_test_cluster( id=2, name='clustertwo', uuid=uuidutils.generate_uuid()) ret = self.dbapi.get_cluster_stats(self.context) self.assertEqual(ret, (2, 12)) def test_get_one_tenant_cluster_stats(self): utils.create_test_cluster( id=1, name='clusterone', project_id='proj1', uuid=uuidutils.generate_uuid()) utils.create_test_cluster( id=2, name='clustertwo', project_id='proj2', uuid=uuidutils.generate_uuid()) ret = self.dbapi.get_cluster_stats(self.context, 'proj2') self.assertEqual(ret, (1, 6)) def test_get_cluster_list(self): uuids = [] for i in range(1, 6): cluster = utils.create_test_cluster(uuid=uuidutils.generate_uuid()) uuids.append(six.text_type(cluster['uuid'])) res = self.dbapi.get_cluster_list(self.context) res_uuids = [r.uuid for r in res] self.assertEqual(sorted(uuids), sorted(res_uuids)) def test_get_cluster_list_sorted(self): uuids = [] for _ in range(5): cluster = utils.create_test_cluster(uuid=uuidutils.generate_uuid()) uuids.append(six.text_type(cluster.uuid)) res = self.dbapi.get_cluster_list(self.context, sort_key='uuid') res_uuids = [r.uuid for r in res] self.assertEqual(sorted(uuids), res_uuids) self.assertRaises(exception.InvalidParameterValue, self.dbapi.get_cluster_list, self.context, sort_key='foo') def test_get_cluster_list_with_filters(self): ct1 = utils.get_test_cluster_template(id=1, uuid=uuidutils.generate_uuid()) ct2 = utils.get_test_cluster_template(id=2, uuid=uuidutils.generate_uuid()) self.dbapi.create_cluster_template(ct1) self.dbapi.create_cluster_template(ct2) cluster1 = utils.create_test_cluster( name='cluster-one', uuid=uuidutils.generate_uuid(), cluster_template_id=ct1['uuid'], status=cluster_status.CREATE_IN_PROGRESS) cluster2 = utils.create_test_cluster( name='cluster-two', uuid=uuidutils.generate_uuid(), cluster_template_id=ct2['uuid'], node_count=1, master_count=1, 
status=cluster_status.UPDATE_IN_PROGRESS) cluster3 = utils.create_test_cluster( name='cluster-three', node_count=2, master_count=5, status=cluster_status.DELETE_IN_PROGRESS) res = self.dbapi.get_cluster_list( self.context, filters={'cluster_template_id': ct1['uuid']}) self.assertEqual([cluster1.id], [r.id for r in res]) res = self.dbapi.get_cluster_list( self.context, filters={'cluster_template_id': ct2['uuid']}) self.assertEqual([cluster2.id], [r.id for r in res]) res = self.dbapi.get_cluster_list(self.context, filters={'name': 'cluster-one'}) self.assertEqual([cluster1.id], [r.id for r in res]) res = self.dbapi.get_cluster_list(self.context, filters={'name': 'bad-cluster'}) self.assertEqual([], [r.id for r in res]) res = self.dbapi.get_cluster_list(self.context, filters={'node_count': 3}) self.assertEqual([cluster1.id], [r.id for r in res]) res = self.dbapi.get_cluster_list(self.context, filters={'node_count': 1}) self.assertEqual([cluster2.id], [r.id for r in res]) res = self.dbapi.get_cluster_list(self.context, filters={'master_count': 3}) self.assertEqual([cluster1.id], [r.id for r in res]) res = self.dbapi.get_cluster_list(self.context, filters={'master_count': 1}) self.assertEqual([cluster2.id], [r.id for r in res]) filters = {'status': [cluster_status.CREATE_IN_PROGRESS, cluster_status.DELETE_IN_PROGRESS]} res = self.dbapi.get_cluster_list(self.context, filters=filters) self.assertEqual([cluster1.id, cluster3.id], [r.id for r in res]) def test_get_cluster_list_by_admin_all_tenants(self): uuids = [] for i in range(1, 6): cluster = utils.create_test_cluster( uuid=uuidutils.generate_uuid(), project_id=uuidutils.generate_uuid(), user_id=uuidutils.generate_uuid()) uuids.append(six.text_type(cluster['uuid'])) ctx = context.make_admin_context(all_tenants=True) res = self.dbapi.get_cluster_list(ctx) res_uuids = [r.uuid for r in res] self.assertEqual(sorted(uuids), sorted(res_uuids)) def test_get_cluster_list_cluster_template_not_exist(self): 
utils.create_test_cluster() self.assertEqual(1, len(self.dbapi.get_cluster_list(self.context))) res = self.dbapi.get_cluster_list(self.context, filters={ 'cluster_template_id': uuidutils.generate_uuid()}) self.assertEqual(0, len(res)) def test_destroy_cluster(self): cluster = utils.create_test_cluster() self.assertIsNotNone(self.dbapi.get_cluster_by_id(self.context, cluster.id)) self.dbapi.destroy_cluster(cluster.id) self.assertRaises(exception.ClusterNotFound, self.dbapi.get_cluster_by_id, self.context, cluster.id) def test_destroy_cluster_by_uuid(self): cluster = utils.create_test_cluster() self.assertIsNotNone(self.dbapi.get_cluster_by_uuid(self.context, cluster.uuid)) self.dbapi.destroy_cluster(cluster.uuid) self.assertRaises(exception.ClusterNotFound, self.dbapi.get_cluster_by_uuid, self.context, cluster.uuid) def test_destroy_cluster_by_id_that_does_not_exist(self): self.assertRaises(exception.ClusterNotFound, self.dbapi.destroy_cluster, '12345678-9999-0000-aaaa-123456789012') def test_destroy_cluster_by_uuid_that_does_not_exist(self): self.assertRaises(exception.ClusterNotFound, self.dbapi.destroy_cluster, '999') def test_update_cluster(self): cluster = utils.create_test_cluster() old_nc = cluster.node_count new_nc = 5 self.assertNotEqual(old_nc, new_nc) res = self.dbapi.update_cluster(cluster.id, {'node_count': new_nc}) self.assertEqual(new_nc, res.node_count) def test_update_cluster_not_found(self): cluster_uuid = uuidutils.generate_uuid() self.assertRaises(exception.ClusterNotFound, self.dbapi.update_cluster, cluster_uuid, {'node_count': 5}) def test_update_cluster_uuid(self): cluster = utils.create_test_cluster() self.assertRaises(exception.InvalidParameterValue, self.dbapi.update_cluster, cluster.id, {'uuid': ''}) magnum-6.1.0/magnum/tests/unit/db/test_federation.py0000666000175100017510000002414313244017334022560 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in 
compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for manipulating Federations via the DB API""" from oslo_utils import uuidutils import six from magnum.common import context from magnum.common import exception from magnum.tests.unit.db import base from magnum.tests.unit.db import utils class DbFederationTestCase(base.DbTestCase): def test_create_federation(self): utils.create_test_federation() def test_create_federation_already_exists(self): utils.create_test_federation() self.assertRaises(exception.FederationAlreadyExists, utils.create_test_federation) def test_get_federation_by_id(self): federation = utils.create_test_federation() res = self.dbapi.get_federation_by_id(self.context, federation.id) self.assertEqual(federation.id, res.id) self.assertEqual(federation.uuid, res.uuid) def test_get_federation_by_name(self): federation = utils.create_test_federation() res = self.dbapi.get_federation_by_name(self.context, federation.name) self.assertEqual(federation.name, res.name) self.assertEqual(federation.uuid, res.uuid) def test_get_federation_by_uuid(self): federation = utils.create_test_federation() res = self.dbapi.get_federation_by_uuid(self.context, federation.uuid) self.assertEqual(federation.id, res.id) self.assertEqual(federation.uuid, res.uuid) def test_get_federation_that_does_not_exist(self): self.assertRaises(exception.FederationNotFound, self.dbapi.get_federation_by_id, self.context, 999) self.assertRaises(exception.FederationNotFound, self.dbapi.get_federation_by_uuid, self.context, '12345678-9999-0000-aaaa-123456789012') 
self.assertRaises(exception.FederationNotFound, self.dbapi.get_federation_by_name, self.context, 'not_found') def test_get_federation_by_name_multiple_federation(self): utils.create_test_federation(id=1, name='federation-1', uuid=uuidutils.generate_uuid()) utils.create_test_federation(id=2, name='federation-1', uuid=uuidutils.generate_uuid()) self.assertRaises(exception.Conflict, self.dbapi.get_federation_by_name, self.context, 'federation-1') def test_get_federation_list(self): uuids = [] for _ in range(5): federation = utils.create_test_federation( uuid=uuidutils.generate_uuid()) uuids.append(six.text_type(federation.uuid)) res = self.dbapi.get_federation_list(self.context, sort_key='uuid') res_uuids = [r.uuid for r in res] self.assertEqual(sorted(uuids), res_uuids) def test_get_federation_list_sorted(self): uuids = [] for _ in range(5): federation = utils.create_test_federation( uuid=uuidutils.generate_uuid()) uuids.append(six.text_type(federation.uuid)) res = self.dbapi.get_federation_list(self.context, sort_key='uuid') res_uuids = [r.uuid for r in res] self.assertEqual(sorted(uuids), res_uuids) self.assertRaises(exception.InvalidParameterValue, self.dbapi.get_federation_list, self.context, sort_key='foo') def test_get_federation_list_with_filters(self): fed1 = utils.create_test_federation( id=1, uuid=uuidutils.generate_uuid(), name='fed1', project_id='proj1', hostcluster_id='master1', member_ids=['member1', 'member2'], properties={'dns-zone': 'fed1.com.'}) fed2 = utils.create_test_federation( id=2, uuid=uuidutils.generate_uuid(), name='fed', project_id='proj2', hostcluster_id='master2', member_ids=['member3', 'member4'], properties={"dns-zone": "fed2.com."}) # NOTE(clenimar): we are specifying a project_id to the test # resources above, which means that our current context # (self.context) will not be able to see these resources. 
# Create an admin context in order to test the queries: ctx = context.make_admin_context(all_tenants=True) # Filter by name: res = self.dbapi.get_federation_list(ctx, filters={'name': 'fed1'}) self.assertEqual([fed1.id], [r.id for r in res]) res = self.dbapi.get_federation_list(ctx, filters={'name': 'foo'}) self.assertEqual([], [r.id for r in res]) # Filter by project_id res = self.dbapi.get_federation_list(ctx, filters={'project_id': 'proj1'}) self.assertEqual([fed1.id], [r.id for r in res]) res = self.dbapi.get_federation_list(ctx, filters={'project_id': 'foo'}) self.assertEqual([], [r.id for r in res]) # Filter by hostcluster_id res = self.dbapi.get_federation_list(ctx, filters={ 'hostcluster_id': 'master1'}) self.assertEqual([fed1.id], [r.id for r in res]) res = self.dbapi.get_federation_list(ctx, filters={ 'hostcluster_id': 'master2'}) self.assertEqual([fed2.id], [r.id for r in res]) res = self.dbapi.get_federation_list(ctx, filters={'hostcluster_id': 'foo'}) self.assertEqual([], [r.id for r in res]) # Filter by member_ids (please note that it is currently implemented # as an exact match. 
So it will only return federations whose member # clusters are exactly those passed as a filter) res = self.dbapi.get_federation_list( ctx, filters={'member_ids': ['member1', 'member2']}) self.assertEqual([fed1.id], [r.id for r in res]) res = self.dbapi.get_federation_list( ctx, filters={'member_ids': ['foo']}) self.assertEqual([], [r.id for r in res]) # Filter by properties res = self.dbapi.get_federation_list( ctx, filters={ 'properties': {'dns-zone': 'fed2.com.'} }) self.assertEqual([fed2.id], [r.id for r in res]) res = self.dbapi.get_federation_list( ctx, filters={ 'properties': {'dns-zone': 'foo.bar.'} }) self.assertEqual([], [r.id for r in res]) def test_get_federation_list_by_admin_all_tenants(self): uuids = [] for _ in range(5): federation = utils.create_test_federation( uuid=uuidutils.generate_uuid(), project_id=uuidutils.generate_uuid()) uuids.append(six.text_type(federation['uuid'])) ctx = context.make_admin_context(all_tenants=True) res = self.dbapi.get_federation_list(ctx) res_uuids = [r.uuid for r in res] self.assertEqual(len(res), 5) self.assertEqual(sorted(uuids), sorted(res_uuids)) def test_destroy_federation(self): federation = utils.create_test_federation() self.assertIsNotNone( self.dbapi.get_federation_by_id(self.context, federation.id)) self.dbapi.destroy_federation(federation.id) self.assertRaises(exception.FederationNotFound, self.dbapi.get_federation_by_id, self.context, federation.id) def test_destroy_federation_by_uuid(self): federation = utils.create_test_federation( uuid=uuidutils.generate_uuid()) self.assertIsNotNone( self.dbapi.get_federation_by_uuid(self.context, federation.uuid)) self.dbapi.destroy_federation(federation.uuid) self.assertRaises(exception.FederationNotFound, self.dbapi.get_federation_by_uuid, self.context, federation.uuid) def test_destroy_federation_by_id_that_does_not_exist(self): self.assertRaises(exception.FederationNotFound, self.dbapi.destroy_federation, '12345678-9999-0000-aaaa-123456789012') def 
test_destroy_federation_by_uudid_that_does_not_exist(self): self.assertRaises(exception.FederationNotFound, self.dbapi.destroy_federation, '15') def test_update_federation_members(self): federation = utils.create_test_federation() old_members = federation.member_ids new_members = old_members + ['new-member-id'] self.assertNotEqual(old_members, new_members) res = self.dbapi.update_federation(federation.id, {'member_ids': new_members}) self.assertEqual(new_members, res.member_ids) def test_update_federation_properties(self): federation = utils.create_test_federation() old_properties = federation.properties new_properties = { 'dns-zone': 'new.domain.com.' } self.assertNotEqual(old_properties, new_properties) res = self.dbapi.update_federation(federation.id, {'properties': new_properties}) self.assertEqual(new_properties, res.properties) def test_update_federation_not_found(self): federation_uuid = uuidutils.generate_uuid() self.assertRaises(exception.FederationNotFound, self.dbapi.update_federation, federation_uuid, {'member_ids': ['foo']}) magnum-6.1.0/magnum/tests/unit/db/test_magnum_service.py0000666000175100017510000000767013244017334023452 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for manipulating MagnumService via the DB API""" from magnum.common import context # NOQA from magnum.common import exception from magnum.tests.unit.db import base from magnum.tests.unit.db import utils class DbMagnumServiceTestCase(base.DbTestCase): def test_create_magnum_service(self): utils.create_test_magnum_service() def test_create_magnum_service_failure_for_dup(self): ms = utils.create_test_magnum_service() res = self.dbapi.get_magnum_service_by_host_and_binary( ms['host'], ms['binary']) self.assertEqual(ms.id, res.id) def test_get_magnum_service_by_host_and_binary(self): ms = utils.create_test_magnum_service() res = self.dbapi.get_magnum_service_by_host_and_binary( ms['host'], ms['binary']) self.assertEqual(ms.id, res.id) def test_get_magnum_service_by_host_and_binary_failure(self): utils.create_test_magnum_service() res = self.dbapi.get_magnum_service_by_host_and_binary( 'fakehost1', 'fake-bin1') self.assertIsNone(res) def test_update_magnum_service(self): ms = utils.create_test_magnum_service() d2 = True update = {'disabled': d2} ms1 = self.dbapi.update_magnum_service(ms['id'], update) self.assertEqual(ms['id'], ms1['id']) self.assertEqual(d2, ms1['disabled']) res = self.dbapi.get_magnum_service_by_host_and_binary( 'fakehost', 'fake-bin') self.assertEqual(ms1['id'], res['id']) self.assertEqual(d2, res['disabled']) def test_update_magnum_service_failure(self): ms = utils.create_test_magnum_service() fake_update = {'fake_field': 'fake_value'} self.assertRaises(exception.MagnumServiceNotFound, self.dbapi.update_magnum_service, ms['id'] + 1, fake_update) def test_destroy_magnum_service(self): ms = utils.create_test_magnum_service() res = self.dbapi.get_magnum_service_by_host_and_binary( 'fakehost', 'fake-bin') self.assertEqual(res['id'], ms['id']) self.dbapi.destroy_magnum_service(ms['id']) res = self.dbapi.get_magnum_service_by_host_and_binary( 'fakehost', 'fake-bin') self.assertIsNone(res) def test_destroy_magnum_service_failure(self): ms = 
utils.create_test_magnum_service() self.assertRaises(exception.MagnumServiceNotFound, self.dbapi.destroy_magnum_service, ms['id'] + 1) def test_get_magnum_service_list(self): fake_ms_params = { 'report_count': 1010, 'host': 'FakeHost', 'binary': 'FakeBin', 'disabled': False, 'disabled_reason': 'FakeReason' } utils.create_test_magnum_service(**fake_ms_params) res = self.dbapi.get_magnum_service_list() self.assertEqual(1, len(res)) res = res[0] for k, v in fake_ms_params.items(): self.assertEqual(res[k], v) fake_ms_params['binary'] = 'FakeBin1' fake_ms_params['disabled'] = True utils.create_test_magnum_service(**fake_ms_params) res = self.dbapi.get_magnum_service_list(disabled=True) self.assertEqual(1, len(res)) res = res[0] for k, v in fake_ms_params.items(): self.assertEqual(res[k], v) magnum-6.1.0/magnum/tests/unit/db/base.py0000666000175100017510000000416613244017334020316 0ustar zuulzuul00000000000000# Copyright (c) 2012 NTT DOCOMO, INC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Magnum DB test base class.""" import fixtures import magnum.conf from magnum.db import api as dbapi from magnum.db.sqlalchemy import api as sqla_api from magnum.db.sqlalchemy import migration from magnum.db.sqlalchemy import models from magnum.tests import base CONF = magnum.conf.CONF _DB_CACHE = None class Database(fixtures.Fixture): def __init__(self, db_api, db_migrate, sql_connection): self.sql_connection = sql_connection self.engine = db_api.get_engine() self.engine.dispose() conn = self.engine.connect() self.setup_sqlite(db_migrate) self.post_migrations() self._DB = "".join(line for line in conn.connection.iterdump()) self.engine.dispose() def setup_sqlite(self, db_migrate): if db_migrate.version(): return models.Base.metadata.create_all(self.engine) db_migrate.stamp('head') def _setUp(self): conn = self.engine.connect() conn.connection.executescript(self._DB) self.addCleanup(self.engine.dispose) def post_migrations(self): """Any addition steps that are needed outside of the migrations.""" class DbTestCase(base.TestCase): def setUp(self): super(DbTestCase, self).setUp() self.dbapi = dbapi.get_instance() global _DB_CACHE if not _DB_CACHE: _DB_CACHE = Database(sqla_api, migration, sql_connection=CONF.database.connection) self.useFixture(_DB_CACHE) magnum-6.1.0/magnum/tests/unit/db/__init__.py0000666000175100017510000000000013244017334021122 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/tests/unit/db/test_quota.py0000666000175100017510000001547313244017334021577 0ustar zuulzuul00000000000000# Copyright 2016 Yahoo! Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for manipulating Quota via the DB API""" from magnum.common import exception from magnum.tests.unit.db import base from magnum.tests.unit.db import utils class DbQuotaTestCase(base.DbTestCase): def test_create_quota(self): utils.create_test_quotas() def test_create_quota_already_exists(self): utils.create_test_quotas() self.assertRaises(exception.QuotaAlreadyExists, utils.create_test_quotas) def test_get_quota_all(self): q = utils.create_test_quotas() res = self.dbapi.quota_get_all_by_project_id( project_id='fake_project') for r in res: self.assertEqual(q.id, r.id) self.assertEqual(q.hard_limit, r.hard_limit) self.assertEqual(q.project_id, r.project_id) self.assertEqual(q.resource, r.resource) def test_get_quota_by_project_id_resource(self): q = utils.create_test_quotas(project_id='123', resource='test-res', hard_limit=5) res = self.dbapi.get_quota_by_project_id_resource('123', 'test-res') self.assertEqual(q.hard_limit, res.hard_limit) self.assertEqual(q.project_id, res.project_id) self.assertEqual(q.resource, res.resource) def test_get_quota_by_project_id_resource_not_found(self): utils.create_test_quotas(project_id='123', resource='test-res', hard_limit=5) self.assertRaises(exception.QuotaNotFound, self.dbapi.get_quota_by_project_id_resource, project_id='123', resource='bad-res') def test_get_quota_list(self): project_ids = [] for i in range(1, 6): project_id = 'proj-'+str(i) utils.create_test_quotas(project_id=project_id) project_ids.append(project_id) res = self.dbapi.get_quota_list(self.context) res_proj_ids = [r.project_id for r in res] 
self.assertEqual(sorted(project_ids), sorted(res_proj_ids)) def test_get_quota_list_sorted(self): project_ids = [] for i in range(1, 6): project_id = 'proj-'+str(i) utils.create_test_quotas(project_id=project_id) project_ids.append(project_id) res = self.dbapi.get_quota_list(self.context, sort_key='project_id') res_proj_ids = [r.project_id for r in res] self.assertEqual(sorted(project_ids), res_proj_ids) def test_get_quota_list_invalid_sort_key(self): project_ids = [] for i in range(1, 6): project_id = 'proj-'+str(i) utils.create_test_quotas(project_id=project_id) project_ids.append(project_id) self.assertRaises(exception.InvalidParameterValue, self.dbapi.get_quota_list, self.context, sort_key='invalid') def test_get_quota_list_with_filters(self): quota1 = utils.create_test_quotas(project_id='proj-1', resource='res1') quota2 = utils.create_test_quotas(project_id='proj-1', resource='res2') quota3 = utils.create_test_quotas(project_id='proj-2', resource='res1') res = self.dbapi.get_quota_list( self.context, filters={'resource': 'res2'}) self.assertEqual(quota2.project_id, res[0].project_id) res = self.dbapi.get_quota_list( self.context, filters={'project_id': 'proj-2'}) self.assertEqual(quota3.project_id, res[0].project_id) res = self.dbapi.get_quota_list( self.context, filters={'project_id': 'proj-1'}) self.assertEqual(sorted([quota1.project_id, quota2.project_id]), sorted([r.project_id for r in res])) def test_update_quota(self): q = utils.create_test_quotas(hard_limit=5, project_id='1234', resource='Cluster') res = self.dbapi.get_quota_by_project_id_resource('1234', 'Cluster') self.assertEqual(q.hard_limit, res.hard_limit) self.assertEqual(q.project_id, res.project_id) self.assertEqual(q.resource, res.resource) quota_dict = {'resource': 'Cluster', 'hard_limit': 15} self.dbapi.update_quota('1234', quota_dict) res = self.dbapi.get_quota_by_project_id_resource('1234', 'Cluster') self.assertEqual(quota_dict['hard_limit'], res.hard_limit) 
self.assertEqual(quota_dict['resource'], res.resource) def test_update_quota_not_found(self): utils.create_test_quotas(hard_limit=5, project_id='1234', resource='Cluster') quota_dict = {'resource': 'Cluster', 'hard_limit': 15} self.assertRaises(exception.QuotaNotFound, self.dbapi.update_quota, 'invalid_proj', quota_dict) def test_delete_quota(self): q = utils.create_test_quotas(project_id='123', resource='test-res', hard_limit=5) res = self.dbapi.get_quota_by_project_id_resource('123', 'test-res') self.assertEqual(q.hard_limit, res.hard_limit) self.assertEqual(q.project_id, res.project_id) self.assertEqual(q.resource, res.resource) self.dbapi.delete_quota(q.project_id, q.resource) self.assertRaises(exception.QuotaNotFound, self.dbapi.get_quota_by_project_id_resource, project_id='123', resource='bad-res') def test_delete_quota_that_does_not_exist(self): # Make sure that quota does not exist self.assertRaises(exception.QuotaNotFound, self.dbapi.get_quota_by_project_id_resource, project_id='123', resource='bad-res') # Now try to delete non-existing quota self.assertRaises(exception.QuotaNotFound, self.dbapi.delete_quota, project_id='123', resource='bad-res') magnum-6.1.0/magnum/tests/unit/db/utils.py0000666000175100017510000002453013244017334020541 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Magnum test utilities.""" from magnum.db import api as db_api def get_test_cluster_template(**kw): return { 'id': kw.get('id', 32), 'project_id': kw.get('project_id', 'fake_project'), 'user_id': kw.get('user_id', 'fake_user'), 'uuid': kw.get('uuid', 'e74c40e0-d825-11e2-a28f-0800200c9a66'), 'name': kw.get('name', 'clustermodel1'), 'image_id': kw.get('image_id', 'ubuntu'), 'flavor_id': kw.get('flavor_id', 'm1.small'), 'master_flavor_id': kw.get('master_flavor_id', 'm1.small'), 'keypair_id': kw.get('keypair_id', 'keypair1'), 'external_network_id': kw.get('external_network_id', 'd1f02cfb-d27f-4068-9332-84d907cb0e2e'), 'fixed_network': kw.get('fixed_network', 'private'), 'fixed_subnet': kw.get('fixed_network', 'private-subnet'), 'network_driver': kw.get('network_driver'), 'volume_driver': kw.get('volume_driver'), 'dns_nameserver': kw.get('dns_nameserver', '8.8.1.1'), 'apiserver_port': kw.get('apiserver_port', 8080), 'docker_volume_size': kw.get('docker_volume_size', 20), 'docker_storage_driver': kw.get('docker_storage_driver', 'devicemapper'), 'cluster_distro': kw.get('cluster_distro', 'fedora-atomic'), 'coe': kw.get('coe', 'swarm'), 'created_at': kw.get('created_at'), 'updated_at': kw.get('updated_at'), 'labels': kw.get('labels', {'key1': 'val1', 'key2': 'val2'}), 'http_proxy': kw.get('http_proxy', 'fake_http_proxy'), 'https_proxy': kw.get('https_proxy', 'fake_https_proxy'), 'no_proxy': kw.get('no_proxy', 'fake_no_proxy'), 'registry_enabled': kw.get('registry_enabled', False), 'tls_disabled': kw.get('tls_disabled', False), 'public': kw.get('public', False), 'server_type': kw.get('server_type', 'vm'), 'insecure_registry': kw.get('insecure_registry', '10.0.0.1:5000'), 'master_lb_enabled': kw.get('master_lb_enabled', True), 'floating_ip_enabled': kw.get('floating_ip_enabled', True), } def create_test_cluster_template(**kw): """Create and return test ClusterTemplate DB object. Function to be used to create test ClusterTemplate objects in the database. 
:param kw: kwargs with overriding values for ClusterTemplate's attributes. :returns: Test ClusterTemplate DB object. """ cluster_template = get_test_cluster_template(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del cluster_template['id'] dbapi = db_api.get_instance() return dbapi.create_cluster_template(cluster_template) def get_test_cluster(**kw): attrs = { 'id': kw.get('id', 42), 'uuid': kw.get('uuid', '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'), 'name': kw.get('name', 'cluster1'), 'discovery_url': kw.get('discovery_url', None), 'ca_cert_ref': kw.get('ca_cert_ref', None), 'magnum_cert_ref': kw.get('magnum_cert_ref', None), 'project_id': kw.get('project_id', 'fake_project'), 'user_id': kw.get('user_id', 'fake_user'), 'cluster_template_id': kw.get('cluster_template_id', 'e74c40e0-d825-11e2-a28f-0800200c9a66'), 'stack_id': kw.get('stack_id', '047c6319-7abd-4bd9-a033-8c6af0173cd0'), 'status': kw.get('status', 'CREATE_IN_PROGRESS'), 'status_reason': kw.get('status_reason', 'Completed successfully'), 'create_timeout': kw.get('create_timeout', 60), 'api_address': kw.get('api_address', '172.17.2.3'), 'node_addresses': kw.get('node_addresses', ['172.17.2.4']), 'node_count': kw.get('node_count', 3), 'master_count': kw.get('master_count', 3), 'master_addresses': kw.get('master_addresses', ['172.17.2.18']), 'created_at': kw.get('created_at'), 'updated_at': kw.get('updated_at'), 'docker_volume_size': kw.get('docker_volume_size'), 'labels': kw.get('labels'), 'master_flavor_id': kw.get('master_flavor_id', None), 'flavor_id': kw.get('flavor_id', None), } # Only add Keystone trusts related attributes on demand since they may # break other tests. for attr in ['trustee_username', 'trustee_password', 'trust_id']: if attr in kw: attrs[attr] = kw[attr] return attrs def create_test_cluster(**kw): """Create test cluster entry in DB and return Cluster DB object. Function to be used to create test Cluster objects in the database. 
:param kw: kwargs with overriding values for cluster's attributes. :returns: Test Cluster DB object. """ cluster = get_test_cluster(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del cluster['id'] dbapi = db_api.get_instance() return dbapi.create_cluster(cluster) def get_test_quota(**kw): attrs = { 'id': kw.get('id', 42), 'project_id': kw.get('project_id', 'fake_project'), 'resource': kw.get('resource', 'Cluster'), 'hard_limit': kw.get('hard_limit', 10) } return attrs def create_test_quota(**kw): """Create test quota entry in DB and return Quota DB object. Function to be used to create test Quota objects in the database. :param kw: kwargs with overriding values for quota's attributes. :returns: Test Quota DB object. """ quota = get_test_quota(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del quota['id'] dbapi = db_api.get_instance() return dbapi.create_quota(quota) def get_test_x509keypair(**kw): return { 'id': kw.get('id', 42), 'uuid': kw.get('uuid', '72625085-c507-4410-9b28-cd7cf1fbf1ad'), 'project_id': kw.get('project_id', 'fake_project'), 'user_id': kw.get('user_id', 'fake_user'), 'certificate': kw.get('certificate', 'certificate'), 'private_key': kw.get('private_key', 'private_key'), 'private_key_passphrase': kw.get('private_key_passphrase', 'private_key_passphrase'), 'intermediates': kw.get('intermediates', 'intermediates'), 'created_at': kw.get('created_at'), 'updated_at': kw.get('updated_at'), } def create_test_x509keypair(**kw): """Create test x509keypair entry in DB and return X509KeyPair DB object. Function to be used to create test X509KeyPair objects in the database. :param kw: kwargs with overriding values for x509keypair's attributes. :returns: Test X509KeyPair DB object. 
""" x509keypair = get_test_x509keypair(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del x509keypair['id'] dbapi = db_api.get_instance() return dbapi.create_x509keypair(x509keypair) def get_test_magnum_service(**kw): return { 'id': kw.get('', 13), 'report_count': kw.get('report_count', 13), 'host': kw.get('host', 'fakehost'), 'binary': kw.get('binary', 'fake-bin'), 'disabled': kw.get('disabled', False), 'disabled_reason': kw.get('disabled_reason', 'fake-reason'), 'forced_down': kw.get('forced_down', False), 'last_seen_up': kw.get('last_seen_up'), 'created_at': kw.get('created_at'), 'updated_at': kw.get('updated_at'), } def create_test_magnum_service(**kw): """Create test magnum_service entry in DB and return magnum_service DB object. :param kw: kwargs with overriding values for magnum_service's attributes. :returns: Test magnum_service DB object. """ magnum_service = get_test_magnum_service(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del magnum_service['id'] dbapi = db_api.get_instance() return dbapi.create_magnum_service(magnum_service) def get_test_quotas(**kw): return { 'id': kw.get('', 18), 'project_id': kw.get('project_id', 'fake_project'), 'resource': kw.get('resource', 'Cluster'), 'hard_limit': kw.get('hard_limit', 10), 'created_at': kw.get('created_at'), 'updated_at': kw.get('updated_at'), } def create_test_quotas(**kw): """Create test quotas entry in DB and return quotas DB object. :param kw: kwargs with overriding values for quota attributes. :returns: Test quotas DB object. 
""" quotas = get_test_quotas(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del quotas['id'] dbapi = db_api.get_instance() return dbapi.create_quota(quotas) def get_test_federation(**kw): return { 'id': kw.get('id', 42), 'uuid': kw.get('uuid', '60d6dbdc-9951-4cee-b020-55d3e15a749b'), 'name': kw.get('name', 'fake-name'), 'project_id': kw.get('project_id', 'fake_project'), 'hostcluster_id': kw.get('hostcluster_id', 'fake_master'), 'member_ids': kw.get('member_ids', ['fake_member1', 'fake_member2']), 'properties': kw.get('properties', {'dns-zone': 'example.com.'}), 'status': kw.get('status', 'CREATE_IN_PROGRESS'), 'status_reason': kw.get('status_reason', 'Completed successfully.'), 'created_at': kw.get('created_at'), 'updated_at': kw.get('updated_at') } def create_test_federation(**kw): """Create test federation entry in DB and return federation DB object. :param kw: kwargs with overriding values for federation attributes. :return: Test quotas DB object. """ federation = get_test_federation(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del federation['id'] dbapi = db_api.get_instance() return dbapi.create_federation(federation) magnum-6.1.0/magnum/tests/fake_notifier.py0000666000175100017510000000317713244017334020646 0ustar zuulzuul00000000000000# Copyright 2016 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections import functools NOTIFICATIONS = [] def reset(): del NOTIFICATIONS[:] FakeMessage = collections.namedtuple('Message', [ 'publisher_id', 'priority', 'event_type', 'payload', 'context']) class FakeNotifier(object): def __init__(self, transport, publisher_id=None, driver=None, topic=None, serializer=None, retry=None): self.transport = transport self.publisher_id = publisher_id or 'fake.id' for priority in ('debug', 'info', 'warn', 'error', 'critical'): setattr( self, priority, functools.partial(self._notify, priority=priority.upper())) def prepare(self, publisher_id=None): if publisher_id is None: publisher_id = self.publisher_id return self.__class__(self.transport, publisher_id=publisher_id) def _notify(self, ctxt, event_type, payload, priority): msg = FakeMessage(self.publisher_id, priority, event_type, payload, ctxt) NOTIFICATIONS.append(msg) magnum-6.1.0/magnum/tests/utils.py0000666000175100017510000000141313244017334017170 0ustar zuulzuul00000000000000# Copyright 2013 - Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from magnum.common import context as magnum_context def dummy_context(user='test_username', project_id='test_tenant_id'): return magnum_context.RequestContext(user=user, project_id=project_id) magnum-6.1.0/magnum/__init__.py0000666000175100017510000000133513244017334016430 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import threading import pbr.version __version__ = pbr.version.VersionInfo( 'magnum').version_string() # Make a project global TLS trace storage repository TLS = threading.local() magnum-6.1.0/magnum/db/0000775000175100017510000000000013244017675014710 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/db/sqlalchemy/0000775000175100017510000000000013244017675017052 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/db/sqlalchemy/migration.py0000666000175100017510000000463013244017334021412 0ustar zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os from oslo_db.sqlalchemy.migration_cli import manager import magnum.conf CONF = magnum.conf.CONF _MANAGER = None def get_manager(): global _MANAGER if not _MANAGER: alembic_path = os.path.abspath( os.path.join(os.path.dirname(__file__), 'alembic.ini')) migrate_path = os.path.abspath( os.path.join(os.path.dirname(__file__), 'alembic')) migration_config = {'alembic_ini_path': alembic_path, 'alembic_repo_path': migrate_path, 'db_url': CONF.database.connection} _MANAGER = manager.MigrationManager(migration_config) return _MANAGER def version(): """Current database version. :returns: Database version :rtype: string """ return get_manager().version() def upgrade(version): """Used for upgrading database. :param version: Desired database version :type version: string """ version = version or 'head' get_manager().upgrade(version) def stamp(revision): """Stamps database with provided revision. Don't run any migrations. :param revision: Should match one from repository or head - to stamp database with most recent revision :type revision: string """ get_manager().stamp(revision) def revision(message=None, autogenerate=False): """Creates template for migration. :param message: Text that will be used for migration title :type message: string :param autogenerate: If True - generates diff based on current database state :type autogenerate: bool """ return get_manager().revision(message=message, autogenerate=autogenerate) magnum-6.1.0/magnum/db/sqlalchemy/alembic.ini0000666000175100017510000000171713244017334021147 0ustar zuulzuul00000000000000# A generic, single database configuration. 
[alembic] # path to migration scripts script_location = %(here)s/alembic # template used to generate migration files # file_template = %%(rev)s_%%(slug)s # max length of characters to apply to the # "slug" field #truncate_slug_length = 40 # set to 'true' to run the environment during # the 'revision' command, regardless of autogenerate # revision_environment = false #sqlalchemy.url = driver://user:pass@localhost/dbname # Logging configuration [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = INFO handlers = qualname = alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%S magnum-6.1.0/magnum/db/sqlalchemy/api.py0000666000175100017510000007100013244017334020165 0ustar zuulzuul00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""SQLAlchemy storage backend.""" from oslo_db import exception as db_exc from oslo_db.sqlalchemy import session as db_session from oslo_db.sqlalchemy import utils as db_utils from oslo_log import log from oslo_utils import importutils from oslo_utils import strutils from oslo_utils import timeutils from oslo_utils import uuidutils import sqlalchemy as sa from sqlalchemy.orm.exc import MultipleResultsFound from sqlalchemy.orm.exc import NoResultFound from sqlalchemy.sql import func from magnum.common import clients from magnum.common import context as request_context from magnum.common import exception import magnum.conf from magnum.db import api from magnum.db.sqlalchemy import models from magnum.i18n import _ profiler_sqlalchemy = importutils.try_import('osprofiler.sqlalchemy') CONF = magnum.conf.CONF LOG = log.getLogger(__name__) _FACADE = None def _create_facade_lazily(): global _FACADE if _FACADE is None: _FACADE = db_session.EngineFacade.from_config(CONF) if profiler_sqlalchemy: if CONF.profiler.enabled and CONF.profiler.trace_sqlalchemy: profiler_sqlalchemy.add_tracing(sa, _FACADE.get_engine(), "db") return _FACADE def get_engine(): facade = _create_facade_lazily() return facade.get_engine() def get_session(**kwargs): facade = _create_facade_lazily() return facade.get_session(**kwargs) def get_backend(): """The backend is this module itself.""" return Connection() def model_query(model, *args, **kwargs): """Query helper for simpler session usage. :param session: if present, the session to use """ session = kwargs.get('session') or get_session() query = session.query(model, *args) return query def add_identity_filter(query, value): """Adds an identity filter to a query. Filters results by ID, if supplied value is a valid integer. Otherwise attempts to filter results by UUID. :param query: Initial query to add filter to. :param value: Value for filtering results by. :return: Modified query. 
""" if strutils.is_int_like(value): return query.filter_by(id=value) elif uuidutils.is_uuid_like(value): return query.filter_by(uuid=value) else: raise exception.InvalidIdentity(identity=value) def _paginate_query(model, limit=None, marker=None, sort_key=None, sort_dir=None, query=None): if not query: query = model_query(model) sort_keys = ['id'] if sort_key and sort_key not in sort_keys: sort_keys.insert(0, sort_key) try: query = db_utils.paginate_query(query, model, limit, sort_keys, marker=marker, sort_dir=sort_dir) except db_exc.InvalidSortKey: raise exception.InvalidParameterValue( _('The sort_key value "%(key)s" is an invalid field for sorting') % {'key': sort_key}) return query.all() class Connection(api.Connection): """SqlAlchemy connection.""" def __init__(self): pass def _add_tenant_filters(self, context, query): if context.is_admin and context.all_tenants: return query admin_context = request_context.make_admin_context(all_tenants=True) osc = clients.OpenStackClients(admin_context) kst = osc.keystone() # User in a regular project (not in the trustee domain) if context.project_id and context.domain_id != kst.trustee_domain_id: query = query.filter_by(project_id=context.project_id) # Match project ID component in trustee user's user name against # cluster's project_id to associate per-cluster trustee users who have # no project information with the project their clusters/cluster models # reside in. This is equivalent to the project filtering above. 
elif context.domain_id == kst.trustee_domain_id: user_name = kst.client.users.get(context.user_id).name user_project = user_name.split('_', 2)[1] query = query.filter_by(project_id=user_project) else: query = query.filter_by(user_id=context.user_id) return query def _add_clusters_filters(self, query, filters): if filters is None: filters = {} possible_filters = ["cluster_template_id", "name", "node_count", "master_count", "stack_id", "api_address", "node_addresses", "project_id", "user_id"] filter_names = set(filters).intersection(possible_filters) filter_dict = {filter_name: filters[filter_name] for filter_name in filter_names} query = query.filter_by(**filter_dict) if 'status' in filters: query = query.filter(models.Cluster.status.in_(filters['status'])) return query def get_cluster_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None): query = model_query(models.Cluster) query = self._add_tenant_filters(context, query) query = self._add_clusters_filters(query, filters) return _paginate_query(models.Cluster, limit, marker, sort_key, sort_dir, query) def create_cluster(self, values): # ensure defaults are present for new clusters if not values.get('uuid'): values['uuid'] = uuidutils.generate_uuid() cluster = models.Cluster() cluster.update(values) try: cluster.save() except db_exc.DBDuplicateEntry: raise exception.ClusterAlreadyExists(uuid=values['uuid']) return cluster def get_cluster_by_id(self, context, cluster_id): query = model_query(models.Cluster) query = self._add_tenant_filters(context, query) query = query.filter_by(id=cluster_id) try: return query.one() except NoResultFound: raise exception.ClusterNotFound(cluster=cluster_id) def get_cluster_by_name(self, context, cluster_name): query = model_query(models.Cluster) query = self._add_tenant_filters(context, query) query = query.filter_by(name=cluster_name) try: return query.one() except MultipleResultsFound: raise exception.Conflict('Multiple clusters exist with same 
name.' ' Please use the cluster uuid instead.') except NoResultFound: raise exception.ClusterNotFound(cluster=cluster_name) def get_cluster_by_uuid(self, context, cluster_uuid): query = model_query(models.Cluster) query = self._add_tenant_filters(context, query) query = query.filter_by(uuid=cluster_uuid) try: return query.one() except NoResultFound: raise exception.ClusterNotFound(cluster=cluster_uuid) def get_cluster_stats(self, context, project_id=None): query = model_query(models.Cluster) node_count_col = models.Cluster.node_count master_count_col = models.Cluster.master_count ncfunc = func.sum(node_count_col + master_count_col) if project_id: query = query.filter_by(project_id=project_id) nquery = query.session.query(ncfunc.label("nodes")).filter_by( project_id=project_id) else: nquery = query.session.query(ncfunc.label("nodes")) clusters = query.count() nodes = int(nquery.one()[0]) if nquery.one()[0] else 0 return clusters, nodes def get_cluster_count_all(self, context, filters=None): query = model_query(models.Cluster) query = self._add_tenant_filters(context, query) query = self._add_clusters_filters(query, filters) return query.count() def destroy_cluster(self, cluster_id): session = get_session() with session.begin(): query = model_query(models.Cluster, session=session) query = add_identity_filter(query, cluster_id) try: query.one() except NoResultFound: raise exception.ClusterNotFound(cluster=cluster_id) query.delete() def update_cluster(self, cluster_id, values): # NOTE(dtantsur): this can lead to very strange errors if 'uuid' in values: msg = _("Cannot overwrite UUID for an existing Cluster.") raise exception.InvalidParameterValue(err=msg) return self._do_update_cluster(cluster_id, values) def _do_update_cluster(self, cluster_id, values): session = get_session() with session.begin(): query = model_query(models.Cluster, session=session) query = add_identity_filter(query, cluster_id) try: ref = query.with_lockmode('update').one() except NoResultFound: 
raise exception.ClusterNotFound(cluster=cluster_id) ref.update(values) return ref def _add_cluster_template_filters(self, query, filters): if filters is None: filters = {} possible_filters = ["name", "image_id", "flavor_id", "master_flavor_id", "keypair_id", "external_network_id", "dns_nameserver", "project_id", "user_id", "labels"] filter_names = set(filters).intersection(possible_filters) filter_dict = {filter_name: filters[filter_name] for filter_name in filter_names} return query.filter_by(**filter_dict) def get_cluster_template_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None): query = model_query(models.ClusterTemplate) query = self._add_tenant_filters(context, query) query = self._add_cluster_template_filters(query, filters) # include public ClusterTemplates public_q = model_query(models.ClusterTemplate).filter_by(public=True) query = query.union(public_q) return _paginate_query(models.ClusterTemplate, limit, marker, sort_key, sort_dir, query) def create_cluster_template(self, values): # ensure defaults are present for new ClusterTemplates if not values.get('uuid'): values['uuid'] = uuidutils.generate_uuid() cluster_template = models.ClusterTemplate() cluster_template.update(values) try: cluster_template.save() except db_exc.DBDuplicateEntry: raise exception.ClusterTemplateAlreadyExists(uuid=values['uuid']) return cluster_template def get_cluster_template_by_id(self, context, cluster_template_id): query = model_query(models.ClusterTemplate) query = self._add_tenant_filters(context, query) public_q = model_query(models.ClusterTemplate).filter_by(public=True) query = query.union(public_q) query = query.filter_by(id=cluster_template_id) try: return query.one() except NoResultFound: raise exception.ClusterTemplateNotFound( clustertemplate=cluster_template_id) def get_cluster_template_by_uuid(self, context, cluster_template_uuid): query = model_query(models.ClusterTemplate) query = self._add_tenant_filters(context, query) 
public_q = model_query(models.ClusterTemplate).filter_by(public=True) query = query.union(public_q) query = query.filter_by(uuid=cluster_template_uuid) try: return query.one() except NoResultFound: raise exception.ClusterTemplateNotFound( clustertemplate=cluster_template_uuid) def get_cluster_template_by_name(self, context, cluster_template_name): query = model_query(models.ClusterTemplate) query = self._add_tenant_filters(context, query) public_q = model_query(models.ClusterTemplate).filter_by(public=True) query = query.union(public_q) query = query.filter_by(name=cluster_template_name) try: return query.one() except MultipleResultsFound: raise exception.Conflict('Multiple ClusterTemplates exist with' ' same name. Please use the ' 'ClusterTemplate uuid instead.') except NoResultFound: raise exception.ClusterTemplateNotFound( clustertemplate=cluster_template_name) def _is_cluster_template_referenced(self, session, cluster_template_uuid): """Checks whether the ClusterTemplate is referenced by cluster(s).""" query = model_query(models.Cluster, session=session) query = self._add_clusters_filters(query, {'cluster_template_id': cluster_template_uuid}) return query.count() != 0 def _is_publishing_cluster_template(self, values): if (len(values) == 1 and 'public' in values and values['public'] is True): return True return False def destroy_cluster_template(self, cluster_template_id): session = get_session() with session.begin(): query = model_query(models.ClusterTemplate, session=session) query = add_identity_filter(query, cluster_template_id) try: cluster_template_ref = query.one() except NoResultFound: raise exception.ClusterTemplateNotFound( clustertemplate=cluster_template_id) if self._is_cluster_template_referenced( session, cluster_template_ref['uuid']): raise exception.ClusterTemplateReferenced( clustertemplate=cluster_template_id) query.delete() def update_cluster_template(self, cluster_template_id, values): # NOTE(dtantsur): this can lead to very strange errors if 
'uuid' in values: msg = _("Cannot overwrite UUID for an existing ClusterTemplate.") raise exception.InvalidParameterValue(err=msg) return self._do_update_cluster_template(cluster_template_id, values) def _do_update_cluster_template(self, cluster_template_id, values): session = get_session() with session.begin(): query = model_query(models.ClusterTemplate, session=session) query = add_identity_filter(query, cluster_template_id) try: ref = query.with_lockmode('update').one() except NoResultFound: raise exception.ClusterTemplateNotFound( clustertemplate=cluster_template_id) if self._is_cluster_template_referenced(session, ref['uuid']): # we only allow to update ClusterTemplate to be public if not self._is_publishing_cluster_template(values): raise exception.ClusterTemplateReferenced( clustertemplate=cluster_template_id) ref.update(values) return ref def create_x509keypair(self, values): # ensure defaults are present for new x509keypairs if not values.get('uuid'): values['uuid'] = uuidutils.generate_uuid() x509keypair = models.X509KeyPair() x509keypair.update(values) try: x509keypair.save() except db_exc.DBDuplicateEntry: raise exception.X509KeyPairAlreadyExists(uuid=values['uuid']) return x509keypair def get_x509keypair_by_id(self, context, x509keypair_id): query = model_query(models.X509KeyPair) query = self._add_tenant_filters(context, query) query = query.filter_by(id=x509keypair_id) try: return query.one() except NoResultFound: raise exception.X509KeyPairNotFound(x509keypair=x509keypair_id) def get_x509keypair_by_uuid(self, context, x509keypair_uuid): query = model_query(models.X509KeyPair) query = self._add_tenant_filters(context, query) query = query.filter_by(uuid=x509keypair_uuid) try: return query.one() except NoResultFound: raise exception.X509KeyPairNotFound(x509keypair=x509keypair_uuid) def destroy_x509keypair(self, x509keypair_id): session = get_session() with session.begin(): query = model_query(models.X509KeyPair, session=session) query = 
add_identity_filter(query, x509keypair_id) count = query.delete() if count != 1: raise exception.X509KeyPairNotFound(x509keypair_id) def update_x509keypair(self, x509keypair_id, values): # NOTE(dtantsur): this can lead to very strange errors if 'uuid' in values: msg = _("Cannot overwrite UUID for an existing X509KeyPair.") raise exception.InvalidParameterValue(err=msg) return self._do_update_x509keypair(x509keypair_id, values) def _do_update_x509keypair(self, x509keypair_id, values): session = get_session() with session.begin(): query = model_query(models.X509KeyPair, session=session) query = add_identity_filter(query, x509keypair_id) try: ref = query.with_lockmode('update').one() except NoResultFound: raise exception.X509KeyPairNotFound(x509keypair=x509keypair_id) ref.update(values) return ref def _add_x509keypairs_filters(self, query, filters): if filters is None: filters = {} if 'project_id' in filters: query = query.filter_by(project_id=filters['project_id']) if 'user_id' in filters: query = query.filter_by(user_id=filters['user_id']) return query def get_x509keypair_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None): query = model_query(models.X509KeyPair) query = self._add_tenant_filters(context, query) query = self._add_x509keypairs_filters(query, filters) return _paginate_query(models.X509KeyPair, limit, marker, sort_key, sort_dir, query) def destroy_magnum_service(self, magnum_service_id): session = get_session() with session.begin(): query = model_query(models.MagnumService, session=session) query = add_identity_filter(query, magnum_service_id) count = query.delete() if count != 1: raise exception.MagnumServiceNotFound( magnum_service_id=magnum_service_id) def update_magnum_service(self, magnum_service_id, values): session = get_session() with session.begin(): query = model_query(models.MagnumService, session=session) query = add_identity_filter(query, magnum_service_id) try: ref = query.with_lockmode('update').one() 
except NoResultFound: raise exception.MagnumServiceNotFound( magnum_service_id=magnum_service_id) if 'report_count' in values: if values['report_count'] > ref.report_count: ref.last_seen_up = timeutils.utcnow() ref.update(values) return ref def get_magnum_service_by_host_and_binary(self, host, binary): query = model_query(models.MagnumService) query = query.filter_by(host=host, binary=binary) try: return query.one() except NoResultFound: return None def create_magnum_service(self, values): magnum_service = models.MagnumService() magnum_service.update(values) try: magnum_service.save() except db_exc.DBDuplicateEntry: host = values["host"] binary = values["binary"] LOG.warning("Magnum service with same host:%(host)s and" " binary:%(binary)s had been saved into DB", {'host': host, 'binary': binary}) query = model_query(models.MagnumService) query = query.filter_by(host=host, binary=binary) return query.one() return magnum_service def get_magnum_service_list(self, disabled=None, limit=None, marker=None, sort_key=None, sort_dir=None ): query = model_query(models.MagnumService) if disabled: query = query.filter_by(disabled=disabled) return _paginate_query(models.MagnumService, limit, marker, sort_key, sort_dir, query) def create_quota(self, values): quotas = models.Quota() quotas.update(values) try: quotas.save() except db_exc.DBDuplicateEntry: raise exception.QuotaAlreadyExists(project_id=values['project_id'], resource=values['resource']) return quotas def _add_quota_filters(self, query, filters): if filters is None: filters = {} possible_filters = ["resource", "project_id"] filter_names = set(filters).intersection(possible_filters) filter_dict = {filter_name: filters[filter_name] for filter_name in filter_names} query = query.filter_by(**filter_dict) return query def get_quota_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None): query = model_query(models.Quota) query = self._add_quota_filters(query, filters) return 
_paginate_query(models.Quota, limit, marker, sort_key, sort_dir, query) def update_quota(self, project_id, values): session = get_session() with session.begin(): query = model_query(models.Quota, session=session) resource = values['resource'] try: query = query.filter_by(project_id=project_id).filter_by( resource=resource) ref = query.with_lockmode('update').one() except NoResultFound: msg = (_('project_id %(project_id)s resource %(resource)s.') % {'project_id': project_id, 'resource': resource}) raise exception.QuotaNotFound(msg=msg) ref.update(values) return ref def delete_quota(self, project_id, resource): session = get_session() with session.begin(): query = model_query(models.Quota, session=session) try: query.filter_by(project_id=project_id).filter_by( resource=resource).one() except NoResultFound: msg = (_('project_id %(project_id)s resource %(resource)s.') % {'project_id': project_id, 'resource': resource}) raise exception.QuotaNotFound(msg=msg) query.delete() def get_quota_by_id(self, context, quota_id): query = model_query(models.Quota) query = query.filter_by(id=quota_id) try: return query.one() except NoResultFound: msg = _('quota id %s .') % quota_id raise exception.QuotaNotFound(msg=msg) def quota_get_all_by_project_id(self, project_id): query = model_query(models.Quota) result = query.filter_by(project_id=project_id).all() return result def get_quota_by_project_id_resource(self, project_id, resource): query = model_query(models.Quota) query = query.filter_by(project_id=project_id).filter_by( resource=resource) try: return query.one() except NoResultFound: msg = (_('project_id %(project_id)s resource %(resource)s.') % {'project_id': project_id, 'resource': resource}) raise exception.QuotaNotFound(msg=msg) def _add_federation_filters(self, query, filters): if filters is None: filters = {} possible_filters = ["name", "project_id", "hostcluster_id", "member_ids", "properties"] # TODO(clenimar): implement 'member_ids' filter as a contains query, # so we 
return all the federations that have the given clusters, # instead of all the federations that *only* have the exact given # clusters. filter_names = set(filters).intersection(possible_filters) filter_dict = {filter_name: filters[filter_name] for filter_name in filter_names} query = query.filter_by(**filter_dict) if 'status' in filters: query = query.filter( models.Federation.status.in_(filters['status'])) return query def get_federation_by_id(self, context, federation_id): query = model_query(models.Federation) query = self._add_tenant_filters(context, query) query = query.filter_by(id=federation_id) try: return query.one() except NoResultFound: raise exception.FederationNotFound(federation=federation_id) def get_federation_by_uuid(self, context, federation_uuid): query = model_query(models.Federation) query = self._add_tenant_filters(context, query) query = query.filter_by(uuid=federation_uuid) try: return query.one() except NoResultFound: raise exception.FederationNotFound(federation=federation_uuid) def get_federation_by_name(self, context, federation_name): query = model_query(models.Federation) query = self._add_tenant_filters(context, query) query = query.filter_by(name=federation_name) try: return query.one() except MultipleResultsFound: raise exception.Conflict('Multiple federations exist with same ' 'name. 
Please use the federation uuid ' 'instead.') except NoResultFound: raise exception.FederationNotFound(federation=federation_name) def get_federation_list(self, context, limit=None, marker=None, sort_key=None, sort_dir=None, filters=None): query = model_query(models.Federation) query = self._add_tenant_filters(context, query) query = self._add_federation_filters(query, filters) return _paginate_query(models.Federation, limit, marker, sort_key, sort_dir, query) def create_federation(self, values): if not values.get('uuid'): values['uuid'] = uuidutils.generate_uuid() federation = models.Federation() federation.update(values) try: federation.save() except db_exc.DBDuplicateEntry: raise exception.FederationAlreadyExists(uuid=values['uuid']) return federation def destroy_federation(self, federation_id): session = get_session() with session.begin(): query = model_query(models.Federation, session=session) query = add_identity_filter(query, federation_id) try: query.one() except NoResultFound: raise exception.FederationNotFound(federation=federation_id) query.delete() def update_federation(self, federation_id, values): if 'uuid' in values: msg = _("Cannot overwrite UUID for an existing Federation.") raise exception.InvalidParameterValue(err=msg) return self._do_update_federation(federation_id, values) def _do_update_federation(self, federation_id, values): session = get_session() with session.begin(): query = model_query(models.Federation, session=session) query = add_identity_filter(query, federation_id) try: ref = query.with_lockmode('update').one() except NoResultFound: raise exception.FederationNotFound(federation=federation_id) ref.update(values) return ref magnum-6.1.0/magnum/db/sqlalchemy/models.py0000666000175100017510000002036213244017343020704 0ustar zuulzuul00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SQLAlchemy models for container service """ import json from oslo_db.sqlalchemy import models import six.moves.urllib.parse as urlparse from sqlalchemy import Boolean from sqlalchemy import Column from sqlalchemy import DateTime from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import Integer from sqlalchemy import schema from sqlalchemy import String from sqlalchemy import Text from sqlalchemy.types import TypeDecorator, TEXT import magnum.conf CONF = magnum.conf.CONF def table_args(): engine_name = urlparse.urlparse(CONF.database.connection).scheme if engine_name == 'mysql': return {'mysql_engine': CONF.database.mysql_engine, 'mysql_charset': "utf8"} return None class JsonEncodedType(TypeDecorator): """Abstract base type serialized as json-encoded string in db.""" type = None impl = TEXT def process_bind_param(self, value, dialect): if value is None: # Save default value according to current type to keep the # interface the consistent. 
value = self.type() elif not isinstance(value, self.type): raise TypeError("%(class)s supposes to store " "%(type)s objects, but %(value)s " "given" % {'class': self.__class__.__name__, 'type': self.type.__name__, 'value': type(value).__name__}) serialized_value = json.dumps(value) return serialized_value def process_result_value(self, value, dialect): if value is not None: value = json.loads(value) return value class JSONEncodedDict(JsonEncodedType): """Represents dict serialized as json-encoded string in db.""" type = dict class JSONEncodedList(JsonEncodedType): """Represents list serialized as json-encoded string in db.""" type = list class MagnumBase(models.TimestampMixin, models.ModelBase): metadata = None def as_dict(self): d = {} for c in self.__table__.columns: d[c.name] = self[c.name] return d def save(self, session=None): import magnum.db.sqlalchemy.api as db_api if session is None: session = db_api.get_session() super(MagnumBase, self).save(session) Base = declarative_base(cls=MagnumBase) class Cluster(Base): """Represents a Cluster.""" __tablename__ = 'cluster' __table_args__ = ( schema.UniqueConstraint('uuid', name='uniq_bay0uuid'), table_args() ) id = Column(Integer, primary_key=True) project_id = Column(String(255)) user_id = Column(String(255)) uuid = Column(String(36)) name = Column(String(255)) cluster_template_id = Column(String(255)) keypair = Column(String(255)) docker_volume_size = Column(Integer()) labels = Column(JSONEncodedDict) master_flavor_id = Column(String(255)) flavor_id = Column(String(255)) stack_id = Column(String(255)) api_address = Column(String(255)) node_addresses = Column(JSONEncodedList) node_count = Column(Integer()) master_count = Column(Integer()) status = Column(String(20)) status_reason = Column(Text) create_timeout = Column(Integer()) discovery_url = Column(String(255)) master_addresses = Column(JSONEncodedList) # TODO(wanghua): encrypt trust_id in db trust_id = Column(String(255)) trustee_username = Column(String(255)) 
trustee_user_id = Column(String(255)) # TODO(wanghua): encrypt trustee_password in db trustee_password = Column(String(255)) coe_version = Column(String(255)) container_version = Column(String(255)) # (yuanying) if we use barbican, # cert_ref size is determined by below format # * http(s)://${DOMAIN_NAME}/v1/containers/${UUID} # as a result, cert_ref length is estimated to 312 chars. # but we can use another backend to store certs. # so, we use 512 chars to get some buffer. ca_cert_ref = Column(String(512)) magnum_cert_ref = Column(String(512)) class ClusterTemplate(Base): """Represents a ClusterTemplate.""" __tablename__ = 'cluster_template' __table_args__ = ( schema.UniqueConstraint('uuid', name='uniq_baymodel0uuid'), table_args() ) id = Column(Integer, primary_key=True) uuid = Column(String(36)) project_id = Column(String(255)) user_id = Column(String(255)) name = Column(String(255)) image_id = Column(String(255)) flavor_id = Column(String(255)) master_flavor_id = Column(String(255)) keypair_id = Column(String(255)) external_network_id = Column(String(255)) fixed_network = Column(String(255)) fixed_subnet = Column(String(255)) network_driver = Column(String(255)) volume_driver = Column(String(255)) dns_nameserver = Column(String(255)) apiserver_port = Column(Integer()) docker_volume_size = Column(Integer()) docker_storage_driver = Column(String(255)) cluster_distro = Column(String(255)) coe = Column(String(255)) http_proxy = Column(String(255)) https_proxy = Column(String(255)) no_proxy = Column(String(255)) registry_enabled = Column(Boolean, default=False) labels = Column(JSONEncodedDict) tls_disabled = Column(Boolean, default=False) public = Column(Boolean, default=False) server_type = Column(String(255)) insecure_registry = Column(String(255)) master_lb_enabled = Column(Boolean, default=False) floating_ip_enabled = Column(Boolean, default=True) class X509KeyPair(Base): """X509KeyPair""" __tablename__ = 'x509keypair' __table_args__ = ( 
schema.UniqueConstraint('uuid', name='uniq_x509keypair0uuid'), table_args() ) id = Column(Integer, primary_key=True) uuid = Column(String(36)) certificate = Column(Text()) private_key = Column(Text()) private_key_passphrase = Column(Text()) intermediates = Column(Text()) project_id = Column(String(255)) user_id = Column(String(255)) class MagnumService(Base): """Represents health status of various magnum services""" __tablename__ = 'magnum_service' __table_args__ = ( schema.UniqueConstraint("host", "binary", name="uniq_magnum_service0host0binary"), table_args() ) id = Column(Integer, primary_key=True) host = Column(String(255)) binary = Column(String(255)) disabled = Column(Boolean, default=False) disabled_reason = Column(String(255)) last_seen_up = Column(DateTime, nullable=True) forced_down = Column(Boolean, default=False) report_count = Column(Integer, nullable=False, default=0) class Quota(Base): """Represents Quota for a resource within a project""" __tablename__ = 'quotas' __table_args__ = ( schema.UniqueConstraint( "project_id", "resource", name='uniq_quotas0project_id0resource'), table_args() ) id = Column(Integer, primary_key=True) project_id = Column(String(255)) resource = Column(String(255)) hard_limit = Column(Integer()) class Federation(Base): """Represents a Federation.""" __tablename__ = 'federation' __table_args__ = ( schema.UniqueConstraint("uuid", name="uniq_federation0uuid"), table_args() ) id = Column(Integer, primary_key=True) project_id = Column(String(255)) uuid = Column(String(36)) name = Column(String(255)) hostcluster_id = Column(String(255)) member_ids = Column(JSONEncodedList) status = Column(String(20)) status_reason = Column(Text) properties = Column(JSONEncodedDict) magnum-6.1.0/magnum/db/sqlalchemy/__init__.py0000666000175100017510000000000013244017334021143 0ustar zuulzuul00000000000000magnum-6.1.0/magnum/db/sqlalchemy/alembic/0000775000175100017510000000000013244017675020446 5ustar 
zuulzuul00000000000000magnum-6.1.0/magnum/db/sqlalchemy/alembic/env.py0000666000175100017510000000334713244017334021611 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from logging import config as log_config from alembic import context from magnum.db.sqlalchemy import api as sqla_api from magnum.db.sqlalchemy import models # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config # Interpret the config file for Python logging. # This line sets up loggers basically. log_config.fileConfig(config.config_file_name) # add your model's MetaData object here # for 'autogenerate' support # from myapp import mymodel target_metadata = models.Base.metadata # other values from the config, defined by the needs of env.py, # can be acquired: # my_important_option = config.get_main_option("my_important_option") # ... etc. def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. 
""" engine = sqla_api.get_engine() with engine.connect() as connection: context.configure(connection=connection, target_metadata=target_metadata) with context.begin_transaction(): context.run_migrations() run_migrations_online() magnum-6.1.0/magnum/db/sqlalchemy/alembic/script.py.mako0000666000175100017510000000053513244017334023247 0ustar zuulzuul00000000000000"""${message} Revision ID: ${up_revision} Revises: ${down_revision} Create Date: ${create_date} """ # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} from alembic import op import sqlalchemy as sa ${imports if imports else ""} def upgrade(): ${upgrades if upgrades else "pass"} magnum-6.1.0/magnum/db/sqlalchemy/alembic/README0000666000175100017510000000062513244017334021323 0ustar zuulzuul00000000000000Please see https://alembic.readthedocs.org/en/latest/index.html for general documentation To create alembic migrations use: $ magnum-db-manage revision --message "description of revision" --autogenerate Stamp db with most recent migration version, without actually running migrations $ magnum-db-manage stamp head Upgrade can be performed by: $ magnum-db-manage upgrade $ magnum-db-manage upgrade head magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/0000775000175100017510000000000013244017675022316 5ustar zuulzuul00000000000000magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/421102d1f2d2_create_x509keypair_table.py0000666000175100017510000000345413244017334031170 0ustar zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """create x509keypair table Revision ID: 421102d1f2d2 Revises: 14328d6a57e3 Create Date: 2015-07-17 13:12:12.653241 """ # revision identifiers, used by Alembic. revision = '421102d1f2d2' down_revision = '14328d6a57e3' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'x509keypair', sa.Column('id', sa.Integer(), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('bay_uuid', sa.String(length=36), nullable=True), sa.Column('ca_cert', sa.Text()), sa.Column('certificate', sa.Text()), sa.Column('private_key', sa.Text()), sa.Column('project_id', sa.String(length=255), nullable=True), sa.Column('user_id', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('id'), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) op.create_unique_constraint("uniq_x509keypair0uuid", "x509keypair", ["uuid"]) ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/27ad304554e2_adding_magnum_service_functionality.pymagnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/27ad304554e2_adding_magnum_service_functionality.0000666000175100017510000000365413244017334033334 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """adding magnum_service functionality Revision ID: 27ad304554e2 Revises: 1d045384b966 Create Date: 2015-09-01 18:27:14.371860 """ # revision identifiers, used by Alembic. revision = '27ad304554e2' down_revision = '1d045384b966' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'magnum_service', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('report_count', sa.Integer(), nullable=False), sa.Column('host', sa.String(length=255), nullable=True), sa.Column('binary', sa.String(length=255), nullable=True), sa.Column('disabled', sa.Boolean(), nullable=True), sa.Column('disabled_reason', sa.String(length=255), nullable=True), # 'last_seen_up' has different purpose than 'updated_at'. # 'updated_at' refers to any modification of the entry, which can # be administrative too, whereas 'last_seen_up' is more related to # magnum_service. Modeled after nova/servicegroup sa.Column('last_seen_up', sa.DateTime(), nullable=True), sa.Column('forced_down', sa.Boolean(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('host', 'binary', name='uniq_magnum_service0host0binary') ) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/29affeaa2bc2_rename_bay_master_address.py0000666000175100017510000000175513244017334032163 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """rename-bay-master-address Revision ID: 29affeaa2bc2 Revises: 2d1354bbf76e Create Date: 2015-03-25 16:06:08.148629 """ # revision identifiers, used by Alembic. revision = '29affeaa2bc2' down_revision = '2d1354bbf76e' from alembic import op import sqlalchemy as sa def upgrade(): op.alter_column('bay', 'master_address', new_column_name='api_address', existing_type=sa.String(255)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/a0e7c8450ab1_add_labels_to_cluster.py0000666000175100017510000000171213244017334031064 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add labels to cluster Revision ID: a0e7c8450ab1 Revises: bc46ba6cf949 Create Date: 2017-06-12 10:08:05.501441 """ # revision identifiers, used by Alembic. 
revision = 'a0e7c8450ab1' down_revision = 'aa0cc27839af' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('cluster', sa.Column('labels', sa.Text(), nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/aa0cc27839af_add_docker_volume_size_to_cluster.py0000666000175100017510000000175613244017334033611 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add docker_volume_size to cluster Revision ID: aa0cc27839af Revises: bc46ba6cf949 Create Date: 2017-06-07 13:08:02.853105 """ # revision identifiers, used by Alembic. revision = 'aa0cc27839af' down_revision = 'bc46ba6cf949' from alembic import op import sqlalchemy as sa def upgrade(): pass op.add_column('cluster', sa.Column('docker_volume_size', sa.Integer(), nullable=True)) ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/049f81f6f584_remove_ssh_authorized_key_from_baymodel.pymagnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/049f81f6f584_remove_ssh_authorized_key_from_baymo0000666000175100017510000000163413244017334033507 0ustar zuulzuul00000000000000# Copyright 2016 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """remove_ssh_authorized_key_from_baymodel Revision ID: 049f81f6f584 Revises: ee92b41b8809 Create Date: 2016-02-28 15:27:26.211244 """ # revision identifiers, used by Alembic. revision = '049f81f6f584' down_revision = 'ee92b41b8809' from alembic import op def upgrade(): op.drop_column('baymodel', 'ssh_authorized_key') magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/d072f58ab240_modify_x509keypair_table.py0000666000175100017510000000232513244017334031304 0ustar zuulzuul00000000000000# Copyright 2016 Intel Technologies India Pvt. Ld. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """modify x509keypair table Revision ID: d072f58ab240 Revises: e647f5931da8 Create Date: 2016-05-27 15:29:22.955268 """ # revision identifiers, used by Alembic. 
revision = 'd072f58ab240' down_revision = 'ef08a5e057bd' from alembic import op import sqlalchemy as sa def upgrade(): op.drop_column('x509keypair', 'bay_uuid') op.drop_column('x509keypair', 'name') op.drop_column('x509keypair', 'ca_cert') op.add_column('x509keypair', sa.Column('intermediates', sa.Text(), nullable=True)) op.add_column('x509keypair', sa.Column('private_key_passphrase', sa.Text(), nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/6f21dc920bb_add_cert_uuid_to_bay.py0000666000175100017510000000205313244017334030616 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add cert_uuuid to bay Revision ID: 6f21dc920bb Revises: 966a99e70ff Create Date: 2015-08-19 13:57:14.863292 """ # revision identifiers, used by Alembic. revision = '6f21dc920bb' down_revision = '966a99e70ff' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column( 'bay', sa.Column('ca_cert_uuid', sa.String(length=36), nullable=True)) op.add_column( 'bay', sa.Column('magnum_cert_uuid', sa.String(length=36), nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/05d3e97de9ee_add_volume_driver.py0000666000175100017510000000170613244017343030354 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add volume driver Revision ID: 05d3e97de9ee Revises: 57fbdf2327a2 Create Date: 2016-01-12 06:21:24.880838 """ # revision identifiers, used by Alembic. revision = '05d3e97de9ee' down_revision = '57fbdf2327a2' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('baymodel', sa.Column('volume_driver', sa.String(length=255), nullable=True)) ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/2ae93c9c6191_add_public_column_to_baymodel_table.pymagnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/2ae93c9c6191_add_public_column_to_baymodel_table.0000666000175100017510000000173413244017334033331 0ustar zuulzuul00000000000000# Copyright 2015 Rackspace US, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add public column to baymodel table Revision ID: 2ae93c9c6191 Revises: 5ad410481b88 Create Date: 2015-09-30 15:33:44.514290 """ # revision identifiers, used by Alembic. 
revision = '2ae93c9c6191' down_revision = '5ad410481b88' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('baymodel', sa.Column('public', sa.Boolean(), default=False)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/ef08a5e057bd_remove_pod.py0000666000175100017510000000152313244017334027001 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """remove pod object Revision ID: ef08a5e057bd Revises: e647f5931da8 Create Date: 2016-05-24 13:52:39.782156 """ # revision identifiers, used by Alembic. revision = 'ef08a5e057bd' down_revision = 'e647f5931da8' from alembic import op def upgrade(): op.drop_table('pod') magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/085e601a39f6_remove_service.py0000666000175100017510000000153313244017334027442 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""remove service object Revision ID: 085e601a39f6 Revises: a1136d335540 Create Date: 2016-05-25 12:05:30.790282 """ # revision identifiers, used by Alembic. revision = '085e601a39f6' down_revision = 'a1136d335540' from alembic import op def upgrade(): op.drop_table('service') magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/35cff7c86221_add_private_network_to_baymodel.py0000666000175100017510000000201513244017343033114 0ustar zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add private network to baymodel Revision ID: 35cff7c86221 Revises: 3a938526b35d Create Date: 2015-02-26 05:02:34.260099 """ # revision identifiers, used by Alembic. revision = '35cff7c86221' down_revision = '3a938526b35d' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('baymodel', sa.Column('fixed_network', sa.String(length=255), nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/5ad410481b88_rename_insecure.py0000666000175100017510000000174113244017334027566 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """rename-insecure Revision ID: 5ad410481b88 Revises: 27ad304554e2 Create Date: 2015-09-29 17:51:10.195121 """ # revision identifiers, used by Alembic. revision = '5ad410481b88' down_revision = '27ad304554e2' from alembic import op import sqlalchemy as sa def upgrade(): op.alter_column('baymodel', 'insecure', new_column_name='tls_disabled', existing_type=sa.Boolean()) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/5977879072a7_add_env_to_container.py0000666000175100017510000000167013244017334030456 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add-env-to-container Revision ID: 5977879072a7 Revises: 417917e778f5 Create Date: 2015-11-26 04:10:39.462966 """ # revision identifiers, used by Alembic. 
revision = '5977879072a7' down_revision = '417917e778f5' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('container', sa.Column('environment', sa.Text(), nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/adc3b7679ae_add_registry_trust_id_to_bay.py0000666000175100017510000000171613244017334032520 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add registry_trust_id to bay Revision ID: adc3b7679ae Revises: 40f325033343 Create Date: 2015-12-07 15:49:07.622122 """ # revision identifiers, used by Alembic. revision = 'adc3b7679ae' down_revision = '40f325033343' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('bay', sa.Column('registry_trust_id', sa.String(length=255), nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/4956f03cabad_add_cluster_distro.py0000666000175100017510000000170613244017343030516 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""add cluster distro Revision ID: 4956f03cabad Revises: 2d8657c0cdc Create Date: 2015-04-25 02:17:51.486547 """ # revision identifiers, used by Alembic. revision = '4956f03cabad' down_revision = '2d8657c0cdc' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('baymodel', sa.Column('cluster_distro', sa.String(length=255), nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/3a938526b35d_add_docker_volume_size.py0000666000175100017510000000201513244017334031122 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add docker volume size column Revision ID: 3a938526b35d Revises: 5793cd26898d Create Date: 2015-02-23 14:32:00.086650 """ # revision identifiers, used by Alembic. revision = '3a938526b35d' down_revision = '5793cd26898d' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('baymodel', sa.Column('docker_volume_size', sa.Integer(), nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/2d8657c0cdc_add_bay_uuid.py0000666000175100017510000000167113244017334027114 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add bay uuid Revision ID: 2d8657c0cdc Revises: e772b2598d9 Create Date: 2015-04-22 16:59:06.799384 """ # revision identifiers, used by Alembic. revision = '2d8657c0cdc' down_revision = 'e772b2598d9' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('container', sa.Column('bay_uuid', sa.String(length=255), nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/2581ebaf0cb2_initial_migration.py0000666000175100017510000001355513244017343030341 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """initial migration Revision ID: 2581ebaf0cb2 Revises: None Create Date: 2014-01-17 12:14:07.754448 """ # revision identifiers, used by Alembic. revision = '2581ebaf0cb2' down_revision = None from alembic import op import sqlalchemy as sa def upgrade(): # commands auto generated by Alembic - please adjust! 
op.create_table( 'bay', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('baymodel_id', sa.String(length=255), nullable=True), sa.Column('node_count', sa.Integer(), nullable=True), sa.Column('master_address', sa.String(length=255), nullable=True), sa.Column('minions_address', sa.Text(), nullable=True), sa.Column('stack_id', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('id'), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) op.create_table( 'baymodel', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('flavor_id', sa.String(length=255), nullable=True), sa.Column('keypair_id', sa.String(length=255), nullable=True), sa.Column('image_id', sa.String(length=255), nullable=True), sa.Column('external_network_id', sa.String(length=255), nullable=True), sa.Column('dns_nameserver', sa.String(length=255), nullable=True), sa.Column('apiserver_port', sa.Integer(), nullable=True), sa.PrimaryKeyConstraint('id'), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) op.create_table( 'container', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('image_id', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('id'), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) op.create_table( 'node', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', 
sa.DateTime(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('type', sa.String(length=20), nullable=True), sa.Column('image_id', sa.String(length=255), nullable=True), sa.Column('ironic_node_id', sa.String(length=36), nullable=True), sa.PrimaryKeyConstraint('id'), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) op.create_table( 'pod', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('desc', sa.String(length=255), nullable=True), sa.Column('bay_uuid', sa.String(length=36), nullable=True), sa.Column('images', sa.Text(), nullable=False), sa.Column('labels', sa.Text(), nullable=True), sa.Column('status', sa.String(length=255), nullable=False), sa.PrimaryKeyConstraint('id'), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) op.create_table( 'service', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('bay_uuid', sa.String(length=36), nullable=True), sa.Column('labels', sa.Text, nullable=True), sa.Column('selector', sa.Text, nullable=True), sa.Column('ip', sa.String(length=36), nullable=True), sa.Column('port', sa.Integer(), nullable=True), sa.PrimaryKeyConstraint('id'), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) op.create_table( 'replicationcontroller', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=255), 
nullable=True), sa.Column('bay_uuid', sa.String(length=36), nullable=True), sa.Column('images', sa.Text(), nullable=False), sa.Column('labels', sa.Text(), nullable=True), sa.Column('replicas', sa.Integer(), nullable=True), sa.PrimaryKeyConstraint('id'), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) # end Alembic commands magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/5793cd26898d_add_bay_status.py0000666000175100017510000000166513244017334027443 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add bay status Revision ID: 5793cd26898d Revises: 3bea56f25597 Create Date: 2015-02-09 12:54:09.449948 """ # revision identifiers, used by Alembic. revision = '5793cd26898d' down_revision = '3bea56f25597' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('bay', sa.Column('status', sa.String(length=20), nullable=True)) ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/b1f612248cab_add_floating_ip_enabled_column_to_.pymagnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/b1f612248cab_add_floating_ip_enabled_column_to_.p0000666000175100017510000000177513244017334033363 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add floating_ip_enabled column to baymodel table Revision ID: b1f612248cab Revises: 859fb45df249 Create Date: 2016-08-05 15:31:46.203266 """ # revision identifiers, used by Alembic. revision = 'b1f612248cab' down_revision = '859fb45df249' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('baymodel', sa.Column('floating_ip_enabled', sa.Boolean(), default=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/4e263f236334_add_registry_enabled.py0000666000175100017510000000170213244017334030472 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add registry_enabled Revision ID: 4e263f236334 Revises: 5518af8dbc21 Create Date: 2015-09-14 18:39:25.871218 """ # revision identifiers, used by Alembic. 
revision = '4e263f236334' down_revision = '5518af8dbc21' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('baymodel', sa.Column('registry_enabled', sa.Boolean(), default=False)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/bb42b7cad130_remove_node_object.py0000666000175100017510000000152713244017334030455 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """remove node object Revision ID: bb42b7cad130 Revises: 05d3e97de9ee Create Date: 2016-02-02 16:04:36.501547 """ # revision identifiers, used by Alembic. revision = 'bb42b7cad130' down_revision = '05d3e97de9ee' from alembic import op def upgrade(): op.drop_table('node') magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/fcb4efee8f8b_add_version_info_to_bay.py0000666000175100017510000000215513244017343031745 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""add version info to bay Revision ID: fcb4efee8f8b Revises: b1f612248cab Create Date: 2016-08-22 15:04:32.256811 """ # revision identifiers, used by Alembic. revision = 'fcb4efee8f8b' down_revision = 'b1f612248cab' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('bay', sa.Column('coe_version', sa.String(length=255), nullable=True)) op.add_column('bay', sa.Column('container_version', sa.String(length=255), nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/456126c6c9e9_create_baylock_table.py0000666000175100017510000000245713244017334030562 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """create baylock table Revision ID: 456126c6c9e9 Revises: 2ace4006498 Create Date: 2015-04-01 15:04:45.652672 """ # revision identifiers, used by Alembic. 
revision = '456126c6c9e9' down_revision = '2ace4006498' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'baylock', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('bay_uuid', sa.String(length=36), nullable=True), sa.Column('conductor_id', sa.String(length=64), nullable=True), sa.PrimaryKeyConstraint('id'), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/33ef79969018_add_memory_to_container.py0000666000175100017510000000176713244017334031262 0ustar zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add memory to container Revision ID: 33ef79969018 Revises: 2ae93c9c6191 Create Date: 2015-10-03 17:03:47.194253 """ # revision identifiers, used by Alembic. revision = '33ef79969018' down_revision = '2ae93c9c6191' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('container', sa.Column('memory', sa.String(length=255), nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/859fb45df249_remove_replication_controller.py0000666000175100017510000000156113244017334032655 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """remove replication controller Revision ID: 859fb45df249 Revises: 1f196a3dabae Create Date: 2016-08-09 13:46:24.052528 """ # revision identifiers, used by Alembic. revision = '859fb45df249' down_revision = '1f196a3dabae' from alembic import op def upgrade(): op.drop_table('replicationcontroller') magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/1f196a3dabae_remove_container.py0000666000175100017510000000153713244017334030256 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """remove container object Revision ID: 1f196a3dabae Revises: e0653b2d5271 Create Date: 2016-06-02 11:42:42.200992 """ # revision identifiers, used by Alembic. 
revision = '1f196a3dabae' down_revision = 'e0653b2d5271' from alembic import op def upgrade(): op.drop_table('container') ././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/68ce16dfd341_add_master_lb_enabled_column_to_baymodel_table.pymagnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/68ce16dfd341_add_master_lb_enabled_column_to_baym0000666000175100017510000000170213244017334033456 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add master_lb_enabled column to baymodel table Revision ID: 68ce16dfd341 Revises: 085e601a39f6 Create Date: 2016-06-23 18:44:55.312413 """ # revision identifiers, used by Alembic. revision = '68ce16dfd341' down_revision = '085e601a39f6' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('baymodel', sa.Column('master_lb_enabled', sa.Boolean(), default=False)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/592131657ca1_add_coe_column_to_baymodel.py0000666000175100017510000000235113244017343031647 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add coe column to BayModel Revision ID: 592131657ca1 Revises: 4956f03cabad Create Date: 2015-04-17 14:20:17.620995 """ # revision identifiers, used by Alembic. revision = '592131657ca1' down_revision = '4956f03cabad' from alembic import op import magnum.conf import sqlalchemy as sa CONF = magnum.conf.CONF def upgrade(): op.add_column('baymodel', sa.Column('coe', sa.String(length=255), nullable=True)) baymodel = sa.sql.table('baymodel', sa.sql.column('coe', sa.String(length=255))) op.execute( baymodel.update().values({ 'coe': op.inline_literal("kubernetes")}) ) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/04c625aa95ba_change_storage_driver_to_string.py0000666000175100017510000000220413244017334033162 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """change storage driver to string Revision ID: 04c625aa95ba Revises: 52bcaf58fecb Create Date: 2017-10-10 15:40:37.553288 """ # revision identifiers, used by Alembic. 
revision = '04c625aa95ba' down_revision = '52bcaf58fecb' from alembic import op import sqlalchemy as sa def upgrade(): op.alter_column('cluster_template', 'docker_storage_driver', existing_type=sa.Enum('devicemapper', 'overlay', name='docker_storage_driver'), type_=sa.String(length=512), nullable=True) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/59e7664a8ba1_add_container_status.py0000666000175100017510000000173613244017334030714 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_container_status Revision ID: 59e7664a8ba1 Revises: 2b5f24dd95de Create Date: 2015-05-11 11:33:23.125790 """ # revision identifiers, used by Alembic. revision = '59e7664a8ba1' down_revision = '2b5f24dd95de' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('container', sa.Column('status', sa.String(length=20), nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/5518af8dbc21_rename_cert_uuid.py0000666000175100017510000000245413244017343030076 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Rename cert_uuid Revision ID: 5518af8dbc21 Revises: 6f21dc920bb Create Date: 2015-08-28 13:13:19.747625 """ # revision identifiers, used by Alembic. revision = '5518af8dbc21' down_revision = '6f21dc920bb' from alembic import op import sqlalchemy as sa def upgrade(): op.alter_column('bay', 'ca_cert_uuid', new_column_name='ca_cert_ref', existing_type=sa.String(length=36), type_=sa.String(length=512), nullable=True) op.alter_column('bay', 'magnum_cert_uuid', new_column_name='magnum_cert_ref', existing_type=sa.String(length=36), type_=sa.String(length=512), nullable=True) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/e772b2598d9_add_container_command.py0000666000175100017510000000173613244017334030654 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add-container-command Revision ID: e772b2598d9 Revises: 4ea34a59a64c Create Date: 2015-04-17 18:59:52.770329 """ # revision identifiers, used by Alembic. 
revision = 'e772b2598d9' down_revision = '4ea34a59a64c' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('container', sa.Column('command', sa.String(length=255), nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/a1136d335540_add_docker_storage_driver_column.py0000666000175100017510000000235213244017334033062 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add docker storage driver column Revision ID: a1136d335540 Revises: d072f58ab240 Create Date: 2016-03-07 19:00:28.738486 """ # revision identifiers, used by Alembic. revision = 'a1136d335540' down_revision = 'd072f58ab240' from alembic import op import sqlalchemy as sa docker_storage_driver_enum = sa.Enum('devicemapper', 'overlay', name='docker_storage_driver') def upgrade(): docker_storage_driver_enum.create(op.get_bind(), checkfirst=True) op.add_column('baymodel', sa.Column('docker_storage_driver', docker_storage_driver_enum, nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/52bcaf58fecb_add_master_flavor_id_to_cluster.py0000666000175100017510000000175213244017334033401 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add master_flavor_id to cluster Revision ID: 52bcaf58fecb Revises: a0e7c8450ab1 Create Date: 2017-08-01 11:22:31.277745 """ # revision identifiers, used by Alembic. revision = '52bcaf58fecb' down_revision = 'a0e7c8450ab1' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('cluster', sa.Column('master_flavor_id', sa.String(length=255), nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/156ceb17fb0a_add_bay_status_reason.py0000666000175100017510000000166613244017334031176 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_bay_status_reason Revision ID: 156ceb17fb0a Revises: 59e7664a8ba1 Create Date: 2015-05-30 11:34:57.847071 """ # revision identifiers, used by Alembic. 
revision = '156ceb17fb0a' down_revision = '59e7664a8ba1' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('bay', sa.Column('status_reason', sa.Text, nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/9a1539f1cd2c_add_federation_table.py0000666000175100017510000000333313244017334030661 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """"add federation table Revision ID: 9a1539f1cd2c Revises: 041d9a0f1159 Create Date: 2017-08-07 11:47:29.865166 """ # revision identifiers, used by Alembic. 
revision = '9a1539f1cd2c' down_revision = '041d9a0f1159' from alembic import op import sqlalchemy as sa from magnum.db.sqlalchemy import models def upgrade(): op.create_table( 'federation', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('project_id', sa.String(length=255), nullable=True), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('hostcluster_id', sa.String(length=255), nullable=True), sa.Column('member_ids', models.JSONEncodedList(), nullable=True), sa.Column('status', sa.String(length=20), nullable=True), sa.Column('status_reason', sa.Text(), nullable=True), sa.Column('properties', models.JSONEncodedList(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('uuid', name='uniq_federation0uuid') ) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/4ea34a59a64c_add_discovery_url_to_bay.py0000666000175100017510000000174313244017343031622 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add-discovery-url-to-bay Revision ID: 4ea34a59a64c Revises: 456126c6c9e9 Create Date: 2015-04-14 18:56:03.440329 """ # revision identifiers, used by Alembic. 
revision = '4ea34a59a64c' down_revision = '456126c6c9e9' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('bay', sa.Column('discovery_url', sa.String(length=255), nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/041d9a0f1159_add_flavor_id_to_cluster.py0000666000175100017510000000173413244017334031437 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add flavor_id to cluster Revision ID: 041d9a0f1159 Revises: 04c625aa95ba Create Date: 2017-07-31 12:46:00.777841 """ # revision identifiers, used by Alembic. revision = '041d9a0f1159' down_revision = '04c625aa95ba' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('cluster', sa.Column('flavor_id', sa.String(length=255), nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/2ace4006498_rename_bay_minions_address.py0000666000175100017510000000201313244017334031672 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""rename-bay-minions-address Revision ID: 2ace4006498 Revises: 29affeaa2bc2 Create Date: 2015-03-27 15:15:36.309601 """ # revision identifiers, used by Alembic. revision = '2ace4006498' down_revision = '29affeaa2bc2' from alembic import op from magnum.db.sqlalchemy import models def upgrade(): op.alter_column('bay', 'minions_address', new_column_name='node_addresses', existing_type=models.JSONEncodedList()) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/3b6c4c42adb4_add_unique_constraints.py0000666000175100017510000000313513244017334031376 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add unique constraints Revision ID: 3b6c4c42adb4 Revises: 592131657ca1 Create Date: 2015-05-05 09:45:44.657047 """ # revision identifiers, used by Alembic. 
revision = '3b6c4c42adb4' down_revision = '592131657ca1' from alembic import op def upgrade(): op.create_unique_constraint("uniq_bay0uuid", "bay", ["uuid"]) op.create_unique_constraint("uniq_baylock0bay_uuid", "baylock", ["bay_uuid"]) op.create_unique_constraint("uniq_baymodel0uuid", "baymodel", ["uuid"]) op.create_unique_constraint("uniq_container0uuid", "container", ["uuid"]) op.create_unique_constraint("uniq_node0uuid", "node", ["uuid"]) op.create_unique_constraint("uniq_node0ironic_node_id", "node", ["ironic_node_id"]) op.create_unique_constraint("uniq_pod0uuid", "pod", ["uuid"]) op.create_unique_constraint("uniq_service0uuid", "service", ["uuid"]) op.create_unique_constraint("uniq_replicationcontroller0uuid", "replicationcontroller", ["uuid"]) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/53882537ac57_add_host_column_to_pod.py0000666000175100017510000000170313244017334031061 0ustar zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add host column to pod Revision ID: 53882537ac57 Revises: 1c1ff5e56048 Create Date: 2015-06-25 16:52:47.159887 """ # revision identifiers, used by Alembic. 
revision = '53882537ac57' down_revision = '1c1ff5e56048' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('pod', sa.Column('host', sa.Text, nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/2b5f24dd95de_rename_service_port.py0000666000175100017510000000176613244017334030712 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """rename service port Revision ID: 2b5f24dd95de Revises: 592131657ca1 Create Date: 2015-04-29 05:52:52.204095 """ # revision identifiers, used by Alembic. revision = '2b5f24dd95de' down_revision = '3b6c4c42adb4' from alembic import op from magnum.db.sqlalchemy import models def upgrade(): op.alter_column('service', 'port', new_column_name='ports', existing_type=models.JSONEncodedList()) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/40f325033343_add_bay_create_timeout_to_bay.py0000666000175100017510000000165613244017334032266 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""add bay_create_timeout to bay Revision ID: 40f325033343 Revises: 5977879072a7 Create Date: 2015-12-02 16:38:54.697413 """ # revision identifiers, used by Alembic. revision = '40f325033343' down_revision = '5977879072a7' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('bay', sa.Column('bay_create_timeout', sa.Integer(), nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/14328d6a57e3_add_master_count_to_bay.py0000666000175100017510000000172113244017334031273 0ustar zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add master count to bay Revision ID: 14328d6a57e3 Revises: 53882537ac57 Create Date: 2015-07-29 16:00:38.721016 """ # revision identifiers, used by Alembic. revision = '14328d6a57e3' down_revision = '53882537ac57' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('bay', sa.Column('master_count', sa.Integer(), nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/1afee1db6cd0_add_master_flavor.py0000666000175100017510000000171013244017334030442 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add master flavor Revision ID: 1afee1db6cd0 Revises: 3a938526b35d Create Date: 2015-02-27 14:53:38.042900 """ # revision identifiers, used by Alembic. revision = '1afee1db6cd0' down_revision = '35cff7c86221' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('baymodel', sa.Column('master_flavor_id', sa.String(length=255), nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/966a99e70ff_add_proxy.py0000666000175100017510000000227213244017343026435 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add-proxy Revision ID: 966a99e70ff Revises: 6f21dc998bb Create Date: 2015-08-24 11:23:24.262921 """ # revision identifiers, used by Alembic. 
revision = '966a99e70ff' down_revision = '6f21dc998bb' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('baymodel', sa.Column('http_proxy', sa.String(length=255), nullable=True)) op.add_column('baymodel', sa.Column('https_proxy', sa.String(length=255), nullable=True)) op.add_column('baymodel', sa.Column('no_proxy', sa.String(length=255), nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/720f640f43d1_rename_bay_table_to_cluster.py0000666000175100017510000000226113244017334032131 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """rename bay table to cluster Revision ID: 720f640f43d1 Revises: fb03fdef8919 Create Date: 2016-09-02 09:43:41.485934 """ # revision identifiers, used by Alembic. revision = '720f640f43d1' down_revision = 'fb03fdef8919' from alembic import op import sqlalchemy as sa def upgrade(): op.alter_column('bay', 'baymodel_id', new_column_name='cluster_template_id', existing_type=sa.String(255)) op.alter_column('bay', 'bay_create_timeout', new_column_name='create_timeout', existing_type=sa.Integer()) op.rename_table('bay', 'cluster') magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/bc46ba6cf949_add_keypair_to_cluster.py0000666000175100017510000000173313244017334031373 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add keypair to cluster Revision ID: bc46ba6cf949 Revises: 720f640f43d1 Create Date: 2016-10-03 10:47:08.584635 """ # revision identifiers, used by Alembic. revision = 'bc46ba6cf949' down_revision = '720f640f43d1' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('cluster', sa.Column('keypair', sa.String(length=255), nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/2d1354bbf76e_ssh_authorized_key.py0000666000175100017510000000177513244017334030475 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ssh authorized key Revision ID: 2d1354bbf76e Revises: 1afee1db6cd0 Create Date: 2015-03-13 14:05:58.744652 """ # revision identifiers, used by Alembic. 
revision = '2d1354bbf76e' down_revision = '1afee1db6cd0' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('baymodel', sa.Column('ssh_authorized_key', sa.Text, nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/ee92b41b8809_create_quotas_table.py0000666000175100017510000000303113244017334030513 0ustar zuulzuul00000000000000# Copyright 2016 Yahoo! Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Introduce Quotas Revision ID: ee92b41b8809 Revises: 5d4caa6e0a42 Create Date: 2016-02-26 18:32:08.992964 """ # revision identifiers, used by Alembic. 
revision = 'ee92b41b8809' down_revision = '5d4caa6e0a42' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'quotas', sa.Column('id', sa.Integer(), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('project_id', sa.String(length=255), nullable=True), sa.Column('resource', sa.String(length=255), nullable=True), sa.Column('hard_limit', sa.Integer(), nullable=False), sa.PrimaryKeyConstraint('id'), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) op.create_unique_constraint( "uniq_quotas0project_id0resource", "quotas", ["project_id", "resource"]) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/3bea56f25597_multi_tenant.py0000666000175100017510000000452213244017334027217 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Multi Tenant Support Revision ID: 3bea56f25597 Revises: 2581ebaf0cb2 Create Date: 2015-01-22 22:22:22.150632 """ # revision identifiers, used by Alembic. 
revision = '3bea56f25597' down_revision = '2581ebaf0cb2' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('bay', sa.Column('project_id', sa.String(length=255), nullable=True)) op.add_column('bay', sa.Column('user_id', sa.String(length=255), nullable=True)) op.add_column('baymodel', sa.Column('project_id', sa.String(length=255), nullable=True)) op.add_column('baymodel', sa.Column('user_id', sa.String(length=255), nullable=True)) op.add_column('container', sa.Column('project_id', sa.String(length=255), nullable=True)) op.add_column('container', sa.Column('user_id', sa.String(length=255), nullable=True)) op.add_column('node', sa.Column('project_id', sa.String(length=255), nullable=True)) op.add_column('node', sa.Column('user_id', sa.String(length=255), nullable=True)) op.add_column('pod', sa.Column('project_id', sa.String(length=255), nullable=True)) op.add_column('pod', sa.Column('user_id', sa.String(length=255), nullable=True)) op.add_column('service', sa.Column('project_id', sa.String(length=255), nullable=True)) op.add_column('service', sa.Column('user_id', sa.String(length=255), nullable=True)) op.add_column('replicationcontroller', sa.Column('project_id', sa.String(length=255), nullable=True)) op.add_column('replicationcontroller', sa.Column('user_id', sa.String(length=255), nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/1c1ff5e56048_rename_container_image_id.py0000666000175100017510000000174713244017334031643 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """rename_container_image_id Revision ID: 1c1ff5e56048 Revises: 156ceb17fb0a Create Date: 2015-06-18 10:21:40.991734 """ # revision identifiers, used by Alembic. revision = '1c1ff5e56048' down_revision = '156ceb17fb0a' from alembic import op import sqlalchemy as sa def upgrade(): op.alter_column('container', 'image_id', new_column_name='image', existing_type=sa.String(255)) ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/3be65537a94a_add_network_driver_baymodel_column.pymagnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/3be65537a94a_add_network_driver_baymodel_column.p0000666000175100017510000000202713244017343033421 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_network_driver_baymodel_column Revision ID: 3be65537a94a Revises: 4e263f236334 Create Date: 2015-09-03 20:51:54.229436 """ # revision identifiers, used by Alembic. 
revision = '3be65537a94a' down_revision = '4e263f236334' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('baymodel', sa.Column('network_driver', sa.String(length=255), nullable=True)) ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/1481f5b560dd_add_labels_column_to_baymodel_table.pymagnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/1481f5b560dd_add_labels_column_to_baymodel_table.0000666000175100017510000000173213244017334033305 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add labels column to baymodel table Revision ID: 1481f5b560dd Revises: 3be65537a94a Create Date: 2015-09-02 22:34:07.590142 """ # revision identifiers, used by Alembic. revision = '1481f5b560dd' down_revision = '3be65537a94a' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('baymodel', sa.Column('labels', sa.Text(), nullable=True)) ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/fb03fdef8919_rename_baymodel_to_clustertemplate.pymagnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/fb03fdef8919_rename_baymodel_to_clustertemplate.p0000666000175100017510000000157713244017334033617 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """rename_baymodel_to_clustertemplate Revision ID: fb03fdef8919 Revises: fcb4efee8f8b Create Date: 2016-08-31 12:40:31.165817 """ # revision identifiers, used by Alembic. revision = 'fb03fdef8919' down_revision = 'fcb4efee8f8b' from alembic import op def upgrade(): op.rename_table('baymodel', 'cluster_template') magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/6f21dc998bb_add_master_addresses_to_bay.py0000666000175100017510000000200413244017334032176 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add master_addresses to bay Revision ID: 6f21dc998bb Revises: 421102d1f2d2 Create Date: 2015-08-20 13:57:14.863292 """ # revision identifiers, used by Alembic. 
revision = '6f21dc998bb' down_revision = '421102d1f2d2' from alembic import op from magnum.db.sqlalchemy import models import sqlalchemy as sa def upgrade(): op.add_column( 'bay', sa.Column('master_addresses', models.JSONEncodedList(), nullable=True) ) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/57fbdf2327a2_remove_baylock.py0000666000175100017510000000152413244017334027563 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """remove baylock Revision ID: 57fbdf2327a2 Revises: adc3b7679ae Create Date: 2015-12-17 09:27:18.429773 """ # revision identifiers, used by Alembic. revision = '57fbdf2327a2' down_revision = 'adc3b7679ae' from alembic import op def upgrade(): op.drop_table('baylock') magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/5d4caa6e0a42_create_trustee_for_each_bay.py0000666000175100017510000000257613244017343032336 0ustar zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""create trustee for each bay Revision ID: 5d4caa6e0a42 Revises: bb42b7cad130 Create Date: 2016-02-17 14:16:12.927874 """ # revision identifiers, used by Alembic. revision = '5d4caa6e0a42' down_revision = 'bb42b7cad130' from alembic import op import sqlalchemy as sa def upgrade(): op.alter_column('bay', 'registry_trust_id', new_column_name='trust_id', existing_type=sa.String(255)) op.add_column('bay', sa.Column('trustee_username', sa.String(length=255), nullable=True)) op.add_column('bay', sa.Column('trustee_user_id', sa.String(length=255), nullable=True)) op.add_column('bay', sa.Column('trustee_password', sa.String(length=255), nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/1d045384b966_add_insecure_baymodel_attr.py0000666000175100017510000000213013244017334031674 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add-insecure-baymodel-attr Revision ID: 1d045384b966 Revises: 1481f5b560dd Create Date: 2015-09-23 18:17:10.195121 """ # revision identifiers, used by Alembic. 
revision = '1d045384b966' down_revision = '1481f5b560dd' from alembic import op import sqlalchemy as sa def upgrade(): insecure_column = sa.Column('insecure', sa.Boolean(), default=False) op.add_column('baymodel', insecure_column) baymodel = sa.sql.table('baymodel', insecure_column) op.execute( baymodel.update().values({'insecure': True}) ) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/417917e778f5_add_server_type_to_baymodel.py0000666000175100017510000000177413244017334032132 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add server_type column to baymodel Revision ID: 417917e778f5 Revises: 33ef79969018 Create Date: 2015-10-14 16:21:57.229436 """ # revision identifiers, used by Alembic. revision = '417917e778f5' down_revision = '33ef79969018' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('baymodel', sa.Column('server_type', sa.String(length=255), nullable=True, server_default='vm')) ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/e0653b2d5271_add_fixed_subnet_column_to_baymodel_table.pymagnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/e0653b2d5271_add_fixed_subnet_column_to_baymodel_0000666000175100017510000000176213244017343033353 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add fixed_subnet column to baymodel table Revision ID: e0653b2d5271 Revises: 68ce16dfd341 Create Date: 2016-06-29 14:14:37.862594 """ # revision identifiers, used by Alembic. revision = 'e0653b2d5271' down_revision = '68ce16dfd341' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('baymodel', sa.Column('fixed_subnet', sa.String(length=255), nullable=True)) magnum-6.1.0/magnum/db/sqlalchemy/alembic/versions/e647f5931da8_add_insecure_registry_to_baymodel.py0000666000175100017510000000175713244017343033461 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add insecure_registry to baymodel Revision ID: e647f5931da8 Revises: 049f81f6f584 Create Date: 2016-03-28 09:08:07.467102 """ # revision identifiers, used by Alembic. 
revision = 'e647f5931da8' down_revision = '049f81f6f584' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('baymodel', sa.Column('insecure_registry', sa.String(length=255), nullable=True)) magnum-6.1.0/magnum/db/migration.py0000666000175100017510000000263013244017334017246 0ustar zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Database setup and migration commands.""" from stevedore import driver import magnum.conf CONF = magnum.conf.CONF _IMPL = None def get_backend(): global _IMPL if not _IMPL: _IMPL = driver.DriverManager("magnum.database.migration_backend", CONF.database.backend).driver return _IMPL def upgrade(version=None): """Migrate the database to `version` or the most recent version.""" return get_backend().upgrade(version) def version(): return get_backend().version() def stamp(version): return get_backend().stamp(version) def revision(message, autogenerate): return get_backend().revision(message, autogenerate) magnum-6.1.0/magnum/db/api.py0000666000175100017510000004363613244017334016041 0ustar zuulzuul00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Base classes for storage engines """ import abc from oslo_config import cfg from oslo_db import api as db_api import six from magnum.common import profiler _BACKEND_MAPPING = {'sqlalchemy': 'magnum.db.sqlalchemy.api'} IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING, lazy=True) def get_instance(): """Return a DB API instance.""" return IMPL @profiler.trace_cls("db") @six.add_metaclass(abc.ABCMeta) class Connection(object): """Base class for storage system connections.""" @abc.abstractmethod def __init__(self): """Constructor.""" @abc.abstractmethod def get_cluster_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None): """Get matching clusters. Return a list of the specified columns for all clusters that match the specified filters. :param context: The security context :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of clusters to return. :param marker: the last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: direction in which results should be sorted. (asc, desc) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def create_cluster(self, values): """Create a new cluster. :param values: A dict containing several items used to identify and track the cluster, and several dicts which are passed into the Drivers when managing this cluster. For example: :: { 'uuid': uuidutils.generate_uuid(), 'name': 'example', 'type': 'virt' } :returns: A cluster. 
""" @abc.abstractmethod def get_cluster_by_id(self, context, cluster_id): """Return a cluster. :param context: The security context :param cluster_id: The id of a cluster. :returns: A cluster. """ @abc.abstractmethod def get_cluster_by_uuid(self, context, cluster_uuid): """Return a cluster. :param context: The security context :param cluster_uuid: The uuid of a cluster. :returns: A cluster. """ @abc.abstractmethod def get_cluster_by_name(self, context, cluster_name): """Return a cluster. :param context: The security context :param cluster_name: The name of a cluster. :returns: A cluster. """ @abc.abstractmethod def get_cluster_stats(self, context, project_id): """Return clusters stats for the given project. :param context: The security context :param project_id: The project id. :returns: clusters, nodes count. """ @abc.abstractmethod def get_cluster_count_all(self, context, filters=None): """Get count of matching clusters. :param context: The security context :param filters: Filters to apply. Defaults to None. :returns: Count of matching clusters. """ @abc.abstractmethod def destroy_cluster(self, cluster_id): """Destroy a cluster and all associated interfaces. :param cluster_id: The id or uuid of a cluster. """ @abc.abstractmethod def update_cluster(self, cluster_id, values): """Update properties of a cluster. :param cluster_id: The id or uuid of a cluster. :returns: A cluster. :raises: ClusterNotFound """ @abc.abstractmethod def get_cluster_template_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None): """Get matching ClusterTemplates. Return a list of the specified columns for all ClusterTemplates that match the specified filters. :param context: The security context :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of ClusterTemplates to return. :param marker: the last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. 
:param sort_dir: direction in which results should be sorted. (asc, desc) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def create_cluster_template(self, values): """Create a new ClusterTemplate. :param values: A dict containing several items used to identify and track the ClusterTemplate, and several dicts which are passed into the Drivers when managing this ClusterTemplate. For example: :: { 'uuid': uuidutils.generate_uuid(), 'name': 'example', 'type': 'virt' } :returns: A ClusterTemplate. """ @abc.abstractmethod def get_cluster_template_by_id(self, context, cluster_template_id): """Return a ClusterTemplate. :param context: The security context :param cluster_template_id: The id of a ClusterTemplate. :returns: A ClusterTemplate. """ @abc.abstractmethod def get_cluster_template_by_uuid(self, context, cluster_template_uuid): """Return a ClusterTemplate. :param context: The security context :param cluster_template_uuid: The uuid of a ClusterTemplate. :returns: A ClusterTemplate. """ @abc.abstractmethod def get_cluster_template_by_name(self, context, cluster_template_name): """Return a ClusterTemplate. :param context: The security context :param cluster_template_name: The name of a ClusterTemplate. :returns: A ClusterTemplate. """ @abc.abstractmethod def destroy_cluster_template(self, cluster_template_id): """Destroy a ClusterTemplate and all associated interfaces. :param cluster_template_id: The id or uuid of a ClusterTemplate. """ @abc.abstractmethod def update_cluster_template(self, cluster_template_id, values): """Update properties of a ClusterTemplate. :param cluster_template_id: The id or uuid of a ClusterTemplate. :returns: A ClusterTemplate. :raises: ClusterTemplateNotFound """ @abc.abstractmethod def create_x509keypair(self, values): """Create a new x509keypair. 
:param values: A dict containing several items used to identify and track the x509keypair, and several dicts which are passed into the Drivers when managing this x509keypair. For example: :: { 'uuid': uuidutils.generate_uuid(), 'certificate': 'AAA...', 'private_key': 'BBB...', 'private_key_passphrase': 'CCC...', 'intermediates': 'DDD...', } :returns: A X509KeyPair. """ @abc.abstractmethod def get_x509keypair_by_id(self, context, x509keypair_id): """Return a x509keypair. :param context: The security context :param x509keypair_id: The id of a x509keypair. :returns: A x509keypair. """ @abc.abstractmethod def get_x509keypair_by_uuid(self, context, x509keypair_uuid): """Return a x509keypair. :param context: The security context :param x509keypair_uuid: The uuid of a x509keypair. :returns: A x509keypair. """ @abc.abstractmethod def destroy_x509keypair(self, x509keypair_id): """Destroy a x509keypair. :param x509keypair_id: The id or uuid of a x509keypair. """ @abc.abstractmethod def update_x509keypair(self, x509keypair_id, values): """Update properties of a X509KeyPair. :param x509keypair_id: The id or uuid of a X509KeyPair. :returns: A X509KeyPair. """ @abc.abstractmethod def get_x509keypair_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None): """Get matching x509keypairs. Return a list of the specified columns for all x509keypairs that match the specified filters. :param context: The security context :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of x509keypairs to return. :param marker: the last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: direction in which results should be sorted. (asc, desc) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def destroy_magnum_service(self, magnum_service_id): """Destroys a magnum_service record. 
:param magnum_service_id: The id of a magnum_service. """ @abc.abstractmethod def update_magnum_service(self, magnum_service_id, values): """Update properties of a magnum_service. :param magnum_service_id: The id of a magnum_service record. """ @abc.abstractmethod def get_magnum_service_by_host_and_binary(self, host, binary): """Return a magnum_service record. :param host: The host where the binary is located. :param binary: The name of the binary. :returns: A magnum_service record. """ @abc.abstractmethod def create_magnum_service(self, values): """Create a new magnum_service record. :param values: A dict containing several items used to identify and define the magnum_service record. :returns: A magnum_service record. """ @abc.abstractmethod def get_magnum_service_list(self, disabled=None, limit=None, marker=None, sort_key=None, sort_dir=None): """Get matching magnum_service records. Return a list of the specified columns for all magnum_services those match the specified filters. :param disabled: Filters disbaled services. Defaults to None. :param limit: Maximum number of magnum_services to return. :param marker: the last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: direction in which results should be sorted. (asc, desc) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def create_quota(self, values): """Create a new Quota record for a resource in a project. :param values: A dict containing several items used to identify and track quota for a resource in a project. :: { 'id': uuidutils.generate_uuid(), 'project_id': 'fake_project', 'resource': 'fake_resource', 'hard_limit': 'fake_hardlimit', } :returns: A quota record. """ @abc.abstractmethod def update_quota(self, project_id, values): """Update quota record. :param project_id: The project id. 
:param values: A dict containing several items used to identify and track quota for a resource in a project. :: { 'id': uuidutils.generate_uuid(), 'project_id': 'fake_project', 'resource': 'fake_resource', 'hard_limit': 'fake_hardlimit', } :returns: A quota record. """ @abc.abstractmethod def delete_quota(self, project_id, resource): """Delete a quota. :param project_id: Project id. :param resource: resource name. """ @abc.abstractmethod def get_quota_by_id(self, context, quota_id): """Return a quota. :param context: The security context :param quota_id: The id of a quota. :returns: A quota. """ @abc.abstractmethod def get_quota_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None): """Get quota list. Return a list of the specified columns for all quotas that match the specified filters. :param context: The security context :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of clusters to return. :param marker: the last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: direction in which results should be sorted. (asc, desc) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def quota_get_all_by_project_id(self, project_id): """Gets Quota record for all the resources in a project. :param project_id: Project identifier of the project. :returns: Quota record for all resources in a project. """ @abc.abstractmethod def get_quota_by_project_id_resource(self, project_id, resource): """Gets quota record for the given quota id. :param project_id: project id. :param resource: resource name. :returns: Quota record. """ @abc.abstractmethod def get_federation_by_id(self, context, federation_id): """Return a federation for a given federation id. 
:param context: The security context :param federation_id: The id of a federation :returns: A federation """ @abc.abstractmethod def get_federation_by_uuid(self, context, federation_uuid): """Return a federation for a given federation uuid. :param context: The security context :param federation_uuid: The uuid of a federation :returns: A federation """ @abc.abstractmethod def get_federation_by_name(self, context, federation_name): """Return a federation for a given federation name. :param context: The security context :param federation_name: The name of a federation :returns: A federation """ @abc.abstractmethod def get_federation_list(self, context, limit=None, marker=None, sort_key=None, sort_dir=None, filters=None): """Get matching federations. Return a list of the specified columns for all federations that match the specified filters. :param context: The security context :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of federations to return. :param marker: the last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: direction in which results should be sorted. (asc, desc) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def create_federation(self, values): """Create a new federation. :param values: A dict containing several items used to identify and track the federation. For example: :: { 'uuid': uuidutils.generate_uuid(), 'name': 'example', 'hostcluster_id': '91c8dd07-14a2-4fd8-b084-915fa53552fd', 'properties': 'dns-zone:example.com.' } :returns: A federation. """ @abc.abstractmethod def destroy_federation(self, federation_id): """Destroy a federation. This action *will not* destroy the host cluster nor the member clusters. :param federation_id: The id or uuid of a federation. """ @abc.abstractmethod def update_federation(self, federation_id, values): """Update properties of a federation. 
:param federation_id: The id or uuid of a federation. :param values: A dict containing several items used to identify and track the federation. For example: :: { 'uuid': uuidutils.generate_uuid(), 'name': 'example', 'hostcluster_id': '91c8dd07-14a2-4fd8-b084-915fa53552fd', 'properties': 'dns-zone:example.com.' } :returns: A federation. :raises: FederationNotFound """ magnum-6.1.0/magnum/db/__init__.py0000666000175100017510000000000013244017334017001 0ustar zuulzuul00000000000000magnum-6.1.0/tox.ini0000666000175100017510000001215013244017343014343 0ustar zuulzuul00000000000000[tox] minversion = 1.6 envlist = py35,py27,pep8 skipsdist = True [testenv] usedevelop = True install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=stable/queens} -U {opts} {packages} whitelist_externals = bash find rm setenv = VIRTUAL_ENV={envdir} PYTHONWARNINGS=default::DeprecationWarning deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt passenv = TEMPEST_* OS_TEST_* commands = find . -type f -name "*.py[c|o]" -delete rm -f .testrepository/times.dbm bash tools/pretty_tox.sh '{posargs}' [testenv:debug] commands = oslo_debug_helper -t magnum/tests/unit {posargs} [testenv:debug-py27] basepython = python2.7 commands = oslo_debug_helper -t magnum/tests/unit {posargs} [testenv:debug-py35] basepython = python3.5 commands = oslo_debug_helper -t magnum/tests/unit {posargs} [testenv:functional-api] sitepackages = True setenv = {[testenv]setenv} OS_TEST_PATH=./magnum/tests/functional/api OS_TEST_TIMEOUT=7200 deps = {[testenv]deps} commands = find . -type f -name "*.py[c|o]" -delete bash tools/pretty_tox.sh '{posargs}' [testenv:functional-k8s] sitepackages = True setenv = {[testenv]setenv} OS_TEST_PATH=./magnum/tests/functional/k8s OS_TEST_TIMEOUT=7200 deps = {[testenv]deps} commands = find . 
-type f -name "*.py[c|o]" -delete bash tools/pretty_tox.sh '{posargs}' [testenv:functional-k8s-ironic] sitepackages = True setenv = {[testenv]setenv} OS_TEST_PATH=./magnum/tests/functional/k8s_ironic OS_TEST_TIMEOUT=7200 deps = {[testenv]deps} commands = find . -type f -name "*.py[c|o]" -delete bash tools/pretty_tox.sh '{posargs}' [testenv:functional-k8s-coreos] sitepackages = True setenv = {[testenv]setenv} OS_TEST_PATH=./magnum/tests/functional/k8s_coreos OS_TEST_TIMEOUT=7200 deps = {[testenv]deps} commands = find . -type f -name "*.py[c|o]" -delete bash tools/pretty_tox.sh '{posargs}' [testenv:functional-swarm] sitepackages = True setenv = {[testenv]setenv} OS_TEST_PATH=./magnum/tests/functional/swarm OS_TEST_TIMEOUT=7200 deps = {[testenv]deps} commands = find . -type f -name "*.py[c|o]" -delete bash tools/pretty_tox.sh '{posargs}' [testenv:functional-swarm-mode] sitepackages = True setenv = {[testenv]setenv} OS_TEST_PATH=./magnum/tests/functional/swarm_mode OS_TEST_TIMEOUT=7200 deps = {[testenv]deps} commands = find . -type f -name "*.py[c|o]" -delete bash tools/pretty_tox.sh '{posargs}' [testenv:functional-mesos] sitepackages = True setenv = {[testenv]setenv} OS_TEST_PATH=./magnum/tests/functional/mesos OS_TEST_TIMEOUT=7200 deps = {[testenv]deps} commands = find . 
-type f -name "*.py[c|o]" -delete bash tools/pretty_tox.sh '{posargs}' [testenv:pep8] commands = doc8 -e .rst specs/ doc/source/ contrib/ CONTRIBUTING.rst HACKING.rst README.rst bash tools/flake8wrap.sh {posargs} bandit -r magnum -x tests -n5 -ll bash -c "find {toxinidir} \ -not \( -type d -name .?\* -prune \) \ -not \( -type d -name doc -prune \) \ -not \( -type d -name contrib -prune \) \ -type f \ -name \*.sh \ -print0 | xargs -0 bashate -v -iE006,E010,E042 -eE005" [testenv:venv] commands = {posargs} [testenv:bandit] deps = -r{toxinidir}/test-requirements.txt commands = bandit -r magnum -x tests -n5 -ll [testenv:cover] commands = {toxinidir}/tools/cover.sh {posargs} [testenv:docs] commands = doc8 -e .rst specs/ doc/source/ contrib/ CONTRIBUTING.rst HACKING.rst README.rst python setup.py build_sphinx [testenv:genconfig] commands = oslo-config-generator --config-file etc/magnum/magnum-config-generator.conf [testenv:genpolicy] commands = oslopolicy-sample-generator --config-file etc/magnum/magnum-policy-generator.conf [flake8] # H106 Don’t put vim configuration in source files # H203 Use assertIs(Not)None to check for None # H904 Delay string interpolations at logging calls filename = *.py,app.wsgi enable-extensions = H106,H203,H904 exclude = .venv,.git,.tox,dist,*lib/python*,*egg,build,tools,releasenotes [hacking] local-check-factory = magnum.hacking.checks.factory [testenv:pip-missing-reqs] # do not install test-requirements as that will pollute the virtualenv for # determining missing packages # this also means that pip-missing-reqs must be installed separately, outside # of the requirements.txt files deps = pip_missing_reqs -rrequirements.txt commands=pip-missing-reqs -d --ignore-file=magnum/tests/* magnum [testenv:releasenotes] commands = rm -rf releasenotes/build sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [testenv:install-guide] commands = sphinx-build -a -E -W -d install-guide/build/doctrees -b 
html install-guide/source install-guide/build/html [testenv:api-ref] commands = rm -rf api-ref/build sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html magnum-6.1.0/ChangeLog0000664000175100017510000030614413244017674014620 0ustar zuulzuul00000000000000CHANGES ======= 6.1.0 ----- * Update kubernetes dashboard to v1.8.3 * k8s: allow passing extra options to kube daemons * [kubernetes] add ingress controller * kuberntes: Disable the scale\_manager for scale down * Run etcd and flanneld in a system container * Admin can now delete clusters in any project * Driver's name are case sensitive * Support calico as network driver * Add support for Octavia resources in Heat * Using v1.9.3 as default k8s version * Add disabled\_drivers config option * federation api: api endpoints * [k8s] allow enabling kubernetes cert manager api * Now user can update label values in cluster-template * Add missed space in k8s template file * Document use of kube\_tag label * Updated from global requirements * k8s: Fix kubelet, add RBAC and pass e2e tests * Update UPPER\_CONSTRAINTS\_FILE for stable/queens * Update .gitreview for stable/queens 6.0.1 ----- * Add issue to reno for the incompatible k8s client * Support accessing all clusters/templates across projects * Deprecate usage of tenant and user in context * Add label availability\_zone * Add send\_cluster\_metrics configuration parameter * Start RPC service before waiting * Remove broken job magnum-non-functional-tox-migration * Zuul: Remove project name * Support soft-anti-affinity policy for nodes * ci: Add redirection from /v2 to /identity/v2 * Add openstack\_ca\_file configuration option * [k8s] Add missing verify\_ca in minion\_wc\_notify * fix url for versioned objects docs in code * federation api: federation table and db layer * Change the name of kubernetes-dashboard deployment * [k8s] Take container\_infra\_prefix from cluster if specified * Don't run functional jobs on api-ref changes * Fix 
policies for quotas * Use barbicanclient.v1 instead of barbicanclient * Fix image list and usage in contributor quickstart * Fix: functional CI Jobs * doc: Use os\_distro instead of os-distro * Fix Usage of cliff commandmanager * Update docs to use openstack client commands * Update Fedora Atomic image name * Add missing translation for verify\_ca * Updated from global requirements * [k8s] Take kube\_tag from cluster if specified * Leverage heat-container-agent for monitoring * Allow flavor\_id on cluster create * Make docker\_storage\_driver a str instead of enum * Remove intree magnum tempest plugin * [doc-migration] Consolidate install guide * The os\_distro of image is case sensitive * k8s\_atomic: Remove kubelet and kube-proxy from master * Updated from global requirements * Generate lower case stack name * Add verify\_ca configuration parameter * k8s\_atomic: Add server to kubeconfig * Add app.wsgi to target of pep8 * Remove setting of version/release from releasenotes * Updated from global requirements * Fix: magnum devstack installation with tls-proxy * Updated from global requirements * Updated from global requirements * Redundant alias in import statement * Do not use “-y†for package install * Using --option ARGUMENT * Generate stack name as a valid hostname * Zuul: add file extension to playbook path * Doc Fix for Alembic multiple heads error * Add sample policy configuration to doc * Register default magnum service and stat policies in code * Register default certificate policies in code * Register default quota policies in code * Register default cluster template policies in code * Register default cluster policies in code * Register default baymodel policies in code * Register default bay policies in code * Implement basic policy module in code * use keystoneauth1 session in functional test * Fix use of irrelevant-files parameter * Add /etc/environment to flannel/etcd/kubelet * Updated from global requirements * Add labels to api-ref cluster create * 
Migrate to Zuul v3 * Fix user-guide formatting * Fix magnum TLS cert generation * Fix to use the correct hyperlink * Swarm: Incorrect reference to Flannel variables * [swarm-fedora-atomic] fix cluster etcd\_lb protocol definition * Allow master\_flavor\_id on cluster create * Add kube\_dashboard\_enabled label to user guide * Updated from global requirements * Fix prometheus scrape configuration * writing convention: do not use “-y†for package install * k8s\_fedora: Add container\_infra\_prefix label * Add default configuration files to data\_files * Remove SCREEN\_LOGDIR from devstack setting * Updated from global requirements * Avoid running periodic processes inside each worker process * Update CoreDNS to 011 * Updated from global requirements * k8s: Fix node-exporter manifest * Use newer location for iso8601 UTC * Updated from global requirements * Imported Translations from Zanata * writing convention set to use "." to source script files * Updated from global requirements * Imported Translations from Zanata * Update reno for stable/pike * Remove TENANT\_NAME from /etc/sysconfig/heat-params * Fix no\_proxy evaluation for Swarm clusters 5.0.0 ----- * Trivial typo fix * Add a kube\_tag label to control the k8s containers to pull * Launch kube-proxy as a system container * Launch k8s scheduler & controller-manager as system containers * Use atomic containers for kubelet & apiserver * Allow labels on cluster create * Remove /etc/ssl/certs in the controller manager pod * Add default for [cinder]default\_docker\_volume\_type * tests: Use swarm-mode for api tests * Updated from global requirements * Remove deprecated usage of CORS.set\_latent * Deal with db\_exc.DBDuplicate of conductor startup * Remove unused config periodic\_global\_stack\_list * Fix usage of --kubelet-preferred-address arg for apiserver * Copy service configurations also * Clean-up server names in drivers * Imported Translations from Zanata * Remove repeated auth\_url * Move to OpenStack client * 
Fix barbicanclient and swarm-ci * Don't poll heat if no stack exists * Extract kubernetes baremetal ports * Move all kubernetes files in /etc/kubernetes * [doc-migration] Adds configuration folder * [doc-migration] Add user folder for related documents * [doc-migration] Add install folder for related documents * Stop using deprecated 'message' attribute in Exception * Use kubernetes service name in cert request * Updated from global requirements * k8s: Fix apiserver configuration * Fix some reST field lists in docstrings in magnum * Add attribute 'disabled' for service-list * Updated from global requirements * [doc-migration] Add admin folder for related documents * Add swarm-mode driver * Copy cluster nodes logs always whether tests pass or fail * Update URL home-page in documents according to document migration * [Fix ironic gate] Use IP\_VERSION=4 in devstack local.conf * Add a hacking rule for string interpolation at logging String interpolation should be delayed to be handled by the logging code, rather than being done at the point of the logging call. 
See the oslo i18n guideline \* https://docs.openstack.org/oslo.i18n/latest/user/guidelines.html#adding-variables-to-log-messages and \* https://github.com/openstack-dev/hacking/blob/master/hacking/checks/other.py#L39 * Add Cinder-API-ver to k8s-cloud-provider config * Add reno for etcd\_volume\_size label * Use 'sudo' to access /etc/sysconfig/heat-params * Add warning-is-error in setup.cfg * Move the contributor related docs to contributor/ dir * Update Documentation link in README * Switch from oslosphinx to openstackdocstheme * ci: Remove \*\_ssh ironic drivers * k8s-fedora: Add etcd\_volume\_size label * Fix cluster inheritence of docker\_volume\_size * Updated from global requirements * Use DIB\_RELEASE to set fedora-atomic variable defaults * [opensuse] Increase wait\_condition\_timeout * Update .gitignore to ignore .eggs * Enable some off-by-default checks * Allow docker\_volume\_size on cluster create * Add needed details for Magnum Project * Set access\_policy for messaging's dispatcher * Updated from global requirements * Swarm: simplify heat WC signalling with $WAIT\_CURL * Use lowercase keys for swarm waitcondition signal * Fix typo in magnum/hacking/checks.py for consistency * Add api-ref about quotas-delete * Updated from global requirements * Revert "Using assertFalse(A) instead of assertEqual(False, A)" * Fix the unexist url * Updated from global requirements * Move to docker python SDK 2.x.x * Updated from global requirements * Fix wrong references url to right * Remove duplicated hacking rule M318,M319 * fix the function named get\_count\_all * Use get\_rpc\_transport instead of get\_transport * Updated from global requirements * Update the 'service-list' api-ref * Fix html\_last\_updated\_fmt for Python3 * [opensuse] Enabling external loadbalancer feature * k8s-fedora: Add docker\_volume\_type label * Updated from global requirements * Add DC/OS dependency installation script * Optimize the link address * swarm: Add docker\_volume\_type label * 
Add reno for docker\_volume\_type label * Use eventlet executor in rpc\_service * Document docker\_volume\_type option * doc: Add kubernetes example in Launch an instance * Update link to k8s doc and minor formatting * Updated from global requirements * Remove disable script of firewalld * Updated from global requirements * Updated from global requirements * doc: Add Xenial to devstack quickstart guide * Specified cgroup driver * Add CoreDNS deployment in kubernetes atomic * reno: add custom keystone endpoint\_type in configuration * [k8s\_coreos] use host-gw as flannel default driver * [k8s\_coreos] update kubelet args * [k8s\_coreos] enable CoreDNS addon * Fix the link to Cluster Template in quickstart * Add more details to example template * [suse] Build openSUSE Leap 42.1 OpenStack Magnum image * Ignore: Try pxe\_ipmitool since vbmc is used * update doc dcos\_centos\_v1/README.md * fix the devstack\_neutron's url * [k8s\_coreos] update to etcdv3 and kube 1.6 * Updated from global requirements * [k8s-fedora-atomic] fix multimaster cluster * Use 'virt\_type=kvm' in devstack vm if supported * Add release note and doc changes for kube dashboard * Update Steps for creating dib images * Updated from global requirements * Update doc 'functional-test.rst' * TrivialFix: Typo in launch-instances.rst * Add Command for using default docker log-driver * Updated from global requirements * Update api-ref about 'ca-show' * Pass a mutable target to oslo policy enforcer * CI: multinode job with larger flavors * Fix rexray systemd unit * update the detail of the latest fedora atomic image * informations -> information * Add 'keypair' to 'list all clusters' response * Updated from global requirements * Set clustertemplate:publish to admin only * [k8s\_coreos] Avoid regenerating certs on reboot * Support magnum-conductor multiple process workers * Enable custom keystone endpoint\_type in templates * [k8s\_coreos] Add kubernetes dashboard * Add kube dashboard and remove kube ui * 
Fix the API Microversions's doc * Added tempest to test-requirements * Adding quota unit test * [suse] Add DOCKER\_DEV to /etc/fstab * [suse] Remove defaults network from child templates * Updated from global requirements * Fix config type of copy\_logs from string to Boolean * Fix keystone auth\_uri and auth\_url * Replace "bay" with "cluster" in user guide * Update SUSE distro information in install guide * Add net creating in install-guide * Updated from global requirements * Remove kube-examples software configs * Fix CoreOS multi master with LB cluster creation * Fix CoreOS cluster creation and heat notify * Support dcos installation on centos vm cluster * Fix usage of the trustee user in K8S Cinder plugin * Fix gate: Revert mesos image to ocata * Remove old oslo.messaging transport aliases * Install client in install guide instructions * Fix database grant instructions in install guide * Add 'rm -f .testrepository/times.dbm' command in testenv * Update Fedora images * Format the quickstart doc * Remove log translations * Add reno for cluster\_user\_trust option * Fix db config * ci: Rename ssh key * Use 'os\_distro' instead of 'os-distro' * Add "ca-rotate" command to userguide * Unbreak gate * Move cover.sh to the tools directory * Add CoreOS/K8s recommended defaults to kube-proxy * Remove support message for using keypair UUID * Updated from global requirements * [k8s] Monitoring with Prometheus and Grafana * Fix some grammar or spelling de-normalization * Remove unused logging import * Update quickstart to use OpenStack CLI * Fix exception codes * Glance v1 is deprecated and removed in devstack [1] * Delete redundant Magnum::Optional::Neutron::FloatingIP * Indicating the location tests directory in oslo\_debug\_helper * Updated from global requirements * Updated from global requirements * Pass 'context' to create\_client\_files method * Fix api-ref with Sphinx 1.5 * Update docs to use positional name argument * Set k8s apiserver preferred address type arg * 
Set is\_admin flag correctly in RequestContext * Add WSGI script to deploy Magnum behind Apache * [suse] Add TLS support for k8s\_opensuse\_v1 driver * Update test requirement * Fix hyperkube\_image\_repo * Add admission control to CoreOS Driver * Prepare Kubelet for multiple container runtime * Remove reliance on osprofiler configuration section * Pass 'client', 'message' param to AuthorizationFailure Exception * Fix: mesos gate tests * Validate project-id on quota create * Magnum Development Policies * Missing root-ca-file parameter for proper service account support * [suse] Add SERVICE\_ACCOUNT\_KEY to Kuberneres cluster configuration * Add Kubernetes API Service IP to x509 certificates * Update reno for stable/ocata * Fix quota API get-all parameter type * Make INSECURE\_REGISTRY\_URL works for CoreOS 4.1.0 ----- * Fix some typos * Fix for cluster-update rollback issue * Add keypair to api-ref cluster create * Fix quotas API pagination * [doc] install 'curl' as a prerequisite * Use variables for hyperkube and kube version * Switch to kubernetes upstream python client * Updated from global requirements * Add reno: bp secure-etcd-cluster-coe * Updated from global requirements * Remove $myip when unnecessary and use KUBE\_NODE\_IP * Make KUBE\_ALLOW\_PRIV used for api server * Add microversion and release notes for quotas API * Don't enforce microversion for stats API * Fix CVE-2016-7404 * Remove heat-params sourcing * Improve consistency for SSL PATH accross template * Remove support for py34 * Don't enforce microversion for rotate CA cert API * Remove carriage return when getting user token * Use https instead of http for git.openstack.org * [mesos] Use latest build for mesos image * Don't create clusters of an unsupported type * Fix missing $ in CoreOS proxy conf * Use heat-params in systemd unit * Trivial: Fix typo in exception message * K8S: Allows to specify admission control plugins to enable * Use right no proxy settings for swarm master and agent * 
Remove unused enforce\_cluster\_types decorator * [k8s] Get logs of controller-manager and scheduler 4.0.0 ----- * Pass OpenStack-API-Version header in make-cert scripts * Make Kubernetes pods' health checks configurable * Upgrade to Fedora 25 * Updated from global requirements * Resource Quota - API documentation * Resource Quota - Limit clusters per project * Add release note for BP OSProfiler in Magnum * Fix: Pass external\_network to kube-minion * Updated from global requirements * Update MY\_IP to use curl and metadata instead of cut * Fix getting capacity in k8s\_monitor * Add an API to rotate a cluster CA certificate * Integrate OSProfiler in Magnum * Fix Ironic driver * Resource Quota - Adding quota API * Resource Quota - DB layer changes * Resource Quota - Add config option to limit clusters * Move scale managers at driver level * Move monitors at driver level * Fix LB heat template parameter name * [Doc] Update User Guide: User Examples * Updated from global requirements * Fix compatibility with novaclient 7.0.0 * Add debug-py34 to tox.ini * [k8s\_ironic] Move software configs out of minion * Magnum stats API documentation * [Mesos]Move software configs out of resource group * [Mesos]Move wait condition out of resource group * [k8s\_ironic] Move wc out of master resource group * [k8s\_ironic] Move wc out of minion resource group * Magnum stats API * [devstack] Copy bash\_completion script during magnum installation * Remove extra spaces * [Doc] Update quickstart Guide: Using a Kubernetes Cluster * Updated from global requirements * [swarm] Fix cert filename in swarm-agent service * Remove unused context variable in db api * [suse] Fix flanneld overlay network configuration * [swarm] Enable TLS in Etcd cluster * CI: Set storage driver to overlay * CI: Increase master-flavor size * [suse] Update security group for kube\_masters * [suse] Add min and max to flannel\_network\_subnet option * Make private network optional * Support magnum-api multiple process 
workers * Fix the incorrect initialization of context roles * used openstack cli in magnum devstack plugin * Use Kubernetes config to launch services pods * Fully clean up requirement.txt dependencies * [suse] Update k8s\_opensuse\_v1 driver * Remove the usage of MagnumObjectDictCompat from magnum\_service * [suse] Tune default value for docker\_volume\_size * Fix gate: caused by tempest(removal of "service" param) * Remove PrettyTable useless requirement * Modify variable's using method in Log Messages * [suse] Setting correct permissions for Kubernetes files * Updated from global requirements * Remove provision\_state parameters(specific to ironic) * Add cluster record to db right after API request * [k8s\_coreos] Enable TLS in Etcd cluster * [k8s\_coreos] Remove podmaster * Updated from global requirements * Removes unnecessary utf-8 encoding * Use correct context synching status * Make Docker proxy configuration consistent across template * Remove the usage of MagnumObjectDictCompat from certificate * Fix multiple typos in unit tests names * List all the possibilities of cluster's name through a list * Specification for Magnum stats API * Remove the usage of MagnumObjectDictCompat from x509keypair * Import magnum.i18n.\_ in driver/heat/driver.py * Updated from global requirements * Use UUID instead of "00000" for UniqueId * Update Swarm version to 1.2.5 * cors: update default configuration * Updated from global requirements * [suse] Allow k8s cluster without floating ip * [suse] add support of LBaaS v2 * [suse] Add proxy config * [suse] Fix template descriptions * Change gate Fedora Atomic image to the automated f24 build * Add docker-d options in sysconfig/docker * [install] Fix endpoint creation * Disable horizon, ceilomter and swift in gate hook * Consolidate heat network resources * Updated from global requirements * Missing lines in lb refactor for CoreOS driver * [k8s\_fedora\_atomic] Enable TLS in Etcd cluster * Remove docker\_volume\_size from 
functional-test * Disable horizon, swift and ceilometer * Move cluster status notifications out of driver * Add bashate checks to pep8 step * Add a SELinux policy to relabel files in /usr/local/bin as bin\_t * [doc|install\_guide] Fix 'host' config param in [api] section * Updated from global requirements * Factorize load balancer code into its own template * [ironic][doc] Updated ironic image build doc * [k8s\_fedora\_atomic] Remove podmaster * functional: don't create flavors if ironic testing * DIB elements to support dcos for magnum * Use keystone v3 for functional tests * [mesos]remove redundant security group * Disable lbaas from ci tests * func-test-docs: Use iniget and set concurrecy 1 * Move cluster status updates into driver * Refactor driver interface (pt 1) * k8s\_ironic: fix minion template * Add RESUME\_FAILED to cluster's status field * Remove underscores from Nova server names * Doc: update server type in userguide * Show team and repo badges on README * Updated from global requirements * Improve security for swarm * Remove KEYSTONE\_CATALOG\_BACKEND from magnum plugin * [trivial] Fix DIB element path in Readme * [suse] Add hidden attr to password in the Heat Template * Revert "devstack: Fix neutron configuration to run in OSIC" * Fix few typos in documents * Reduce security groups # for k8s coreos cluster * Use 'code-block' for pieces of code * Fix a typo * Updated from global requirements * Add Flatten Attributes Specification * Fix typo in cover.sh * Drop id suffix in launch-an-instance guide * [docs]Update quickstart guide to use cluster-config command * Set config param [DEFAULT]/host to hostname * Combine master security groups in k8s driver * Remove out-dated method for installing in Devstack * [install] Update rabbitmq configuration * Updates Documentation for non-ID Params * Make cinder volume optional * Add insecure option in functional tests for SSL endpoints * remove extra bracket from script in docs * typo: Fix in docker storage 
configuration * Updated from global requirements * Restart swarm infra containers if deleted * Remove unused configure-flannel.sh * Fix: InvalidParameterValue Exception not raised correctly * Updated from global requirements * Add use of label 'swarm\_strategy' in userguide * Support scheduler strategy for swarm cluster * Updated from global requirements * Updated from global requirements * Add user-domain in role creation * [instll] Update a more simple rabbitmq configuration * Add http\_proxy\_to\_wsgi to api-paste * Enable DeprecationWarning in test environments * [suse] configure flanneld on master node * [suse] Update copyright/ownership information * Fix magnum cluster-update error * Added reno for stable/mitaka and stable/liberty * [suse] Sync with cluster drivers * Use function is\_valid\_mac from oslo.utils * fix cover.sh to allow db version changes without ut * [Trivial] Fix two typos in magnum * add some tests for db * add some tests for cluster and clustertemplate api * Remove pod/svc/container object reference from doc * Move cluster delete method to driver * Replace naked exceptions in barbican\_cert\_manager * corrected hyperlink typo fix * Updated from global requirements * add cluster and clustertemplate to fake\_policy.py * Enable release notes translation * Fix magnum-template-manage * Add docker daemon systemd proxy variables * Remove unnecessary fingerprint of MyObj object * Fix typo: clustser-->cluster in python\_client\_base.py * Make k8s cloud config consistent * Centralize config option: docker\_registry section * Centralize config option: urlfetch and periodic * Clean rc from unit tests * Fix the config args of kubernetes service * Fix PEP8 issues, OpenStack Licencing and Version details * Remove rc from policy.json * Disable cert checks while talking to endpoints * Allow keypair to be added during cluster create * Cluster Drivers * Updated from global requirements * [api-ref] configure LogABug feature * Remove fixed\_network from 
functional tests * devstack: Fix neutron configuration to run in OSIC * [coreos] Allow k8s cluster without floating ip * [api-ref] Remove temporary block in conf.py * Add dns server access confirmation * Revises 'json' to 'JSON' and 'yaml' to 'YAML' * Remove not really translated file * Implement mesos cluster smart scale down * Fix failure of systemd service kube-ui * [k8s\_common]Remove enable-etcd.sh * Fix typo 'mesoscluster' to 'mesos-cluster' * Fix K8s load balancer with LBaaS v1 * [mesos]Fix output param: mesos\_slaves\_private * Remove safe\_utils.py * Remove yamlutils.py * Remove k8s\_manifest.py * Remove Exceptions for Container/Pod/Service * [mesos] Make dib scipts executable * Remove unnecessary use of sudo in k8s scripts * Using sys.exit(main()) instead of main() * Change several RabbitMQ config settings * Updated from global requirements * Remove default=None when set value in Config * Fix quickstart guide URL * Fix typo 'duplcate' to 'duplicate' in status.yaml * Update Fedora Atomic element from 23 to 24 * Centralize config option: x509 section * Centralize config option: keystone\_auth section * Centralize config option: trust section * Centralize config option: certificates section * Centralize config option: docker section * Centralize config option: service section * Centralize config option: rpc periodic section * Centralize config option: utils section * Centralize config option: database section * Centralize config option: paths section * Centralize config option: cluster\_heat section * Centralize config option: cluster\_template section * Fix k8s\_fedora to work with cinder volume driver * Centralize config option: conductor section * Centralize config option: cluster section * Centralize config option: all clients section * Centralize config option: api section * Add Horizon and Native Clients to user guide * Update name of ubuntu-mesos image * Split swarm atomic template * Updated from global requirements * Register master node but make it 
non schedulable * Remove duplicate AUTH\_URL parameter * Remove unnecessary setUp and tearDown * Init magnum centralize config * Update reno for stable/newton * Delete coreos driver elements directory 3.1.0 ----- * Updates Ubuntu Mesos build * [install] Fix keystone\_authtoken and trust sections * Add optional magnum-ui in quickstart * Restrict server type only to vm/bm * delete python bytecode including pyo before every test run * Updated from global requirements * [install] Fix the cli install instructions * [install] Fix optional services bullet-list * Fix the order of enabling devstack plugin * Update kubernetes external load balancer dev guide * [suse] Fix OS::stack\_id in kubeminion * Use heat devstack plugin * [install] Add cli install in IT * [install] Add launch an instance section * [install] Update required services and remove bay * Add exceptions to cluster db to show failures * [suse] Sync heat template version with other drivers * [suse] Rename bay to cluster * TrivialFix: Remove logging import unused * Change the type of flannel\_network\_subnetlen to 'number' * Create sysconfig mount for kubernetes controller mgr * Import environment variables from testenv * Updated from global requirements * Split k8s atomic vm and ironic drivers * Create bay/cluster api reference * Disable lbaas on k8s-ironic job * Create baymodel/cluster template api reference * Add Scaling section to User Guide * Add Support of LBaaS v2 API * Rename Bay DB, Object, and internal usage to Cluster * Fix swarm functional tests * Add support for overlay networks in Swarm * Fixed fetching api\_server address * Update fedora image for ironic driver * Improve unit test coverage for cmd/db\_manage.py * Make magnum manage\_template read config file and increase coverage * Remove magnum service Dockerfile * Factor out common k8s definitions and mappings * Consolidate enable docker registery fragments * Clean imports in code * Add rexray volume driver to Swarm * Fix typo in quickstart guide 
* Update documentation with bay/cluster version info * Add python-dev and kpartx to mesos img build * Fix mesos image dockerfile elements location * Fix dev quickstart pointer to mesos img build * Consolidate configure docker storage fragments * Fix release note * Updates drivers from BayModel to ClusterTemplate * Rename BayModel DB, Object, and internal usage to ClusterTemplate * Rename bay to cluster in certificate object and references * Correctly raising MagnumServiceNotFound exception * Update service-list output in quickstart * Use cls in class method and remove unused CONF * Add missing release notes * Updates CONF usage from bay to cluster 3.0.0 ----- * Rename Bay to Cluster in functional tests * Include version info in bay/cluster show operation * Install Guide: Set bug project * Fix bay status: after bay-delete status is not DELETE\_IN\_PROGRESS * Correction in quickstart * Fix incorrect reference to bay-template-example.html * Revert "Update mesos slave to mesos agent" * Create certificates api reference * Create mservices api reference * Create version api reference * Updated from global requirements * Init api-ref structure and requirements * Compare test coverage with the master branch * Cleanup coverage configuration * Removed not required style.css file * To use cinder with rexray downgrade to version: 0.3.3 * Rename Bay to Cluster in docs * Add cluster to cert commands * Add history for API versions supported by magnum * Use werkzeug to run Magnum API with SSL * Make templates env path be const variable * Allow k8s cluster without Floating IP * Bay to Cluster api cleanup * Openvswitch image build * Get mandatory patch attrs from WSME properties * Clean up docstrings in BayModel * Simplify test\_create\_list\_sign\_delete\_clusters() tempest test * Restrict magnum service name * Updated from global requirements * Revert "Use symlinks for common template files" * Add Mesos labels and summary for labels * Rename Bay to Cluster in api * Updates k8s 
example rc to use correct label * Remove reference: 'modindex' from releasenotes documentation * Use upper constraints for all jobs in tox.ini * Add floating\_ip\_enabled field to baymodel * Increase in UT coverage * Fix tempest.conf generation * Align k8s CoreOS with atomic: add proxy config * Update to User Guide * Rollback bay on update failure * Set bay status: DELETE\_IN\_PROGRESS before updated by poll * Add i18n translation for Log messages * Increase test coverage * Fix an issue on kube-proxy in CoreOS bay * Fix the CoreOS fragment write-kubeconfig.yaml * Correct the get\_file patch in CoreOS template * Increased UT of magnum/api/app.py * Updated from global requirements * Add test for update baymodel public * Improve unit test coverage for cmd/conductor.py * Improve unit test coverage for cmd/api.py * Improve unit test coverage for common/service.py * Change stacks:global\_index heat policy to context\_is\_admin * Support for async bay operations * Fix indentation and if expressions in make-cert * Use memory mode for sqlite in db test * Functional: validate OpenStack resources * Use symlinks for common template files * Remove ReplicationController object * Add openSUSE driver support to Magnum * Increased test coverage * Remove Invalid README.md for mesos * Remove Invalid README.md for k8s * Makes config file generation reproducible * Add functional test for k8s ironic * Fix ironic template * Re: Remove dependency of metadata service * Support HA for k8s coreos bay * Pass missing variables to heat-params * Updated from global requirements * Use kubelet-wrapper provided by CoreOS * Remove kube-user.yaml * Fix copying logs from nodes * Fix for enum type docker\_storage\_driver * Updated from global requirements * Add microversioning support for methods * Correct hyperlink syntax in userguide * Restricted Magnum service state to 'up' and 'down' * Add support for master elected component * Drop MANIFEST.in - it's not needed by pbr * API: restrict length of 
bay's name to 242 * Updated from global requirements * Remove container object * Add TLS section to User Guide * Add functional test for public baymodel * Add hacking rule for explicit import of \_ function * modify the home-page info with the developer documentation * Add functional test for image/flavor validation * Create a base class for tempest tests * Add Bay section to User Guide * Remove unnecessary code * Consolidate heat fragments * Fix some simple mistake * Bay name must start with alphabets only * k8s\_coreos\_driver: cleanup file naming * Fix global stack list in periodic task * De-duplicate the decouple-LBaaS-related files * Corrected import module in gmr.rst * k8s: Remove unused volume mount for kube-proxy * Added hacking check to ensure LOG.warn is not used * Fix typo in baymodel param * Move common/fragments into templates directory * Pass private ip address to scale manager * Updated from global requirements * fix bug for configure-kubernetes-minion.sh * Fix the permission of these files -rwxr-xr-x * Add Mesos section to User Guide * Set swarm api\_address protocol to tcp on all cases * Correction in heat template description * Add check on docker\_volume\_size * [install] Add debian and ubunutu IGs * [install] Refactor configuration in IG * Updated from global requirements * Removed unwanted files * add hacking for assertIsNotNone * Fix wrong COE name in template * modify test\_assert\_is\_not\_none * Formatting userguide * Remove repeated WaitConditionHandle resource * Update mesos slave to mesos agent * Updated from global requirements * Add i18n support for some ERROR message * Replace "LOG.info(\_" with "LOG.info(\_LI" * Fix for k8s bay creation stall * Allow swarm cluster without LBaaS * Fix bug for write-kube-os-config.sh * Support the OpenStack-API-Version header * Updated from global requirements * Allow mesos cluster without LBaaS * Replace assertEqual(None, \*) with assertIsNone in tests * Correction in kube-ui-service.sh script * Fix 
OS::stack\_id is set as stack id instead of private ip * Remove unused LOG to keep code clean * Nit documentation formatting * Add Python 3.5 classifier and venv * Update default version of heat template * Correct the rest of the reraising of exception * k8s coreos bay driver * Bay driver: k8s Fedora Atomic * Add "WAIT\_CURL" parameter to the template of swarm * tempest: Allow the old-style name project name * Nit document formatted * Updates microversion root and error messages * Remove dependency of metadata service * Add description to the output\_key of stack * Correct reraising of exception * Move common bay drivers fragments in common dir * tempest: Don't hardcode external network id * Fix string declaration in periodic.py * Misspelled text corresponding to method 'get\_template\_definition' is commited * Change the type of flannel\_network\_subnetlen to 'number' * Delete unused discovery\_url for swarm * Allow k8s cluster without LBaaS * Mesos-Ubuntu bay driver implementation * Bay driver implementation * Move Initialization of variables inside if/else * Improve validation for the external network parameter * Add a explanatory text when flavor is None * Bay\_create\_timeout should be set to 60 default * Fix typos for Magnum * Fixed typo for Availability * Fix typos in resource-quotas.rst * Add Bay Drivers section in user guide * Updated from global requirements * Change service name from "magnum" to "container-infra" * Delete certs when deleting bay * Add fixed\_subnet field to baymodel * Improve unit test coverage * Validate discovery url when create a bay * Fix typo in create-trustee-user-for-each-bay.rst * Fix typo in async-container-operation.rst * Add Baymodel section to User Guide * [install] Add obs install-guide * Fix file permission in dib elements * Add master\_lb\_enabled field to baymodel * Allow Bay templates to include Heat environments * Pass some common cert related arguments to clients * Fix DIB dependencies for >= Fedora 22 * Fix docker 
storage drivers configuration * Updated from global requirements * Delete unused cert\_group variable * Modify mesos template to support removal policy * Add x509keypair\_cert\_manager to store certs in DB * [install] Add install guide from template for rdo * Add Swarm section to User Guide * Remove K8sResourceBase * Updated from global requirements * Make 'signing csr' accept Unicode CA Private key * Updated from global requirements * Modify the manual-devstack document for copying api-paste.ini * Wrong parameter in InvalidName exception message * Auto generate Bay/BayModel name * Use kojipkgs for diskimage-builder * Moving feroda atomic image to the bay driver folder * Fix typo in open-dcos.rst file * Load heat-params before setting nounset * Updated from global requirements * Remove unused POT files * Add Kubernetes section to User Guide * Gate: fix the credential object type error * Change here doc limit strings to fix EOF in EOF * Fix cli usage to get ca.crt and client.crt * Set 'nested\_depth=2' when calling heat.resources.list * Updated from global requirements * Run the unit tests to test magnum objects * First check whether output\_value is None * Duplicated parameter definition in template * Put fault info of bay resources into bay-show outputs * Delete duplicate statement * Support trustee\_domain\_name in config file * Fix get\_coe\_valodator() clear unused Validator * Fix indentation in install-guide * Updated from global requirements * [install] Add install guide from source * Update microversion header to include service type magnum * Fix string format in cmd/conductor * Remove service object * Spec for Open DC/OS and Magnum Integration * Add docker-storage-driver attribute to baymodel * Update swarm templates to use Heat resources * Fix Kubernetes-related deprecation in quickstart * Update for Swarm Bay quickstart docs * Add Bay Drivers specification * Updated from global requirements * X509keypair cleanup * Delete unused \_admin\_client variable * 
Updated from global requirements * Support using insecure registry for k8s COE * Fix an EndpointNotFound error * Updated from global requirements * Use fixtures.TempDir in unit tests * Remove pod object * Remove redundant utils code * devstack: fix magnum service name in is\_magnum\_enabled * Fix spelling error on get\_docker\_quantity method * Use oslo\_utils.is\_int\_like support * Use oslo\_utils.uuidutils support * Remove redundant exceptions code * Add accidentally deleted test\_hooks.py * Gate: fix tempest config error * Update Magnum service name and description * Updated from global requirements * Document usage of notifications * Add insecure\_registry column to baymoddel * Remove k8s APIs pod, rcs, svc and container API * Register k8s node but make it unschedulable * Add mesos\_slave\_executor\_env\_variables validate * Fix the swarm test for gate * Add Storage section in user guide * Updated from global requirements * Emit notifications when bay operations get executed * Fix two issues on k8s bay * Update Image section in user guide * Added "Choosing a COE" to user guide * Move k8s specific terms to k8s section * Code refactoring in conductor/k8s\_api.py * Honor insecure and cafile options in a trustee session * Updated from global requirements * Fix the quickstart guide for using kubectl * Updated from global requirements * Correct attribute name in TestListBayModel * Update documentation to use native APIs * Updated from global requirements * Cleanup in Mesos template * Add troubleshooting steps for trustee creation * Always expand Baymodel fields * Correct parameter order for assertEqual() method * Add mesos\_slave\_image\_providers validate * Corrected spelling mistake in quickstart.rst * Revert "Remove KUBE\_API\_PUBLIC\_ADDRESS" * Updated from global requirements * Enable TLS support for k8s CoreOS * Use the latest atomic image name * Start using fedora atomic images that live in our mirrors * Add mesos\_slave\_isolation validate * Add tox test for 
k8s coreos bay * Updated from global requirements * Fix parameter mismatch in CoreOS templates * Copy logs if test failed and bay nodes existed * Remove KUBE\_API\_PUBLIC\_ADDRESS * Update docs to use the latest image link * Replace tempest-lib with tempest.lib * Add docker registry support for swarm * Updated from global requirements * [Trivial] Remove executable privilege of doc/source/conf.py * Updated from global requirements * Functional: Add prefix when copy logs on failure * Update outdated doc index file * Cleanup some validation functions * Healthcheck Middleware * Add script to validate fedora atomic images * Heat params are different in swarm master and swarm node * Grab heat-params for debugging * Updated from global requirements * Enable Mesos Bay export more slave flags * Log copy for failed functional tests cannot be disabled * devstack: Use magnum-api and magnum-cond for services * Fix container-create memory not passed * Imported Translations from Zanata * Fix specs reference rst format * Remove constraints envs from tox.ini * Fix post jobs * Imported Translations from Zanata * Use k8sclient library * Gate: Remove neutron-lbaas devstack plugin * Functional tests should support DNS nameserver config * Fix bashisms in k8s conf minion template fragment * Fix bashisms in k8s os config template fragment * Docs: switch to neutron-lbaas plugin * Move project-configs to gate hook * Updated from global requirements * Fix bashisms found in swarm template fragments * Config docker registry in devstack * Add support for docker registry * Updated from global requirements * Fix the rst url format * Add subjectAltName back to CSR config * Fix bashisms found in shell scripts * Fix uuid cases with real UUID * replace wsexpose by magnum.api.expose.expose * Add script to install image build dependencies * Fix doc for certificate * Format template * update doc for ca-show and ca-sign * Notify Heat only if kube-apiserver is running * Update Kube version for latest 
image * Fix two issues that broke the gate * Updated from global requirements * Doc: fix flannel etcd key * Fix wrong parameter while creating bay * Use fedorapeople for getting fedora image * Fix an incorrect key path on copying logs * Bay can not be deleted by other users in the same project * Use trust for tls cert generation in swarm * Add cpu util to K8sMonitor * Add reno to Magnum * Updated from global requirements * Magnum's tox test should respect upper-constraints * Switch to Atomic 23 * Revert "Gate: fix AttributeError: load\_pem\_x509\_csr" * Update Using Container Volume Integration Feature doc * Add Container Volume Model into Kubernetes Heat Templates * Add cpu util to MesosMonitor * Generate fedora-atomic images using dib * Fix config error * Fix typos in Magnum files * Cleanup duplicated auth\_url in k8scluster/master template * Remove the "Patch" function * Use trust for tls generation * Fix usage of registering magnum endpoint * Fix bashisms in enable-kube scripts * Refactor Keystone client with keystoneauth * Remove unnecessary blank at command line usage * cleanup usage of LOG.debug in magnum * Add hacking check to ensure not use xrange() * Allow update baymodel's public field even if referenced * Cleanup container client api rewrite function * Release certs/trust when creating bay is failed * Allow show public baymodel * Use bay to init K8sAPI instead of bay\_uuid * Allow to parameterize image name in tests * Make kubernetes image version united into a variable * Gate: fix AttributeError: load\_pem\_x509\_csr * Raise OperationInProgres(400) when deleting bay conflict 2.0.0 ----- * Add flannel's host-gw backend option * Add the container volume integration document * The type of node\_count is number * Fix config parser error magnum-template-manage list-templates * Replace hardcoded eth0 interface in scripts * Cleanup dict usage in bay\_conductor * Pass host\_config if docker api version >=1.19 * Add Image Management section in User Guide * Add 
tests for container action policy * Functional: Remove unused log copying * Refactor bay\_conductor to split trust methods * Rename flavor name used in gate tests * register the config generator default hook with the right name * Fix baymodel with invalid parameter can updated * Replace deprecated LOG.warn with LOG.warning * devstack: Comment out some environment dependent neutron settings * devstack: Add python3.4-dev to quickstart prereqs * Remove the redundant code * Moved CORS middleware configuration into oslo-config-generator * Remove bandit.yaml in favor of defaults * Mark trustee\_domain\_admin\_password secret * Pass target in enforce * Bay status returns None initially after create * Spec for asynchronous container operations * Enable SELinux in swarm bay * Add setup methods for trust config in dev document * Add missing cinder\_client config * Functional test for flavor validation in bay creation * remove devstack/create\_magnum\_conf\_magnum\_network * Functional: Wait for swarm bay creation * Remove method which has no rpc calls * Load wsgi app(api) with paste.deploy * Revert "Turn selinux back on after cloud-init" * Fix log message error when create trustee failed * Functional: Set private key outside of remote\_exec * Updated from global requirements * Remove minion dependency on master * Add external\_network unit test for post baymodel * Add flavor\_id unit test for post baymodel * Add auth\_url * Magnum api show wrong bookmark link for baymodels * limit access to certificate and container:create * Fix baymodel with invalid parameter can created * Adds standardised error messages * Add Container Volume Model into Mesos Heat Templates * Fix Definitions part for container-networking-model.rst * Use obj\_attr\_is\_set to check whether an attr is set in oslo\_versionedobject * handle bytes list in api middleware * Correctly compare utf8 strings * Fix x509 cert generation python3 compability * Use str() to generate IOError exception message * Fix the 
jenkins run script * Ignore the generated config file * Add py34 to tox envlist * Copy logs on test failure * Add trust info * Add hidden attr to password in the Heat Template * Use exception.faultstring instead of exception.message * Do not use translate to delete chars * Convert bytes to string in get\_id for python3 compatibility * Encode string before hash it * Use specific key to sort list of dicts * Use six.moves.reload\_module instead of builtin reload * Avoid compare None type using min() * Return correct object type * Fix api access with public acl routes * Get region\_name that volume\_driver rexray region\_name needs * Initial command-line interface documentation * Improved tests for updating bay properties * Remove unused attribute "ssh\_authorized\_key" * Add skipped RST files to toctree * Resource Quota - Introduce Quota Table * certificate sign with a non-existing cert should throw HTTP 400 * Remove redundant password when create create\_trustee * Remove duplicate X-Roles * Rename get\_rpc\_resource to get\_resource * Updated from global requirements * Added documentation to BayModel attrs * Add etcd troubleshooting * Add Flannel troubleshooting * Init oslo\_context before magnum context init * Updated from global requirements * Fix gate for client and devstack * Rename network driver name in Validator class * Avoid to create $SCREEN\_LOGDIR * Add trust info into heat params * Replace string format arguments with function parameters * Add master\_flavor\_id to baymodel data function test * Updated from global requirements * Add tempest logging to bay\_client and test\_bay helper methods * devstack: Comment out logging configuration * Add \`q-lbaas\` to manual-devstack.rst * Add missing test-requirements * Create a trustee user for each bay * Fix misleading M310 unit test outputs * Updated from global requirements * Fix string formatting bug * Cleanup unused conf variables * Updated from global requirements * Add magnum certificate api tests * Bay 
test cleanup * Reduce memory consumption of gate tests * Make bandit job voting * Turn selinux back on after cloud-init * Enable swift services * Fix invalid import order * Updated from global requirements * Fix gate issues with functional-api job * API: Move validate\_properties to REST API layer * Change BayModel#coe type to wtypes.Enum * Change Bay#status type to wtypes.Enum * Updated from global requirements * Remove node object from Magnum * Enable Tempest without devstack * Minor tweak to simplify api validator code * Correct internal links syntax error * Add more types of status to Bay's status * Revert "Fix socket descriptor leak" * Update functional test docs * Propose Magnum Resource Quota * Add Pod, Service, Replication Controller terms * Fixed a DBerror on reducing node\_count on bay * Update the spec for container network attributes * Troubleshooting Kubernetes networking * Add func test to validate baymodel-update referenced by bay * Removed unused config coreos\_discovery\_token\_url * Networking user guide * Replace logging with oslo\_log * Use keystone v2.0 in gates * Cleanup MagnumService Object usage * Add introduce doc how to generate magnum.conf.sample * Remove unused hacking rule from HACKING.rst * Add python 3 support * Validates baymodel volume\_driver patch requests * Validates baymodel volume\_driver requests * Fixed an issue that prevent kube-proxy to start * Add initial terminology definitions * Document how to create a CoreOS bay * Spec for trust * Use magnum-config-generator.conf to generate Magnum config sample file * Updated from global requirements * Remove dev prefix in magnum/doc/source/dev * Adds volume\_driver in db api * Fix typo in comment of several files * UT: cleanup in API test cases for response attr verification * Fixing typo in comment of several files * Revert "fixed\_network should be fixed\_network\_cidr" * Updated from global requirements * Improve Tempest credential provider usage * Fix the network\_driver update 
invalid parameter can be update * Add troubleshooting for network * Updated from global requirements * Add volume to Kub master * properly sign a certificate without bay name * Fix the CoreOS template definition * Remove redundant checks * Updated from global requirements * "notification\_driver" from group "DEFAULT" is deprecated * Fix the CoreOS Heat templates * Add initial documentation for troubleshooting gate * Imported Translations from Zanata * Change 404 to 400 when resource not found exception raises * Add debug testenv in tox * Updated from global requirements * Revert "Pass environment variables of proxy to tox" * Gate: Fixed an Unauthorized error on api tests * Add bay status attr value list for API consumer * Use oslo.i18n in magnum/api/controllers/v1/service.py * Enable test\_magnum\_service\_list\_needs\_admin * Do not use inner class of glanceclient * Do not use inner class of heatclient * Do not use inner class of novaclient * Fix ignored E711 rule and remove this exception * Proxy support for Mesos cluster * Remove unnecessary setting of default node\_count * Use bay name as Mesos cluster name * Delete negative case of create bay without name * Add the k8s and mesos proxy doc * Highlighted NOTE in dev document * Disallow updating baymodel when it is referenced * Fix doc comment for default value * Fix doc comment for \`baymodel\_id\` attr * A bay without name should be successfully created * Updated from global requirements * Add magnum bay api tempest tests * Propose Magnum Volume Integration Model * Add mandatory\_attrs to BayModelPatchType * Highlighted NOTE in magnum-proxy.rst * Added Keystone and RequestID headers to CORS middleware * Don't add exception in msg when using LOG.exception * Increase size of Docker volume size * Add policy enforcement unittest to magnum\_service * Add copying tempest.conf instructions to guide * Fix Docker storage configuration for Swarm * Update kube-ui to v4 * Updated from global requirements * Skeleton for 
User Guide * Fix the content of 'discovery\_endpoint' not show up in exception * Add proxy for mesos * Skeleton for Troubleshooting Guide * Add the lost M338 in Hacking * Fix socket descriptor leak * Switch to using dynamic credentials in tempest tests * oslo\_messaging requires stop() before wait() * (Quick-Fix) Adds back tempest identity back to gate * Functional: Fix mesos baymodel creation case * Fixed the incorrect policy enforcement * WSGI enforce fails should return 403 instead of 500 * Updated from global requirements * Remove redundant code * HTTP 400 instead of 500 when Unicode Bay name * Correct wrong parameter passing when create k8s\_api in k8s monitor * Functional: Add mesos functional bay creation basic testing frame work * Do not use \_\_builtin\_\_ in python3 * Trivial: Remove vim header in source files * Use six.moves.reload\_module instead of builtin reload * Devstack: Fix typo of MANGUM\_GUEST\_IMAGE\_URL * Python 3 deprecated the logger.warn method in favor of warning * Raise exception when failed to get discovery\_url * Trivial: Remove unused logging import * Devstack: support download ubuntu image * bay-show doesn't return stack\_id * Remove oslo-incubator code from Magnum * Use cliff instead of cliutils * Keep py3.X compatibility for urllib * SIGUSR1 is deprecated in Guru mediation * Migration to utilize tempest plugin * "lock\_path" from group "DEFAULT" is deprecated * Replace dict.iteritems() with dict.items() * Separate flavor between master and agent node * Gate: Fix docker swarm disconnect issue * Move swarm-agent out of swarm master node * Updated from global requirements * Make kubernetes API client generic * Gate: Attempt to fix a memory allocation error * Clean up baymodel query of usage from a bay * Object: Add BayModel as an ObjectField to Bay object * Errors in docker registry configuration * Enable docker registry in heat template * Gate: Fixed an empty service catalog error * Move Kubernetes proxy to the container * Remove baylock 
* API: enforce bay type when do rc/service/pod api actions * Update dev-quickstart.rst * Improve tox to show coverage results * Updated from global requirements * Change $LOGFILE path in configuration devstack * API: add filters when try to list containers * Object: Add filters to contianer list * Create trust\_id for bay * Handle the case that stack has no "outputs" * Always log if disconnect from docker swarm * Copy Docker containers configs/logs * Updated from global requirements * Add retrieve\_bay\_uuid in conductor\_utils * The type of number\_of\_masters should be int not string * Updated from global requirements * use wild card for passing env variables * Refactor image check in Baymodel * Validate image when create a Bay * Avoid to use common.cert\_manager directly * Swarm: Cleanup of swarm heat template * Avoid to use keystone CLI in doc * Fix mesos monitor for handling multiple masters * Make consistent usage of mock.patch decorators * Refactor keypair existence check in Baymodel * Consolidate code for docker conductor tests * Enable HA mode for mesos bay in Magnum * Enable HA mode for mesos bay in Heat templates * Fix wrong exception messages * Add Kubernetes podmaster * Add Kubernetes UI * Share get\_discovery\_url in templates * Performance: leverage dict comprehension in PEP-0274 * Remove Python 2.6 classifier * Functional: only copy logs on exception * Objects from Bay - Pods * Add missing bay\_create\_timeout to bay object * Wait more time after swarm bay creation before doing functional testing * Hide user credentials * Register neutron client option * Functional: Raise Exception if bay created failed * Remove circle reference * Swarm: Add swarm master HA support * Document how to download the mesos image * Objects from Bay - Services * Decoupling magnum service from periodic task * Optimize "open" method with context manager * Validate keypair when create a bay * Fix typo in db api doc string * Fixes for magnum objects doc string * Add support to 
set env to a container * Validate external network when create a bay * Updated from global requirements * Functional: Use Magnum client to test container actions on Swarm bay * Swarm: Split swarm.yaml to swarmcluster.yaml and swarmmaster.yaml * add neutron client support * Remove hardcoded default docker client setting * Docs: specify --docker-volume-size for swarm bay * add unittest testcase for Openstack Nova client * Validate baymodel's flavor when create a bay * Fixed typo in the dev guide for Mesos * Remove temp fix for new oslo.versionedobjects * Add a global var to maintain swarm version * Improve yml template test case * Chmod enable-etcd.sh 1.1.0 ----- * Record diagnostic info from bay nodes * Swarm: add proxy for etcd service * Remove typo in magnum-proxy.rst * Functional: Add container creation/deletion on swarm bay * Adding dev-notes for try-catch block in periodic task * Cleanup baymodel operations in conductor api * Updated from global requirements * Refactor Mesos templates * Adds Magnum Container Network Model to Swarm * Changes Swarm Bootstrapping from Public to Etcd * Pin oslo.versionedobjects version * Add support for different disk bus * Updated from global requirements * Import option before using it * Bay: Update node's ip addresses even bay creation failed * Updates Swarm Heat Templates to Support Container Network Model * API: use baymodel\_ident to update a baymodel * Make bandit included in test-requirements.txt * Updated from global requirements * Add mising requirements * Adds Cinder Volume Support to Swarm Bay Type * Objects from Bay - Replication Controller * Delete kube-register * API: Add debug message for every API call * Save functional testing files after testing done * Fix typos * Add the description of the output parameters to the Mesos * Remove unused opts * Monitor driver for mesos bay type * Updated from global requirements * Add functional test cases for swarm baymodel/bay * Add Magnum config for default network driver per 
COE * Make server.key/client.key as private in k8s node * always use constraints * Add -constraints sections for CI jobs * Swarm: Add TimeoutStartSec=300 to docker service * Updated from global requirements * Add iptables rule to listen m-api * Create BayModel with provided(VM/BM) server type * Rename heat-kubernetes, heat-mesos, docker-swarm * Generate missing baymodel sample configs * Update deprecated option for docker * Functional: Add testcase of tls\_enabled bay creation case * Update functional testing doc * Swarm: Add description for Heat Template output * Removed old k8s python client * Update usage of glance client * Swarm: Map master address to api\_address based on TLS * Added 'master\_addresses' to Bay in API * Removed personal SSH key name and assigned value as testkey * Unify common output keys across Heat templates * Minor fixes for the functional test guide * split out k8s and api functional testing * Object: refacor of x509keypair.py * Replace oslo\_utils.timeutils.isotime * Devstack: Use HOST\_IP to set MAGNUM\_SERVICE\_HOST * Revert "Fix the neutron-lbaas agent config" * Update functional test document * Fix typo error * Enable network services at Kub master * remove default=None for config options * Add support for allowable network drivers configuration * Use oslo\_config PortOpt type for port options * use importutils in monitors.py to avoid cyclic imports * Document how to run functional test locally * Monitor driver for k8s bay type * timeutils.utcnow should be used instead of datetime.datetime.utcnow * Imported Translations from Zanata * Fix the neutron-lbaas agent config * Use \_assert\_has\_(no)\_errors() in hacking tests * Added CORS support to Magnum * Improve tox.ini to easy developer's life * Raise exception when adding an existed attribute while update bay * Use assertIn and assertNotIn * Improving comment in monitors.py * Use assertIsInstance instead of assertTrue(isinstance(a, b)) * Avoid JsonPatch twice * Use assertIsNotNone 
instead of assertEqual(\*\* is not None) * Use assertTrue/False instead of assertEqual(T/F) * Updated from global requirements * Use assertIsNone instead of assertEqual(None, \*\*\*) * Add bay filter to container * Upgrade to Swarm 1.0.0 (production ready release) * Fix argument order in assertEqual to (expect, obs) * Use oslo\_config IPOpt support * Update devstack doc to cover the latest atomic image * Remove unnecessary parameter * Fix the failure to scale-down k8s cluster * Fix exception when create bay failed * The default of filters should be an empty dict * Fix k8s CLI to work with Bay name * Kubectl configuration for certificates * Refactor MagnumException to reduce complexity * Refactor config setup to reduce complexity * Refactor periodic task sync\_bay\_status * Reduce complexity of poll\_and\_check method * Add functional\_creds.conf to .gitignore * Add doc8 to pep8 job * Some improvement in swarm cluster add-proxy.sh * Fix docker proxy config file not correctly seting on k8s master * Cleanup template formatting * Add proxy for k8s 1.0.0.0b1 --------- * Some fixes or improvements of quickstart guide * Several fixes for the TLS guide * Fix incorrect usage of CertManager in k8s\_api * Split test\_bay\_conductor tests * Fix a 409 failure on bay-update * Open port 6443 in security group for k8s bay * Fix bay-create failure without "name" * Fix registration failure caused by TLS support * Document how to enable barbican in devstack * Fix command line in document example * Fix swarm monitor exception * Read auth\_uri from config file and remove is\_public\_api * Move security group setting to kubecluster.yaml * Upgrade to Swarm 0.4.0 (latest) * Update Kubernetes examples * Added a guide to explain how to use secure Kubernetes API * Fix wrong doc output * Adding new test case to do heat yaml file validation * Fix mesos build image error * Fix order of arguments in assertEqual * Fix order of arguments in assertEqual * Fix order of arguments in assertEqual * Fix 
order of arguments in assertEqual * Fix order of arguments in assertEqual * Functional tests for magnum service * Modify admin\_api policy rule * Baymodel create should allow no network\_driver specified * Fix an occasional 400 error on functional gate * Pull metrics by using TLS enabled client * Update and clarify redis examples in quickstart * Make Kubernetes API call secure * Fix typos in document * Add TLS support in heat kubernetes * Fix comment container delete should accept both uuid/name * Move 'docker\_for\_container' to a common module * Move k8s resources test to TestKubernetesAPIs * Fix swarm bay failure reporting * Enabled ceilometer using plugin model * Update Dev Quick-Start links to officail docs * Fix D001 Line too long error * Allow container memory size to be specified * Fix double-wrapping of exception in conductor * Fix TypeError at magnum API for service-list * Minor documentation correction * Add TLS support to container handler * Adding support for public baymodels * Remove unnecessary util method temporary\_mutation * Add versioned objects to docs.openstack.org * Adding Documentation for use of proxies in magnum * Remove name from test token * Set up temp files containing client TLS certs * Use dockerpy logs operation instead of attach * Reduce complexity of filter methods * Rename "insecure" to "tls\_disabled" * Swarm: Set to CREATE\_FAILED status if swarm services not started * Swarm: Fix NODE\_SERVICES in template * Remove unused DB API get\_rcs\_by\_bay\_uuid * Documentation update for 'magnum service-list' * Configure Ironic for Kubernetes load balancer feature * Configure CoreOS for Kubernetes load balancer feature * Configure Fedora Atomic for Kubernetes load balancer feature * Remove unused DB API and Service object API * Fixes Neutron security groups for Swarm Bay type * Removes --tls flag from docker and swarm-manager daemons * Adding API support for magnum service * Implement bay monitoring and notifications * Fix E251 
unnecessarily ignored pep8 rule * Add details to developer quick-start Mesos section * Add heat template plugins to documentation * Create master tmptls for k8s ironic/coreos bay * Make network-driver check based on COE type * Add bay\_uuid to RC Read/Write API's * Add bay\_uuid to Service Read/Write API's * Add bay\_uuid to Pod Read/Write API's * Introduce BayType that declares valid COEs * Backend support for magnum service * Migrate to Kubernetes Release 1 (docs) * Update Developer Quick-Start to Kubernetes 1.0 * User guide for Kubernetes external load balancer * Adds client test for labels * Fixes Kubernetes Pod and Service Manifest Examples * Fix container status when showing a paused containers * Functional: Split python client functional testing case * Swarm: move write-docker-service.sh before other configure scripts * Move the code for local cert to the right place * Remove unused DB API and Pod object API * Swarm: Add configure-swarm.sh to configure docker-storage * TLS integration for latest pythonk8sclient * Add TLS to Docker-Swarm Template * Eggnore the .egg directory * Remove ERROR\_ON\_CLONE references * Enable barbican cert manager in devstack * Use api version 1 to set image property * Add TLS support in Magnum * Use --max-complexity flake8 rule * Fix H405 and E131 ignored pep8 rules * Unwedge the gate by only enabling barbican once * Fix container action debug log not accurate * Docs update for new fedora atomic 1.0.4 * Fix funtional gate: specify missing network\_driver * Sync the description with Kubernetes Release 1 version * Code refactor for keystoneclient * Add registry to template * Functional tests with Tempest - BayModel CRUD * Validates baymodel network\_driver requests * Change ignore-errors to ignore\_errors * Migrate to Kubernetes Release 1 * Enabled ceilometer services using new model * Adds labels support to baymodels * Fix naming of variables/classes in tests * Updated from global requirements * Adds network\_driver Support of 
Container Network Model * Refactors Heat templates for Container Networking Model * doc8 all documentation and fix doc style * Add registry\_enabled to api and db * Readme : Change swarm\_manager to swarm\_master * Temporarily remove dependency on package certifi * Change swarm\_manager to swarm\_master * Allow unicode text as CSR * If headerParams = None, don't use it to .update() * Fix calling parameter at get\_cert/delete\_cert * Add a link versioned object document * Update documentation for generating k8s v1 client * Avoid to use eval in pythonk8sclient * Fix missing value types for log message * Check file existence in local cert manager * Add test to local\_cert\_manager * Update swarm discovery url * Delete certs while deleting bay * Enable Barbican in devstack * DB Migration does not work for SQLite backend * Add version hashes to enforce version updates * Swarm agent to get templated proxy values * Porting function\_exists to post\_test\_hook.sh * Cleanup Baymodel correctly when Teardown TestKubernetesAPIs * "keypair\_id" should be existent when creating a baymodel * Checkout generated k8s client * Add documentation for testing objects * Fix typo in magnum/common/x509/config.py * Fix wrong parameter passed to heat-params * Conductor: Remove \_update\_stack\_outputs from bay\_conductor * Fix heat-mesos README * Fix retrieving ca\_cert * Change instructions to use get-pip * Modify log message * tox: rm all pyc before doing unit test * Code refactor for ConfFixture * Add Certificate controller for TLS support * Generate certs while creating bay * Add a tool to manage x509 objects * Add CertManager to store CA and client certificate * Fix keystone client usage in barbican client * Enhanced error checking in get\_discovery\_url * Updates the node count key for all types of bay * Updated from global requirements * Remove retrieving external-network-id * Introduce unit test for genconfig * Fix missing sql\_opts * Fix the hard-coded etcd cluster size * Fix 
jenkins failure for image not found * Change manager to master in docker-swarm * Indirection API implementation * Fix the link for Docker Remote API * Change bay.\*\_cert\_uuid to bay.\*\_cert\_ref * Fix the representation of REST * Change grep option dev-build-atomic-image.rst * Fix method and parameter descriptions * tools/colorizer.py is not used anywhere * Add explicit requirement for decorator module * Add field for container status * Add UNKNOWN constant to container statuses * Removing unused dependency: discover * Sync bay status reason in periodic task * Move 'all\_tenants' options to context * Enable Magnum to send notifications via RPC * Correct exception raised in few db APIs * Use oslo.versionedobjects enums instead of status strings * Add cert\_uuid attributes to Bay in db * Updated from global requirements * Add port type on port option * Doc update for 'magnum coe-service-\*' * Updated from global requirements * Unify using of migration tools * Set project\_id and user\_id from context directly * Enable barbican in devstack * Gate failure due to introduction of new WSME 0.8.0 * proxy-blue print for docker swarm * Fix unit test for replication controller * documentation: fix formatting * Remove retrieving external-network-id * Updated from global requirements * Remove deprecated config 'verbose' * Add roles to context * Remove hardcoded config file in error message * X-User is deprecated and X-Storage-Token is useless * Add default for node\_count and bay\_create\_timeout * Fix wrong usage of filters in periodic task * Add 'master\_addresses' attribute to bay * Add required packages to Developer Quick-Start guide * Updated from global requirements * Fix replication controller unit test sample record * Rename wraper to wrapper * Fix race condition in bay\_update * Adding more information in dev-quickstart.rst * Remove unsed file magnum/config.py * Added gcc in OS-specific prerequisites * Enable ceilometer in devstack * Updated from global requirements 
* Check before \_update\_stack * Add X509KeyPair controller and conductor * Sets FLANNEL\_ETCD to 127.0.0.1:2379 * Provides correct master IP address for kube-proxy * Updated from global requirements * Use magnum specific flavor * Fix typo in dev-build-atmoic-image.rst * Updated from global requirements * Magnum Container Networking Spec * Switched to Ubuntu 14.04 LTS (Trusty) base image * Fix race condition when syncing bay status across conductors * Make simultaneous bay deletion workable * Updated from global requirements * Add docker method for building mesos image * Add a new field 'master\_count' to bay in API * Updated from global requirements * Unify templating style between templates * Added X509KeyPair object and data model * Remove redundant code about fake\_policy * Use new docker apt repos * Add barbicanclient support in Magnum * Make doc use automatic install and consistent url * Update test cases for test\_bay.py and test\_baymodel.py * API: Handler exception when doing container-list * Updated from global requirements * Fix the string type in k8s templates * Set default node\_count to 1 * Remove coding:utf-8 * Correct the usage of decorator.decorator * Remove XML parsing code from magnum * Add test cases for pagination marker * Instruction for building Fedora Atomic image * update comments in k8s template * Add a new field 'master\_count' to bay in DB * Put kube\_master into a resource group * Bootstrap etcd cluster by discovery\_url * Configure IP tables in devstack plugin * Remove \_\_name\_\_ attribute in UserType * Remove redundant argument in container\_create api * Updated from global requirements * Add magnum\_url method to clients module * Replace etcd ports by its offical ports * Split TemplateDefinitionTestCase to different test case * Some parameter in heat template should be string * Remove incorrect variable in etcd.conf * Add tests for rpcapi container methods * Register kube minions through load balancers * Make k8sclient use the load 
balancer address * Add test\_policy\_disallow\_detail case for bay/baymodel/node/pod * policy check for container * policy check for service * policy check for rc * Register glance client and other options * Change ca to ca-cert for consistency in k8sclient * Updated from global requirements * Correction for the container-name * Port upstream commit "keep chasing etcd" * Clean up miscellaneous help strings * Add context to TemplateDefinition.extract\_definition * Fix permission error on running periodic task * Update manual guide to not harcode br-ex * Disable expansion for cat commands work as expected * Add guru meditation report for magnum * Do not inspect contianer doesn't exist * Set default of number\_of\_minions to 1 in comments * Contextlib.nested is deprecated * Remove redundant codes * Remove redundant code from FunctionalTest class * Updated from global requirements * Rename "k8s\_master\_url" to a better name * Remove unused oslo-incubator modules * Fix error related policy.json file * Updated from global requirements * Fix the wrong platform usage * Derive the public interface * Remove redundant section about setting up venv from quick-start * Remove redundant code from magnum.test.utils * Replace tearDown with addCleanup in magnum unit tests * Remove duplicate app loading * Remove H302, H803, H904 * Add periodic task to sync up bay status * Use a simple way to determine whether a wsme type is null * Add load balancers in front of kube-master node * Updated from global requirements * Rename PeriodictTestCase to PeriodicTestCase * Add template definition of Mesos bay * Updated from global requirements * policy check for pod * Add manual links into dev-quickstart.rst * Remove redundant FunctionalTest class * Remove a redundant file * Remove redundant commas * Updated from global requirements * Code refactor for tests/unit/db/test\_baymodel.py * Remove unused file in heat-kubernetes template * Remind the user when sort\_key is invalid * Remove setUp 
function * Fix setup of tests to remove intermittent failure * The nullable parameter is not necessary * Updated from global requirements * Add return value to mocks to allow serialisation * Clean up getting started docs * Updated Magnum documentation * Add \`sudo\` before \`docker\` command on dev docs * Use constraints * Remove unnecessary codes * Drop XML support in Magnum * Remove redundant Copyright text from heat-mesos * Override \_setUp instead setUp when we use fixtures.Fixture * Enable Load-Balancing-as-a-Service in devstack * Temporary work around of functional test failure * Use the pythonic way to catch exceptions * Add .DS\_Store to .gitignore * Eliminate mutable default arguments * Fix unit test failure * Add documentation for smart scale down feature * Implement bay smart scale down * Fix old network\_id usage * Code refactor for prepare\_service * add .idea to .gitignore * Make ironic templates working * 'nose' is no longer required for testing Magnum * Validate bay type on creating resources * Remove unreachable code in API utils * Check for Python 2 when using unicode builtin * Fix minion registration failure * Docker container-create fails with Unicode is not supported * Modify k8s template to support removal policy * Fix the function "wrap\_exception" * Remove duplicated definition of class "APIBase" * Fix sample link in magnum/api/controllers/link.py * Remove unused fields "from\_\*" from API controller * Upgrade code to be python 3 compatible * use bdict instead of cdict for baymodel testcase * pass baymodel date instead of bay data for baymodel create request * Fix os-distro property name to os\_distro * Move conductor common implementations into module * Backport "docker\_volume\_size should be numeric" * Backport "tie minion registration to kubelet activation" * Update heat policy.json * Add periodic task framework * Swith auth\_uri to use v2.0 * Updated from global requirements * policy check for node * Updated from global requirements * 
Adding functional test cases for Kubernetes APIs * Devstack: Add admin creds in magnum.conf * port to oslo.service * Make swarm work with atomic image * remove duplicate option settings * Add elements for building a Mesos bay node image * Add 'host' field to Pod object * Replace dict.iteritems() with dict.items() * Adds TLS support in pythonk8sclient * Add Bay.list\_all method to allow admin context to query all tenants bay * Fix unit test case error * Updated from global requirements * Backport "configure docker storage correctly" * Backport "docker group is no longer used" * Backport "docker.socket is no longer used" * Fix the wrong number for minion node * Support use admin creds in KeystoneClientV3 * Add make\_admin\_context to Magnum context * Not need to use bay uuid * DB: Support filter\_by status in get\_bay\_list * Create new k8s\_api instance on every calls * Rename image\_id to image in container object * Object: pass filter to bay list * Updated from global requirements * Unknown type 'any' in pythonk8sclient removed * Updated from global requirements * Attempt to fix functional gate test * Web Interface for Magnum in Horizon * policy check for baymodel * Add documentation for how to scale a bay * Backport "doc update -- this is no longer tied to Fedora 20" * Handle Interrupt in conductor service * Update changes in container-create command in quickstart * Correct Hacking rule code * Update config example * Fix the kubernetes opts * Add oslo.policy namespace * Add hacking rule framework for magnum * Updated from global requirements * Add test case for bay policy check * Eliminate eval from swagger.py in k8sclient * Make quickstart more consistent * Modify magnum api context to use user\_name and project\_name * first policy check for bay * enhancement for the common policy enforce * Backport multiple template fixes * Backport "Cleanup the templates" * Backport "Avoid usage of deprecated properties" * Pass environment variables of proxy to tox * 
Consolidate repeated codes in test\_bay\_conductor * Minor improvement of the quickstart guide * Fix an error on generating configs * Initial Heat template for Mesos * Update quickstart to point to kubernetes 0.15 and v1beta3 manifest * Fix the KeyError and change type 'any' in k8s client code * Return proper response object in Kubernetes APIs * Add test to API Version object * Unify the conductor file and class naming style * Remove major version checking * Cloud driver is not needed * Refactor magnum functional test to add Kubernetes API test * Updated from global requirements * Changes container\_execute to container\_exec * cleanup openstack-common.conf and sync updated files * Updated from global requirements * Remove unused PodFactory class and add parent class for Pod * NotAcceptable exception should be 406 * Fix ignored E121 to E125 pep 8 rules * Add support for API microversions * Add netaddr to requirements * Fix RequestContext's to\_dict method * Remove unused files that used kubectl * Improve unit test code coverage of test\_utils * Updated from global requirements * Add different version support for docker-py * Updated from global requirements * Add license header to swagger.py * Remove IPv4AddressType validate function in magnum * Updated from global requirements * Fix the i18n import * Fix return IPv4 address after validation * Remove old hack for requirements * Fix method signatures unmatching in db api * introduce policy for magnum * Added kube\_register file which required by configure-kubernetes-minion.sh * Add status\_reason field to bay * Passing posargs to flake8 * Change value for logging\_context\_format\_string option * Fix continuation line under/over indented problems * Use oslo.log instead of oslo.incubator log module * Fixing import error in kubernetes client code * Use new docker exec call * Backport "added required id: top-level key" * Backport "Replace CFN resources with heat native" * Semi-Backport "successfully add new minions via 
stack-update" * Manually import all pot files * Improve dev-quickstart documentation * Improving Unit Test coverage of k8s\_manifest * Use the status defined in bay object Status class * Only define RequestContextSerializer once * Rename bay\_k8s\_heat to more general name * Backport "fixup! added script for dynamically registering a minion" * Backport "added script for dynamically registering a minion" * Backport "minor updates to README" * Backport "added some output descriptions" * remove allow\_logical\_names check * Reorder requirements into ascii-betical order * Correct the wrong parameter pass * Fix the doc format * Catch common Exception in container conductor * Backport "kubenode -> kubeminion" * Setup for translation * Add missing dependencies (from pip-missing-reqs) * Add more note when play magnum with devstack * Add wait condition on swarm services in swarm bay * Remove unused methods and functions * Make functional test work with new tox env * Fix the docker build image issue * Avoid hard-coded UUID in quickstart guide * Fix the ipaddress validate issue * Fix doc according to devstack support * Update docs and some files to remove kubectl * Updated from global requirements * Create container json file under home directory * Remove unused parameter * Added support of Kubernetes API in magnum * Correct a spelling error in quickstart guide * Remove dependency on python-kubernetes * Keypair\_id should be a required option when creating a baymodel * Image\_id should be a required option when creating a baymodel * Add support for container status * Make docker volume config more resilient * Allow container name as identifier in API calls * Move VersionedObject registration to the new scheme * Use oslo.versionedobjects remotable decorators * Make MagnumObject a subclass of Oslo VersionedObject * Fix the container delete uuid issue * Update quickstart guide to v1beta3 manifests * Update service manifest parsing according to v1beta3 * Configure minions 
properly * Removing unused code in docker\_client * Make Docker client timeout configurable * Move our ObjectSerializer to subclass from the Oslo one * Add local.sh to dev guides * Remove oslo config warnings * Remove trailing spaces in container-service.rst * Update rc manifest parsing according to v1beta3 * Update rc support a manifest change * Update service support a manifest change * Delete swarm bay also delete related containers * Improve validation on baymodel api calls * Add unique column constraints to db 2015.1.0 -------- * Add image name support when create a baymodel * Functional tests for listing resources and templates * Remove cluster\_coe from magnum conf * Add string length validation to names in APIs * fixed\_network should be fixed\_network\_cidr * Remove cluster\_type from conf and Update conf example for opts changes * Add full name of coe to README * Image distro not updated when magnum configured with devstack * Print right message when OSDistroFieldNotFound exception raised * Update Kubernetes version for supporting v1beta3 * Update pod manifest parsing according to v1beta3 * Bay show return api address and node addresses * Add coe attribute to BayModel * Fix the genconfig issue * Fix keyerror issue when create baymodel * Exit the poll loop when bay update failed * Fix bay\_create\_timeout not specify issue * Change from kubernetes 0.11 to 0.15 * Invalid JSON in dynamic registration of minion * Log the reason when bay create or delete failed * Add http:// prefix to kubelet api server * Add etcd 2.0 config file support * Implementation of Cluster distro for baymodel * Fix the versionedobject version issue * Add timeout parameter to bay create * Use container's bay for docker endpoint * Use proper rpcapi in Containers API * Correct spelling mistake in dev-quickstart * Add bay\_uuid attribute to Container model * Remove duplicate replacePod API * Update requirement to fix gate error * Allow rc-update with rc name also * Allow service-update 
with service name also * Allow pod-update with pod name also * Add command field for container * Add Swarm TemplateDefinition * Move our ObjectListBase to subclass from the Oslo one * Start the conversion to oslo.versionedobjects * Load definitions without requirement checking * Update swarm template for latest atomic image * Add return vlaue judge * Add return response in some of Kubernetes APIs * Correct ImportError in python-k8sclient code * Fix the doc wrong refer marker * New docker-py needs a later version of requests library * Enable Kubernetes v1beta3 API * Update pod support a manifest change * Fix typos and add Glance need * Fix requirements to fit for gate test * Update conf example file * Update dev quick start * Add template test for gate job * Not call dockerclient-api del none exist container * Remove exit from conductor * Implement baylock in conductor for horizontal-scale * Enabld conductor check new template path * Implement listener API for conductor horizontal-scale * Sync heat status to magnum when max\_attempts exceeds * Validate scheme used in urlopen * Remove unsafe usage of eval * Use yaml.safe\_loader instead of yaml.loader * Implements: Fix bug 1442496, add more info in logs * Objects changes for horizontal-scale support * Database changes for conductor horizontal scale * Implements: Fix typos in containers-service.rst * Update bandit for new usage requirement * Use new location for atomic images * Add Template Definitions * DRY Up The Exception Module Tests * Fix the localrc issue * Adding support of python-k8client * Remove contrib directory for devstack * Add Bandit security lint checking via tox * Add a few more operations in post\_test\_hook.sh * Update dev-quickstart doc to match new devstack model * Add glance support in magnum * Add heat for manual steps * Enable Heat services in Devstack settings * Adding a functional test that uses python-magnumclient * Disable test on non-supported environment * Raise more generic exception in 
bay\_update API * Allow bay-update with bay name also * Add tox functional target * Remove useless exception * Destroy the related resources when delete a bay * Sync heat stack status when delete bay * Add tests for docker conductor * Compare to the right heat status when bay CREATE\_FAILED * Convert to new DevStack plugin model for functional testing * Make room for functional tests * Add tests for docker container * Fix some typos in magnum document * Fix pod tests in kube handler * Rename bay's minions\_address to node\_addresses * Add service test for kube handler * Add more tests for kube handler * Fix the parameters mismatch * Specify region while creating magnum endpoint * Remove unused code in kube handler * Update magnum document to use openstack as namespace * Remove downgrade from existing migrations * Update .gitreview for project rename * WaitCondition timeout attribute should be a number * Reflect client change which is name based management * Add kube pod tests in kubeutils * Add kube service tests in kubeutils * Add kube rc tests in kubeutils * Support keystone regions * Add tests for kubeutils rc * Add tests for kubeutils service * Remove unused code * Rename bay's master\_address to api\_address * Add a spce between the words of feature and set in the spec file * Add os support * Update pod\_delete call for new log message * Modify documentation to point to kubernetes-0.11 atomic image * Handle heat exception in create\_stack * Fix a small architectural error * Removing duplicate service definition * Sync with latest oslo-incubator * Fix an issue on updating bay's node\_count on DB * Fix typo in magnum/magnum/common/rpc?service.py * Allow baymodel name when bay is created * Update quickstart doc * Changed kubectl command to delete rc in magnum * Adjust Gerrit workflow Link * Allow baymodel resource management by "name" * Allow rc resource management by "name" * Allow pod resource management by "name" * Allow service resource management by "name" * 
Fix typo in magnum/doc/source/dev/dev-manual-quickstart.rst * Fix typos in magnum/specs/containers-service.rst * Remove non-ascii characters in magnum/doc/source/dev/dev-quickstart.rst * Fix the wrong path in the dev-quickstart.rst * Assign docker-volume-size to baymodel in document * Fix the wrong image name * Allow bay resource management by "name" * Fix the token in stack creation issue * Remove beaker.yaml * When polling heat set bay status * Fixed path in Devstack plugin README * Add docker\_volume\_size in the kubecluster-coreos.yaml template * Allow specification of ssh authorized key and token url for coreos * Add devstack module to contrib * Make resource creation fail when no 'id' in manifest * Make resource creation return 400 with empty manifest 2015.1.0b2 ---------- * Make service\_create return 400 with invalid json manifest * Make rc\_create return 400 with invalid json manifest * Make pod\_create return 400 with invalid json manifest * Add Heat tasks * Pull updates from larsks heat-kubernetes repo * Fix doc typo and make style consistent * Fix an error on cloning kubenetes repo * Make service\_create return 400 status on empty manifest * Requirements List Updates * Update dev-quickstart.rst * Change default path of kubecluster.yaml to absolute one * Fix the missing magnum dir creation * Remove unused ironic handler * Correctly delete replica controller * Improve logging in kube handler * Move folder heat-kubernetes to magnum/templates * Correct doc format * Add master flavor * Added requests in requirements * Introduce a coreos for heat-kubernetes in magnum * Support i18n log format for error in magnum * Allow specification of fixed\_network * Patch timeutils from oslo\_utils * Support i18n log format for warning in magnum * Support i18n in magnum * Register all magnum exceptions in allow\_remote\_exmods * Allow specification of docker volume size * Implement a Heat k8s template for Ironic * Catch PodNotFound during pod\_delete and continue * Fix 
BayNotFound error on deleting replica controller * Change link of quick start to git.openstack.org * Create heat template for docker-swarm bay * Allow pod delete to succeed when not found on bay * Fix typo in openstack-common * Fix MagnumException for parsing custom message * Allow Json patch to take an integer value * Fix docker client server mismatch * Fix the wrong parameter * Disallow bay-create with non-positive integer * Do not call get\_json() in TestPost * Update requirement * Fix the wrong number * Remove # -\*- encoding: utf-8 -\*- from some python files * Remove get\_xxxinfo\_list from magnum * Move bay defintion extraction code * Implement update bay node\_count * Add status attribute to bay * Pull in updates from larsks heat template repo * Change replicas to 2 in dev quick start * Move variable attempts\_count to local scope * Change ctxt to context to for consistency * Container logs should use HTTP GET other actions use PUT * Refactor bay\_create at k8s conductor * Remove imports from oslo namespaces * Change ctxt to context to for consistency * Freshen up the magnum conf file * Tech Debt: Fixed code alignment issues * Change command for creating a virtualenv * Cleanup code and remove newly ignored hack rules * Keep up with the global requirements * Adding python-kubernetes to requirements * Update quickstart-dev guide * Add tests for Node Rest API * Add tests for Replication Controller Rest API * Remove API get() for some magnum objects * Enable multi tenant for k8s resource get\_xx\_by\_id * Enable multi tenant for k8s resource get\_xxx\_list * Enable multi tenant for two k8s resource operation APIs * Removed container\_id from container api * Add tests for Service Rest API * Enable multi tenant for get\_pod\_by\_uuid * Fix and clean up Container api * Add project\_id and user\_id to service and rc * Add project\_id and user\_id to pod * Clean up codes in node API * Consolidate codes for k8s resources api * Fix and clean up Container api * Enable 
multi tenant for get\_xxx\_by\_id * Enable multi tenant for get\_xxx\_list * Enable multi tenant for get\_xx\_by\_uuid * Don't use deprecated auth header * Add tests for Pod api * Correct typo for function name * Remove redundant query from get\_bay\_by\_uuid * Pull RequestContext Patching Into Test Base * Use real context for db test * Update doc string for api base.py * Ensure auth\_token\_info present on RequestContext * Enable bay delete support multi tenant * Persist project\_id and user\_id for baymodel object * Add tests for Bay API * Persist project\_id and user\_id * Fix manifest url doesn't work * Fix and clean up ReplicationController api * Fix and clean up codes at service api * Fix and clean up codes at Pod api * Add project\_id and user\_id to db query filter * Fix the stamp parameter in db-manage * Make db-manage instructions same as usage * Rename test\_baymodels.py to test\_baymodel.py for db test * Fix and clean up BayModel and Bay api * Point to proper quickstart guide in index.rst * Fix documentation to display on git hub correctly * Add a DB management README * Add project\_id and user\_id to magnum objects * Rest back objects for test\_objects * Update the conf sample file * Fixed typos * Fix the miss opts in genconfig * Devstack is broken with heat/juno branch * Reduce resources required to launch example * Add documentation about installing kubectl * Make sure no duplicate stack name when creating k8s bay * Improve the quickstart guide * Claim tested OS/version on quickstart guide * Neutron is required by Magnum not Ironic * Add more tests for test\_objects.py * Add devstack neutron configuration link to quick start * Make the quickstart guide to work * Add a link for "Getting Started Guides" * Allow deletion of rc/service/pod if stack has been deleted * Delete bay independent of presence of heat stack * Rename "ackend" to "conductor" * Remove automatic deletion of failed stacks * Remove redundant information * Log error and exit if 
templates are not installed * Add note about heat finishing the job before continuing * Port NoExceptionTracebackHook from Ironic * Get rid of = and replace with space * Change Service name to magnum * Use sudo in installation of templates * Port parsable error middleware from Ironic * Fix \_retrive\_k8s\_master\_url * Rename \`resource\`\_data/url attributes to manifest/manifest\_url * Make replication controller delete working * Fix the typo in specs * Fix deprecated warning for oslo.serialization * Set replication controller name from rc manifest * Update developer quickstart * Implement bay deletion on api * Sync from oslo requirements * Add rc\_data support for magnum replication controller * Implement service deletion * Set service name from service manifest * Enable kube.py get k8s api server port from baymodel * Implement pod deletion * Set pod name from pod manifest * Add parser for k8s manifest * Parse stack output value for bay * Remove apiserver\_port attribute from bay\_definition * Add tests for baymodel rest api * Fix the list of unset fields in baymodel * Add max\_limit to sample config * Update the sequence for master\_address and minion\_addresses * Correct the typo in dev-quickstart.rst * Add tests for objects * Add apiserver\_port to BayModel * Add some test for magnum objects * Remove inappropriate exceptions and their usages * Add use case: Permit use of native ReST APIs * Implement service creation * Implement pod creation * Fix dbapi method \_add\_baymodels\_filters * Raise on deleting a referenced baymodel * Update README.rst * Docker: Pull Image Before Container Create * Adjusted README to add ReplicationController * Implements k8s resource creation/updating with data * Add some comments for delete logic in bay\_create * Add master endpoint support to kube\_utils.py * Add unit tests for dbapi of Node and Container * Add more unit test for replication controller * Add unit tests for the conductor AMQP API * Remove usage of BayLocked * Add 
missing Exceptions * Add bay\_uuid for replication controller * Fix the opts in genconfig issue * Add test and refactoring on bay\_create * Remove ctxt from RPC API * Remove ctxt from rpcapi for pod create * bay-create does not need ctxt on the rpcapi side * Add oslo concurrency namespace * Add tests for Pod and Service on dbapi * Add DB unit test for JSONEncodedDict and JSONEncodedList * Make bay, service and pod show call db directly * Make baymodel operations working * Add ctxt for all conductor APIs * Prune DB API * Add k8s replication support for magnum * Added multi-region/multi-cloud use case to specs * Added container network use case to specs * execute and logs API response should be JSON * Add tests for Bay dbapi and make them pass * Move out docker client logic from docker conductor * get container-execute to work * Get pause and unpause working * Remove type from bay object * Add tests for baymodel dbapi and make them pass * change old oslo.concurrency to oslo\_concurrency * Add service\_update for k8s * Service create need filename as parameter * Enable pod update using pod\_defintion\_url * Relocate and rename kubecli.py * Add replication controller to magnum db * Add bay\_delete support for bay handler * Add bay\_show support for bay handler * Remove get\_service\_by\_instance * Add BayModel to magnum README * Update description for magnum service * Implement creating Bay using heat * Fix and cleanup baymodel dbapi * Fix keystoneclient and heatclient incompatibility * Fix context is not set correctly * Catch Docker API errors * Pod create need filename as parameter * Add hooks to obtain conductor api * Split up extracting auth.py file * Add more fields for service * Add more test for magnum API * Add more fields for Pod * container\_delete should call docker.remove\_container * Get container-list REST API working * Container Controller gets container uuid not name * Add more tests for magnum/common * Add some tests for app and auth * Remove 
objects.registry.py * Split test\_all\_objects.py to different files * Implement "docker execute" for magnum * Update container operation parameter to name * Fix RequestContext attributes * Flesh out some more docker container methods * Remove redundant version info for magnum objects * fix the wrong order of steps and missing password in db connection * Implement docker backend for magnum service * Implement container\_list * Remove bay\_list from bay\_ironic.py and bay\_k8s\_heat.py * Implement bay\_ironic.py * Add a hyper-link for quick start * Add a new API of get\_by\_pod\_name for pod object * Update log message for kubecli.py * Update log message and some functions in kube.py * Fix external\_network\_id * Fix authentication * Fix auth\_url type * Remove type and image\_id replace with baymodel\_id * Add a baymodel object * Add bay kubernetes\_heat type WIP * Migrate to oslo.context * Reference proper file in cmd.conductor * Knitting Pod and Service object flow for Kubernetes backend * Update migration files to reflect new schema * Implement Service object Rest APIs * Add heat client * Add keystone client * Fix failing creation of MagnumException subclasses * Rename backend to conductor * Remove conductor * Rename the test\_functional.py to the api * Add RPC backend service * Add bay uuid to Service Objects * Add documentation for a developer quickstart guide * Add a node object * Update db migration for pod * Add image\_id and node\_count to bay * Copy ironic/common files to magnum/common for RPC server * Remove common/rpc directory * Add dependencies from oslo-incubator for RPC services * Update openstack.common from oslo-incubator * Add bay uuid to pod model objects * Remove stray print which caused magnum-db-manage to fail * Workflow documentation is now in infra-manual * Add stubs for the container actions * removed unused file for root controller * Fix REST API and test case for Containers * Implement python interface to execute k8s CLI services * 
Remove crud in magnum/objects/sqlalchemy * Get the Pod REST API and tests working * Add missing exceptions in code borrowed from Ironic * Get HTTP Patch test working with Bay REST API * Look for tests only under magnum/tests directory * Remove cruft in api tree * Use versioned objects for Container objects * Use versioned objects for bays * Add object.service * Add object.pod * Add an object.container * Modify the object registry to support ver objects * Remove unnecessary model init call * Set max limit as required by versioned objects * Add objects/utils.py copied from Ironic * Copy Ironic's database model codebase * Add some common code copied from Ironic * Add versioned object dependency modules * Add versionutils from oslo-incubator * Add sqlalchemy subclass for Bay * Switch to keystonemiddleware * Fix dockerfile comment containing unrecognized argument 'host' * Split up Base and Query class * Add sqlalchemy subclass for Container * Update README for magnum * Add base files for sqlalchemy implementation * Replaces Solum with Magnum * Fix arguments to bay\_create in AMQP * Change backends references to backend * Remove client installation from "Run" section * Call proper bay\_create backend * Add Functional tests for bays and pods * fix awkward use of \_\_len\_\_() * Flesh out the Container REST API * Fix returning bad variable * Standardize on id in the REST API * Make pod in sync with bay * Avoid apt-get update getting cached in Dockerfile * Add simple objects interface and registry * Fix docker container * Fully implement bay object in ReST API * Fix python-pip package not found in Dockerfile * Fix README.rst code block * Add Heat and Ironic Bay placeholder Handlers * Authenticate all API calls * REST API for container actions * Add getting started guide to README.rst * Flesh out the container REST API a tiny bit * Get the root version and v1 REST API working * Tidy up the ReST API * Enable automatic sample config generation * Added Magnum Configuration file 
* Added doc string & solved pep8 issue * Add backend processor for AMQP * Update README.rst * Add exception.py * Add safe\_utils to the source base * Initial import of prototyped interfaces * Add initial conductor API and service * Add RPC server code * Small Dockerfile changes * Dockerfile Improvements * Containers Service Spec * Add DockerFile to run magnum-api service * Modify gitignore to ignore cover * Do not say we support py3 * Add Keystone authentication to rest API * Replaces Solum with Magnum. Added doc string for the Magnum API * Add context base module from oslo-incubator * Create a ReST API entrypoint * Add H302 to the ignored hacking rules list * Import oslo-incubator's logging library * Incorporate feedback from the Initial commit review * Initial commit from github (squashed) * Added .gitreview magnum-6.1.0/AUTHORS0000664000175100017510000002670313244017674014116 0ustar zuulzuul000000000000002172869000074 Aaron-DH Abhishek Chanda Abhishek Chanda Accela Zhao Adrian Otto Ajay Kalambur Akash Gangil Akhila Alberto Gireud Amey Bhide Anandprakash Tandale Andreas Jaeger Andreas Jaeger Andrew Melton Angus Lees Anh Tran Antoni S. 
Puimedon ArchiFleKs Arun prasath Attila Fazekas AvnishPal Baohua Yang Bertrand Lallau Bertrand Lallau Bertrand NOEL Bertrand NOEL Bharath Thiruveedula Bin-Lu <369283883@qq.com> Bradley Jones Cale Rath Cao Xuan Hoang Cedric Brandily Chandan Kumar Chandan Kumar Chandra Ganguly ChangBo Guo(gcb) Chaozhe.Chen Chetna Khullar Chulmin Kang Clenimar Filemon Clenimar Filemon Colleen Murphy Corey O'Brien Costin GamenÈ› Cristovao Cordeiro Dane LeBlanc Daneyon Hansen Daniel Abad Danil Golov Davanum Srinivas Davanum Srinivas Deeksha Deepak Devdatta Kulkarni Dinesh Bhor Dirk Mueller Doug Hellmann Drago Rosson Egor Guz Eli Qiao Eric Brown Fang Fenghua <449171342@qq.com> Fang fenghua <449171342@qq.com> Fei Long Wang Feilong Wang Feng Shengqin Fenghuafang <449171342@qq.com> Ferenc Horváth Flavio Percoco Florian Haas Georgiy Kutsurua Grzegorz Grasza Gyorgy Szombathelyi HackToday Haiwei Xu Hieu LE Hironori Shiina Hongbin Lu Hongbin Lu Hongbn Lu Hua Wang Ian Main JUNJIE NAN James E. Blair James E. Blair Jamie Hannaford Janek Lehr Jason Dunsmore Javier Castillo Alcíbar Jay Lau (Guangya Liu) Jay Lau Jaycen Grant Jennifer Carlucci Jeremy Stanley Jerome Caffet Jesse Pretorius Joe Cropper Johannes Grassler Jongsoo Yoon Juan Badia Payno Kai Qiang Wu Kai Qiang Wu(Kennan) Kai Qiang Wu(Kennan) Kennan Kennan Kevin Lefevre Kevin Zhao Kirsten G Lan Qi song Larry Rensing Lars Butler Lin Lin Yang Lingxian Kong Lu lei Luong Anh Tuan M V P Nitesh Madhuri Madhuri Madhuri Kumari Madhuri Kumari Madhuri Kumari Madhuri Kumari Mahito Mahito OGURA Manjeet Singh Bhatia Mark Goddard Markus Sommer Martin Falatic Mathieu Velten Michael Krotscheck Michael Lekkas Michael Sambol Michael Still Michael Tupitsyn Michal Jura Michal Rostecki Mike Fedosin Mitsuhiro SHIGEMATSU Mitsuhiro Tanino Mohammed Naser Monty Taylor Motohiro OTSUKA Murali Allada Namrata Nate Potter Navneet Gupta Ngo Quoc Cuong Nguyen Hung Phuong Niall Bunting OTSUKA, Yuanying OTSUKA, Yuanying OTSUKA, Yuanying OpenStack Release Bot PanFengyun 
PanFengyun Paul Czarkowski Paulo Ewerton Peiyu Lin Perry Rivera Perry Rivera Pierre Padrixe Pradeep Kilambi Rajiv Kumar Randall Burt Ricardo Rocha Robert Collins Robert Pothier Ronald Bradford Ronald Bradford Ryan Rossiter Samantha Blanco Saulius Alisauskas Sean Dague Sean McGinnis Sergey Vilgelm ShaoHe Feng Shawn Aten Shinn'ya Hoshino Shu Muto Shuquan Huang Spyros Trigazis (strigazi) Spyros Trigazis Spyros Trigazis Stephen Gordon Stephen Watson Steven Dake Steven Dake Surojit Pathak Swapnil Kulkarni (coolsvap) Swapnil Kulkarni Syed Armani Thomas Bechtold Thomas Goirand Thomas Maddox Tom Cammann Tom Cammann Ton Ngo Tovin Seven Van Hung Pham Velmurugan Kumar Victor Sergeyev Vijendar Komalla Vikas Choudhary Vilobh Meshram Vinay Vivek Jain Vu Cong Tuan Wanghua Wanlong Gao Ward K Harold Wenzhi Yu Xi Yang Xian Chaobo Xicheng Chang YAMAMOTO Takashi Yang Hongyang YangLiYun <6618225@qq.com> Yasemin Demiral Yash Bathia Yatin Kumbhare Yolanda Robla Yongli He Yosef Hoffman Yuiko Takada Yusaku Sawai Yushiro FURUKAWA Zachary Sais Zane Bitter Zhenguo Niu ZhiQiang Fan ZhouPing <11236488@qq.com> Zuul abhishekkekane akhiljain23 ashish.billore avnish chao liu chenlx chenxing chestack coldmoment deepakmourya digambar digambar digambarpatil15 dimtruck dimtruck eric fengbeihong gecong1973 gengchc2 hanchao houming-wang howardlee huang.huayong indicoliteplus iswarya_vakati jinzhenguo lei-zhang-99cloud leiyashuai leizhang lingyongxu lqslan maliki mathspanda melissaml murali allada npraveen35 pawnesh.kumar pengdake <19921207pq@gmail.com> prameswar qinchunhua rabi rajat29 rajiv ricolin ricolin sayalilunkad shravya space ting.wang trilliams twm2016 venkatamahesh venkatamahesh vincent wangbo wangqun weiweigu wenchma xpress xxj <2001xxj@gmail.com> yang wang yanghuichan yatin yatin yatin karel yatinkarel yatinkarel yuanpeng yuhui_inspur yuki kasuya yuntongjin yuntongjin yuyafei zhang.lei zhangyanxian zhufl ztetfger “Akhila 
magnum-6.1.0/.coveragerc0000666000175100017510000000016213244017334015151 0ustar zuulzuul00000000000000[run] branch = True source = magnum omit = magnum/tests/* [report] ignore_errors = True exclude_lines = pass magnum-6.1.0/CONTRIBUTING.rst0000666000175100017510000000103113244017334015465 0ustar zuulzuul00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps in this page: http://docs.openstack.org/infra/manual/developers.html Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: http://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/magnum magnum-6.1.0/LICENSE0000666000175100017510000002363713244017334014051 0ustar zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. magnum-6.1.0/releasenotes/0000775000175100017510000000000013244017675015530 5ustar zuulzuul00000000000000magnum-6.1.0/releasenotes/source/0000775000175100017510000000000013244017675017030 5ustar zuulzuul00000000000000magnum-6.1.0/releasenotes/source/ocata.rst0000666000175100017510000000023013244017334020636 0ustar zuulzuul00000000000000=================================== Ocata Series Release Notes =================================== .. 
release-notes:: :branch: origin/stable/ocata magnum-6.1.0/releasenotes/source/_static/0000775000175100017510000000000013244017675020456 5ustar zuulzuul00000000000000magnum-6.1.0/releasenotes/source/_static/.placeholder0000666000175100017510000000000013244017334022721 0ustar zuulzuul00000000000000magnum-6.1.0/releasenotes/source/pike.rst0000666000175100017510000000021713244017334020504 0ustar zuulzuul00000000000000=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: stable/pike magnum-6.1.0/releasenotes/source/conf.py0000666000175100017510000002150613244017334020325 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Magnum Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Mar 29 10:17:02 2016. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
# sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'openstackdocstheme', 'reno.sphinxext', ] # openstackdocstheme options repository_name = 'openstack/magnum' bug_project = 'magnum' bug_tag = '' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Magnum Release Notes' copyright = u'2016, Magnum developers' # Remove setting of version/release # The short X.Y version. version = '' # The full version, including alpha/beta/rc tags. release = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. 
# show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. 
html_last_updated_fmt = '%Y-%m-%d %H:%M' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'MagnumReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). 
latex_documents = [ ('index', 'MagnumReleaseNotes.tex', u'Magnum Release Notes Documentation', u'2016, Magnum developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'magnumreleasenotes', u'Magnum Release Notes Documentation', [u'2016, Magnum developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'MagnumReleaseNotes', u'Magnum Release Notes Documentation', u'2016, Magnum developers', 'MagnumReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. 
# texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] magnum-6.1.0/releasenotes/source/_templates/0000775000175100017510000000000013244017675021165 5ustar zuulzuul00000000000000magnum-6.1.0/releasenotes/source/_templates/.placeholder0000666000175100017510000000000013244017334023430 0ustar zuulzuul00000000000000magnum-6.1.0/releasenotes/source/index.rst0000666000175100017510000000077313244017343020672 0ustar zuulzuul00000000000000.. Magnum Release Notes documentation master file, created by sphinx-quickstart on Tue Mar 29 10:17:02 2016. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. Welcome to Magnum Release Notes's documentation! ================================================ Contents: .. toctree:: :maxdepth: 2 unreleased pike ocata newton mitaka liberty Indices and tables ================== * :ref:`genindex` * :ref:`search` magnum-6.1.0/releasenotes/source/mitaka.rst0000666000175100017510000000021413244017334021017 0ustar zuulzuul00000000000000============================ Mitaka Series Release Notes ============================ .. release-notes:: :branch: origin/stable/mitaka magnum-6.1.0/releasenotes/source/unreleased.rst0000666000175100017510000000015313244017334021702 0ustar zuulzuul00000000000000============================ Current Series Release Notes ============================ .. 
release-notes:: magnum-6.1.0/releasenotes/source/locale/0000775000175100017510000000000013244017675020267 5ustar zuulzuul00000000000000magnum-6.1.0/releasenotes/source/locale/fr/0000775000175100017510000000000013244017675020676 5ustar zuulzuul00000000000000magnum-6.1.0/releasenotes/source/locale/fr/LC_MESSAGES/0000775000175100017510000000000013244017675022463 5ustar zuulzuul00000000000000magnum-6.1.0/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po0000666000175100017510000000312513244017343025507 0ustar zuulzuul00000000000000# Gérald LONLAS , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: Magnum Release Notes 5.0.1\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2017-08-23 20:27+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-10-22 04:59+0000\n" "Last-Translator: Gérald LONLAS \n" "Language-Team: French\n" "Language: fr\n" "X-Generator: Zanata 3.9.6\n" "Plural-Forms: nplurals=2; plural=(n > 1)\n" msgid "" "--keypair-id parameter in magnum CLI cluster-template-create has been " "renamed to --keypair." msgstr "" "Le paramètre --keypair-id dans cluster-template-create du CLI magnum a été " "renommé pour --keypair." msgid "3.0.0" msgstr "3.0.0" msgid "3.1.0" msgstr "3.1.0" msgid ":ref:`genindex`" msgstr ":ref:`genindex`" msgid ":ref:`search`" msgstr ":ref:`search`" msgid "Contents:" msgstr "Contenu :" msgid "Current Series Release Notes" msgstr "Note de la release actuelle" msgid "Deprecation Notes" msgstr "Notes dépréciées " msgid "Indices and tables" msgstr "Index et table des matières" msgid "New Features" msgstr "Nouvelles fonctionnalités" msgid "Newton Series Release Notes" msgstr "Note de release pour Newton" msgid "Security Issues" msgstr "Problèmes de sécurités" msgid "Upgrade Notes" msgstr "Notes de mises à jours" msgid "Welcome to Magnum Release Notes's documentation!" 
msgstr "Bienvenue dans la documentation de la note de Release de Magnum" msgid "[1] https://review.openstack.org/#/c/311476/" msgstr "[1] https://review.openstack.org/#/c/311476/" magnum-6.1.0/releasenotes/source/locale/ja/0000775000175100017510000000000013244017675020661 5ustar zuulzuul00000000000000magnum-6.1.0/releasenotes/source/locale/ja/LC_MESSAGES/0000775000175100017510000000000013244017675022446 5ustar zuulzuul00000000000000magnum-6.1.0/releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po0000666000175100017510000010172713244017334025501 0ustar zuulzuul00000000000000# Shu Muto , 2017. #zanata msgid "" msgstr "" "Project-Id-Version: Magnum Release Notes 5.0.1\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2017-08-28 14:20+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2017-08-25 05:51+0000\n" "Last-Translator: Shu Muto \n" "Language-Team: Japanese\n" "Language: ja\n" "X-Generator: Zanata 3.9.6\n" "Plural-Forms: nplurals=1; plural=0\n" msgid "" "--keypair-id parameter in magnum CLI cluster-template-create has been " "renamed to --keypair." msgstr "" "Magnum CLI ã® cluster-template-create ã® --keypair-id パラメーターã®åå‰ãŒ --" "keypair ã«å¤‰æ›´ã•れã¾ã—ãŸã€‚" msgid "3.0.0" msgstr "3.0.0" msgid "3.1.0" msgstr "3.1.0" msgid "3.2.0" msgstr "3.2.0" msgid "4.0.0" msgstr "4.0.0" msgid "4.1.0" msgstr "4.1.0" msgid "4.1.1" msgstr "4.1.1" msgid "4.1.2" msgstr "4.1.2" msgid "5.0.0" msgstr "5.0.0" msgid ":ref:`genindex`" msgstr ":ref:`genindex`" msgid ":ref:`search`" msgstr ":ref:`search`" msgid "" "A new section is created in magnum.conf named cinder. In this cinder " "section, you need to set a value for the key default_docker_volume_type, " "which should be a valid type for cinder volumes in your cinder deployment. " "This default value will be used if no volume_type is provided by the user " "when using a cinder volume for container storage. 
The suggested default " "value the one set in cinder.conf of your cinder deployment." msgstr "" "magnum.conf ã« cinder ã¨ã„ã†åå‰ã®æ–°ã—ã„セクションãŒä½œæˆã•れã¾ã™ã€‚ã“ã® " "cinder セクションã§ã¯ã€ã‚­ãƒ¼ default_docker_volume_type ã®å€¤ã‚’設定ã™ã‚‹å¿…è¦ãŒã‚" "りã¾ã™ã€‚ã“ã®å€¤ã¯ã€ cinder ã®æ§‹æˆã§æœ‰åŠ¹ãª cinder ボリュームタイプã§ã‚ã‚‹å¿…è¦ãŒ" "ã‚りã¾ã™ã€‚ã“ã®ãƒ‡ãƒ•ォルト値ã¯ã€ã‚³ãƒ³ãƒ†ãƒŠãƒ¼ã®ã‚¹ãƒˆãƒ¬ãƒ¼ã‚¸ç”¨ã« cinder ボリュームを" "使用ã™ã‚‹ã¨ãã«ã€ãƒ¦ãƒ¼ã‚¶ãƒ¼ãŒ volume_type を指定ã—ãªã„å ´åˆã«ä½¿ç”¨ã•れã¾ã™ã€‚ ææ¡ˆ" "ã•れã¦ã„るデフォルト値ã¯ã€ cinder æ§‹æˆã® cinder.conf ã«è¨­å®šã•れã¦ã„ã¾ã™ã€‚" msgid "" "Add Microversion 1.3 to support Magnum bay rollback, user can enable " "rollback on bay update failure by setting 'OpenStack-API-Version' to " "'container-infra 1.3' in request header and passing 'rollback=True' param in " "bay update request." msgstr "" "Magnum ベイã®ãƒ­ãƒ¼ãƒ«ãƒãƒƒã‚¯ã‚’サãƒãƒ¼ãƒˆã™ã‚‹ãƒžã‚¤ã‚¯ãƒ­ãƒãƒ¼ã‚¸ãƒ§ãƒ³ 1.3 を追加ã™ã‚‹ã¨ã€" "ユーザーã¯ãƒªã‚¯ã‚¨ã‚¹ãƒˆãƒ˜ãƒƒãƒ€ãƒ¼ã§ 'OpenStack-API-Version' ã‚’ 'container-infra " "1.3' ã«è¨­å®šã—ã€ãƒ™ã‚¤æ›´æ–°è¦æ±‚ã§ 'rollback=True' パラメータを渡ã™ã“ã¨ã§ãƒ™ã‚¤ã®æ›´" "新失敗時ã®ãƒ­ãƒ¼ãƒ«ãƒãƒƒã‚¯ã‚’有効ã«ã§ãã¾ã™ã€‚" msgid "" "Add Support of LBaaS v2, LBaaS v1 is removed by neutron community in Newton " "release. Until now, LBaaS v1 was used by all clusters created using magnum. " "This release adds support of LBaaS v2 for all supported drivers." msgstr "" "LBaaS v2 ã®ã‚µãƒãƒ¼ãƒˆã‚’追加ã—ã¾ã—ãŸã€‚LBaaS v1 㯠Newton リリース㧠neutron コ" "ミュニティã«ã‚ˆã£ã¦å‰Šé™¤ã•れã¦ã„ã¾ã™ã€‚今ã¾ã§ã€ LBaaS v1 ã¯ã€Magnum を使用ã—ã¦ä½œ" "æˆã•れãŸã™ã¹ã¦ã®ã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ã§ä½¿ç”¨ã•れã¦ã„ã¾ã—ãŸã€‚ã“ã®ãƒªãƒªãƒ¼ã‚¹ã§ã¯ã€ã‚µãƒãƒ¼ãƒˆã•" "れã¦ã„ã‚‹ã™ã¹ã¦ã®ãƒ‰ãƒ©ã‚¤ãƒãƒ¼ã® LBaaS v2 ã®ã‚µãƒãƒ¼ãƒˆãŒè¿½åŠ ã•れã¦ã„ã¾ã™ã€‚" msgid "" "Add configuration for overlay networks for the docker network driver in " "swarm. To use this feature, users need to create a swarm cluster with " "network_driver set to 'docker'. 
After the cluster is created, users can " "create an overlay network (docker network create -d overlay mynetwork) and " "use it when launching a new container (docker run --net=mynetwork ...)." msgstr "" "swarm ã® docker ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ãƒ‰ãƒ©ã‚¤ãƒãƒ¼ã®ã‚ªãƒ¼ãƒãƒ¼ãƒ¬ã‚¤ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ã®è¨­å®šã‚’追加" "ã—ã¾ã™ã€‚ã“ã®æ©Ÿèƒ½ã‚’使用ã™ã‚‹ã«ã¯ã€ network_driver ㌠'docker' ã«è¨­å®šã•れ㟠" "swarm クラスタを作æˆã™ã‚‹å¿…è¦ãŒã‚りã¾ã™ã€‚クラスターを作æˆã—ãŸå¾Œã€ãƒ¦ãƒ¼ã‚¶ãƒ¼ã¯" "オーãƒãƒ¼ãƒ¬ã‚¤ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ï¼ˆdocker network create -d overlay mynetwork)を作æˆ" "ã—ã€æ–°ã—ã„コンテナーを起動ã™ã‚‹ã¨ãã«ä½¿ç”¨ã§ãã¾ã™ï¼ˆdocker run -" "net=mynetwork ...)。" msgid "" "Add docker-storage-driver parameter to baymodel to allow user select from " "the supported drivers. Until now, only devicemapper was supported. This " "release adds support for OverlayFS on Fedora Atomic hosts with kernel " "version >= 3.18 (Fedora 22 or higher) resulting significant performance " "improvement. To use OverlayFS, SELinux must be enabled and in enforcing mode " "on the physical machine, but must be disabled in the container. Thus, if you " "select overlay for docker-storage-driver SELinux will be disable inside the " "containers." msgstr "" "docker-storage-driver パラメータをベイモデルã«è¿½åŠ ã™ã‚‹ã¨ã€ãƒ¦ãƒ¼ã‚¶ãƒ¼ã¯ã‚µãƒãƒ¼ãƒˆ" "ã•れã¦ã„るドライãƒãƒ¼ã‹ã‚‰é¸æŠžã§ãるよã†ã«ãªã‚Šã¾ã™ã€‚ã“れã¾ã§ã¯ã€ devicemapper " "ã ã‘ãŒã‚µãƒãƒ¼ãƒˆã•れã¦ã„ã¾ã—ãŸã€‚ã“ã®ãƒªãƒªãƒ¼ã‚¹ã§ã¯ã€ã‚«ãƒ¼ãƒãƒ«ãƒãƒ¼ã‚¸ãƒ§ãƒ³ãŒ 3.18 以" "上(Fedora 22 以上)㮠Fedora Atomic ホスト上㧠OverlayFS ãŒã‚µãƒãƒ¼ãƒˆã•れã€ãƒ‘" "フォーマンスãŒå¤§å¹…ã«å‘上ã—ã¾ã—ãŸã€‚ OverlayFS を使用ã™ã‚‹ã«ã¯ã€ SELinux を有効" "ã«ã—ã¦ç‰©ç†ãƒžã‚·ãƒ³ã§å¼·åˆ¶ãƒ¢ãƒ¼ãƒ‰ã«ã™ã‚‹å¿…è¦ãŒã‚りã¾ã™ãŒã€ã‚³ãƒ³ãƒ†ãƒŠãƒ¼å†…ã§ç„¡åйã«ã™ã‚‹" "å¿…è¦ãŒã‚りã¾ã™ã€‚ ã—ãŸãŒã£ã¦ã€docker-storage-driver ã« overlay ã‚’é¸æŠžã—ãŸå ´" "åˆã€ SELinux ã¯ã‚³ãƒ³ãƒ†ãƒŠå†…ã§ç„¡åйã«ãªã‚Šã¾ã™ã€‚" msgid "" "Add flannel's host-gw backend option. Magnum deploys cluster over a " "dedicated neutron private network by using flannel. 
Flannel's host-gw " "backend gives the best performance in this topopolgy (private layer2) since " "there is no packet processing overhead, no reduction to MTU, scales to many " "hosts as well as the alternatives. The label \"flannel_use_vxlan\" was " "repurposed when the network driver is flannel. First, rename the label " "flannel_use_vxlan to flannel_backend. Second, redefine the value of this " "label from \"yes/no\" to \"udp/vxlan/host-gw\"." msgstr "" "flannel ã® host-gw ãƒãƒƒã‚¯ã‚¨ãƒ³ãƒ‰ã‚ªãƒ—ションを追加ã—ã¾ã™ã€‚Magnum 㯠flannel を使" "用ã—ã¦å°‚用㮠neutron プライベートãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ä¸Šã«ã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ã‚’デプロイã—ã¾" "ã™ã€‚ Flannel ã® host-gw ãƒãƒƒã‚¯ã‚¨ãƒ³ãƒ‰ã¯ã€ãƒ‘ケット処ç†ã®ã‚ªãƒ¼ãƒãƒ¼ãƒ˜ãƒƒãƒ‰ãŒãªã〠" "MTU ãŒå‰Šæ¸›ã•れãšã€å¤šãã®ãƒ›ã‚¹ãƒˆã‚„代替手段ã«ã‚¹ã‚±ãƒ¼ãƒ«ã™ã‚‹ãŸã‚ã€ã“ã®ãƒˆãƒãƒ­ã‚¸ï¼ˆãƒ—" "ライベートレイヤー2ï¼‰ã§æœ€é«˜ã®ãƒ‘フォーマンスを発æ®ã—ã¾ã™ã€‚ ラベル " "\"flannel_use_vxlan\" ã¯ã€ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ãƒ‰ãƒ©ã‚¤ãƒãƒ¼ãŒ flannel ã®ã¨ãã«å†åˆ©ç”¨ã•れ" "ã¾ã™ã€‚ã¾ãšã€ãƒ©ãƒ™ãƒ«\n" "flannel_use_vxlan ã®åå‰ã‚’ flannel_backend ã«å¤‰æ›´ã—ã¾ã™ã€‚次ã«ã€ã“ã®ãƒ©ãƒ™ãƒ«ã®å€¤" "ã‚’ \"yes/no\" ã‹ã‚‰ \"udp/vxlan/host-gw\" ã«å†å®šç¾©ã—ã¾ã™ã€‚" msgid "" "Add microversion 1.5 to support rotation of a cluster's CA certificate. " "This gives admins a way to restrict/deny access to an existing cluster once " "a user has been granted access." msgstr "" "マイクロãƒãƒ¼ã‚¸ãƒ§ãƒ³ 1.5 を追加ã—ã¦ã€ã‚¯ãƒ©ã‚¹ã‚¿ã® CA 証明書ã®ãƒ­ãƒ¼ãƒ†ãƒ¼ã‚·ãƒ§ãƒ³ã‚’サ" "ãƒãƒ¼ãƒˆã—ã¾ã™ã€‚ã“れã«ã‚ˆã‚Šã€ç®¡ç†è€…ã¯ã€ãƒ¦ãƒ¼ã‚¶ãƒ¼ã«ã‚¢ã‚¯ã‚»ã‚¹æ¨©ãŒä»˜ä¸Žã•れるã¨ã€æ—¢å­˜" "ã®ã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ã¸ã®ã‚¢ã‚¯ã‚»ã‚¹ã‚’制é™/æ‹’å¦ã™ã‚‹æ–¹æ³•ã‚’æä¾›ã—ã¾ã™ã€‚" msgid "" "Add support for a new OpenSUSE driver for running k8s cluster on OpenSUSE. " "This driver is experimental for now, and operators need to get it from /" "contrib folder." 
msgstr "" "OpenSUSE ã§ k8s クラスターを実行ã™ã‚‹ãŸã‚ã®æ–°ã—ã„ OpenSUSE ドライãƒãƒ¼ã®ã‚µãƒãƒ¼" "トを追加ã—ã¾ã—ãŸã€‚ã“ã®ãƒ‰ãƒ©ã‚¤ãƒãƒ¼ã¯ä»Šã®ã¨ã“ã‚実験的ãªã‚‚ã®ã§ã€ã‚ªãƒšãƒ¬ãƒ¼ã‚¿ãƒ¼ã¯ /" "contrib フォルダã‹ã‚‰å–å¾—ã™ã‚‹å¿…è¦ãŒã‚りã¾ã™ã€‚" msgid "" "Add support to store the etcd configuration in a cinder volume. " "k8s_fedora_atomic accepts a new label etcd_volume_size defining the size of " "the volume. A value of 0 or leaving the label unset means no volume should " "be used, and the data will go to the instance local storage." msgstr "" "etcd 設定を cinder ãƒœãƒªãƒ¥ãƒ¼ãƒ ã«æ ¼ç´ã™ã‚‹ãŸã‚ã®ã‚µãƒãƒ¼ãƒˆã‚’追加ã—ã¾ã™ã€‚ " "k8s_fedora_atomic ã¯ã€ãƒœãƒªãƒ¥ãƒ¼ãƒ ã®ã‚µã‚¤ã‚ºã‚’定義ã™ã‚‹æ–°ã—ã„ラベル " "etcd_volume_size ã‚’å—ã‘入れã¾ã™ã€‚値 0 ã¾ãŸã¯ãƒ©ãƒ™ãƒ«ã‚’設定ã—ãªã„ã¾ã¾ã«ã™ã‚‹ã¨ã€" "ボリュームを使用ã™ã‚‹å¿…è¦ãŒãªããªã‚Šã€ãƒ‡ãƒ¼ã‚¿ã¯ã‚¤ãƒ³ã‚¹ã‚¿ãƒ³ã‚¹ã®ãƒ­ãƒ¼ã‚«ãƒ«ã‚¹ãƒˆãƒ¬ãƒ¼ã‚¸" "ã«ä¿å­˜ã•れã¾ã™ã€‚" msgid "" "Added parameter in cluster-create to specify the keypair. If keypair is not " "provided, the default value from the matching ClusterTemplate will be used." msgstr "" "キーペアを指定ã™ã‚‹ãŸã‚ã®ãƒ‘ラメータを cluster-create ã«è¿½åŠ ã—ã¾ã—ãŸã€‚キーペア" "ãŒæŒ‡å®šã•れã¦ã„ãªã„å ´åˆã€ä¸€è‡´ã™ã‚‹ ClusterTemplate ã®ãƒ‡ãƒ•ォルト値ãŒä½¿ç”¨ã•れã¾" "ã™ã€‚" msgid "" "All container/pod/service/replication controller operations were removed. " "Users are recommended to use the COE's native tool (i.e. docker, kubectl) to " "do the equivalent of the removed operations." msgstr "" "ã™ã¹ã¦ã®ã‚³ãƒ³ãƒ†ãƒŠãƒ¼/ãƒãƒƒãƒ‰/サービス/レプリケーションコントローラæ“作ãŒå‰Šé™¤ã•れ" "ã¾ã—ãŸã€‚ COE ã®ãƒã‚¤ãƒ†ã‚£ãƒ–ツール( docker〠kubectl ãªã©ï¼‰ã‚’使用ã—ã¦ã€å‰Šé™¤ã•れ" "ãŸæ“作ã¨åŒç­‰ã®æ“作を行ã†ã“ã¨ã‚’ãŠå‹§ã‚ã—ã¾ã™ã€‚" msgid "" "Auto generate name for cluster and cluster-template. If users create a " "cluster/cluster-template without specifying a name, the name will be auto-" "generated." 
msgstr "" "クラスターãŠã‚ˆã³ã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ãƒ†ãƒ³ãƒ—レートã®åå‰ã‚’自動生æˆã—ã¾ã™ã€‚ ユーザーãŒåå‰" "を指定ã›ãšã«ã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼/クラスターテンプレートを作æˆã™ã‚‹ã¨ã€ãã®åå‰ãŒè‡ªå‹•生æˆ" "ã•れã¾ã™ã€‚" msgid "Bug Fixes" msgstr "ãƒã‚°ä¿®æ­£" msgid "" "Change default API development service from wsgiref simple_server to " "werkzeug for better supporting SSL." msgstr "" "SSL をより良ãサãƒãƒ¼ãƒˆã™ã‚‹ãŸã‚ã«ã€ãƒ‡ãƒ•ォルト㮠API 開発サービスを wsgiref " "simple_server ã‹ã‚‰werkzeug ã«å¤‰æ›´ã—ã¦ãã ã•ã„。" msgid "" "Change service type from \"Container service\" to \"Container Infrastructure " "Management service\". In addition, the mission statement is changed to \"To " "provide a set of services for provisioning, scaling, and managing container " "orchestration engines.\"" msgstr "" "サービスタイプを「コンテナーサービスã€ã‹ã‚‰ã€Œã‚³ãƒ³ãƒ†ãƒŠãƒ¼ã‚¤ãƒ³ãƒ•ラ管ç†ã‚µãƒ¼ãƒ“スã€" "ã«å¤‰æ›´ã—ã¾ã™ã€‚ã•らã«ã€ãƒŸãƒƒã‚·ãƒ§ãƒ³ã‚¹ãƒ†ãƒ¼ãƒˆãƒ¡ãƒ³ãƒˆãŒã€Œã‚³ãƒ³ãƒ†ãƒŠãƒ¼ã‚ªãƒ¼ã‚±ã‚¹ãƒˆãƒ¬ãƒ¼" "ションエンジンã®ãƒ—ロビジョニングã€ã‚¹ã‚±ãƒ¼ãƒªãƒ³ã‚°ã€ãŠã‚ˆã³ç®¡ç†ã®ãŸã‚ã®ä¸€é€£ã®ã‚µãƒ¼" "ビスをæä¾›ã™ã‚‹ã€ã«å¤‰æ›´ã•れã¾ã—ãŸã€‚" msgid "Contents:" msgstr "内容:" msgid "Current Series Release Notes" msgstr "開発中ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã®ãƒªãƒªãƒ¼ã‚¹ãƒŽãƒ¼ãƒˆ" msgid "" "Current implementation of magnum bay operations are synchronous and as a " "result API requests are blocked until response from HEAT service is " "received. This release adds support for asynchronous bay operations (bay-" "create, bay-update, and bay-delete). Please note that with this change, bay-" "create, bay-update API calls will return bay uuid instead of bay object and " "also return HTTP status code 202 instead of 201. Microversion 1.2 is added " "for new behavior." 
msgstr "" "Magnum ã®ãƒ™ã‚¤æ“作ã®ç¾åœ¨ã®å®Ÿè£…ã¯åŒæœŸçš„ã§ã‚りã€ãã®çµæžœã€ HEAT サービスã‹ã‚‰ã®å¿œ" "ç­”ãŒå—ä¿¡ã•れるã¾ã§ API リクエストãŒãƒ–ロックã•れã¾ã™ã€‚ã“ã®ãƒªãƒªãƒ¼ã‚¹ã§ã¯ã€éžåŒæœŸ" "ã®ãƒ™ã‚¤æ“作(ベイ作æˆã€ãƒ™ã‚¤æ›´æ–°ã€ãŠã‚ˆã³ãƒ™ã‚¤å‰Šé™¤ï¼‰ã®ã‚µãƒãƒ¼ãƒˆãŒè¿½åŠ ã•れã¦ã„ã¾" "ã™ã€‚ã“ã®å¤‰æ›´ã«ã‚ˆã‚Šã€ bay-create 〠bay-update ã® API コールã¯ãƒ™ã‚¤ã‚ªãƒ–ジェクト" "ã§ã¯ãªãベイ uuid ã‚’è¿”ã—〠201 ã§ã¯ãªã HTTP ステータスコード 202 ã‚’è¿”ã—ã¾" "ã™ã€‚マイクロãƒãƒ¼ã‚¸ãƒ§ãƒ³ 1.2 ãŒæ–°ã—ã„動作ã®ãŸã‚ã«è¿½åŠ ã•れã¦ã„ã¾ã™ã€‚" msgid "" "Currently, the swarm and the kubernetes drivers use a dedicated cinder " "volume to store the container images. It was been observed that one cinder " "volume per node is a bottleneck for large clusters." msgstr "" "ç¾åœ¨ã€ swarm ãŠã‚ˆã³ kubernetes ドライãƒãƒ¼ã¯ã‚³ãƒ³ãƒ†ãƒŠãƒ¼ã®ã‚¤ãƒ¡ãƒ¼ã‚¸ã‚’ä¿å­˜ã™ã‚‹ãŸã‚" "ã«å°‚用ã®ã‚³ãƒ³ãƒ†ãƒŠãƒ¼ãƒœãƒªãƒ¥ãƒ¼ãƒ ã‚’使用ã—ã¦ã„ã¾ã™ã€‚ 1ã¤ã®ãƒŽãƒ¼ãƒ‰ã‚ãŸã‚Š1ã¤ã® Cinder " "ボリュームãŒå¤§ããªã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ã®ãƒœãƒˆãƒ«ãƒãƒƒã‚¯ã§ã‚ã‚‹ã“ã¨ãŒè¦³å¯Ÿã•れã¾ã—ãŸã€‚" msgid "" "Decouple the hard requirement on barbican. Introduce a new certificate store " "called x509keypair. If x509keypair is used, TLS certificates will be stored " "at magnum's database instead of barbican. To do that, set the value of the " "config ``cert_manager_type`` as ``x509keypair``." msgstr "" "Barbican ã®å޳ã—ã„è¦æ±‚を切り離ã—ã¾ã™ã€‚ x509keypair ã¨ã„ã†æ–°ã—ã„証明書ストアを" "å°Žå…¥ã—ã¾ã™ã€‚ x509keypair を使用ã™ã‚‹ã¨ã€TLS 証明書㯠barbican ã®ä»£ã‚り㫠" "magnum ã®ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ã«æ ¼ç´ã•れã¾ã™ã€‚ ã“れを行ã†ã«ã¯ã€è¨­å®š " "``cert_manager_type`` ã®å€¤ã‚’ ``x509keypair`` ã«è¨­å®šã—ã¾ã™ã€‚" msgid "" "Decouple the hard requirement on neutron-lbaas. Introduce a new property " "master_lb_enabled in cluster template. This property will determines if a " "cluster's master nodes should be load balanced. Set the value to false if " "neutron-lbaas is not installed." 
msgstr "" "neutron-lbaas ã®å޳ã—ã„è¦æ±‚を切り離ã—ã¾ã™ã€‚ ã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ãƒ†ãƒ³ãƒ—ãƒ¬ãƒ¼ãƒˆã«æ–°ã—ã„プロ" "パティ master_lb_enabled ã‚’å°Žå…¥ã—ã¾ã™ã€‚ã“ã®ãƒ—ロパティã¯ã€ã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ã®ãƒžã‚¹ã‚¿ãƒ¼" "ノードãŒè² è·åˆ†æ•£ã•れるã¹ãã‹ã©ã†ã‹ã‚’決定ã—ã¾ã™ã€‚ neutron-lbaas ãŒã‚¤ãƒ³ã‚¹ãƒˆãƒ¼ãƒ«" "ã•れã¦ã„ãªã„å ´åˆã¯ã€å€¤ã‚’ false ã«è¨­å®šã—ã¾ã™ã€‚" msgid "Deprecation Notes" msgstr "å»ƒæ­¢äºˆå®šã®æ©Ÿèƒ½" msgid "" "Emit notifications when there is an event on a cluster. An event could be a " "status change of the cluster due to an operation issued by end-users (i.e. " "users create, update or delete the cluster). Notifications are sent by using " "oslo.notify and PyCADF. Ceilometer can capture the events and generate " "samples for auditing, billing, monitoring, or quota purposes." msgstr "" "クラスターã«ã‚¤ãƒ™ãƒ³ãƒˆãŒå­˜åœ¨ã™ã‚‹å ´åˆã«é€šçŸ¥ã‚’é€ä¿¡ã—ã¾ã™ã€‚イベントã¯ã€ã‚¨ãƒ³ãƒ‰ãƒ¦ãƒ¼" "ザーãŒç™ºè¡Œã™ã‚‹æ“作(例ãˆã°ã€ãƒ¦ãƒ¼ã‚¶ãƒ¼ãŒã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ã‚’作æˆã€æ›´æ–°ã€ã¾ãŸã¯å‰Šé™¤ã™" "る)ã«ã‚ˆã£ã¦ã€ã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ã®ã‚¹ãƒ†ãƒ¼ã‚¿ã‚¹å¤‰æ›´ã¨ãªã‚Šå¾—ã¾ã™ã€‚通知ã¯ã€ oslo.notify " "㨠PyCADF を使用ã—ã¦é€ä¿¡ã•れã¾ã™ã€‚ Ceilometer ã¯ã€ã‚¤ãƒ™ãƒ³ãƒˆã‚’キャプãƒãƒ£ãƒ¼ã—ã€" "監査ã€èª²é‡‘ã€ç›£è¦–ã€ã¾ãŸã¯ã‚¯ã‚©ãƒ¼ã‚¿ã®ç›®çš„ã§ã‚µãƒ³ãƒ—ルを生æˆã§ãã¾ã™ã€‚" msgid "" "Enable Mesos cluster to export more slave flags via labels in cluster " "template. Add the following labels: mesos_slave_isolation, " "mesos_slave_image_providers, mesos_slave_work_dir, and " "mesos_slave_executor_environment_variables." msgstr "" "Mesos クラスターを有効ã«ã—ã¦ã€ã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ãƒ†ãƒ³ãƒ—レートã®ãƒ©ãƒ™ãƒ«ã‚’使用ã—ã¦ã‚¹ãƒ¬ãƒ¼" "ブフラグをã•らã«ã‚¨ã‚¯ã‚¹ãƒãƒ¼ãƒˆã—ã¾ã™ã€‚ 次ã®ãƒ©ãƒ™ãƒ«ã‚’追加ã—ã¾ã™: " "mesos_slave_isolation 〠mesos_slave_image_providers ã€\n" " mesos_slave_work_dir ã€ãŠã‚ˆã³ mesos_slave_executor_environment_variables 。" msgid "" "Every magnum cluster is assigned a trustee user and a trustID. This user is " "used to allow clusters communicate with the key-manager service (Barbican) " "and get the certificate authority of the cluster. 
This trust user can be " "used by other services too. It can be used to let the cluster authenticate " "with other OpenStack services like the Block Storage service, Object Storage " "service, Load Balancing etc. The cluster with this user and the trustID has " "full access to the trustor's OpenStack project. A new configuration " "parameter has been added to restrict the access to other services than " "Magnum." msgstr "" "ã™ã¹ã¦ã® Magnum クラスターã«ã¯ã€ä¿¡é ¼ã•れるユーザー㨠trustID ãŒå‰²ã‚Šå½“ã¦ã‚‰ã‚Œã¾" "ã™ã€‚ ã“ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼ã¯ã€ã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ãŒéµç®¡ç†ã‚µãƒ¼ãƒ“ス(Barbican)ã¨é€šä¿¡ã—ã€ã‚¯ãƒ©ã‚¹" "ターã®èªè¨¼å±€ã‚’å–å¾—ã§ãるよã†ã«ã™ã‚‹ãŸã‚ã«ä½¿ç”¨ã•れã¾ã™ã€‚ ã“ã®ä¿¡é ¼ãƒ¦ãƒ¼ã‚¶ãƒ¼ã¯ä»–ã®" "サービスã§ã‚‚使用ã§ãã¾ã™ã€‚ ã“れを使用ã—ã¦ã€ã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ãŒãƒ–ロックストレージサー" "ビスã€ã‚ªãƒ–ジェクトストレージサービスã€ãƒ­ãƒ¼ãƒ‰ãƒãƒ©ãƒ³ã‚·ãƒ³ã‚°ãªã©ã®ä»–ã®OpenStack " "サービスã§èªè¨¼ã•れるよã†ã«ã™ã‚‹ã“ã¨ãŒã§ãã¾ã™ã€‚ã“ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼ã¨ trustID ã‚’æŒã¤ã‚¯" "ラスターã«ã¯ã€ä¿¡é ¼ã™ã‚‹å´ã® OpenStack プロジェクトã¸ã®ãƒ•ルアクセス権ãŒã‚りã¾" "ã™ã€‚ Magnum 以外ã®ã‚µãƒ¼ãƒ“スã¸ã®ã‚¢ã‚¯ã‚»ã‚¹ã‚’制é™ã™ã‚‹ãŸã‚ã®æ–°ã—ã„設定パラメータãŒ" "追加ã•れã¾ã—ãŸã€‚" msgid "" "Fix global stack list in periodic task. In before, magnum's periodic task " "performs a `stack-list` operation across all tenants. This is disabled by " "Heat by default since it causes a security issue. At this release, magnum " "performs a `stack-get` operation on each Heat stack by default. This might " "not be scalable and operators have an option to fall back to `stack-list` by " "setting the config `periodic_global_stack_list` to `True` (`False` by " "default) and updating the heat policy file (usually /etc/heat/policy.json) " "to allow magnum list stacks." 
msgstr "" "定期的ãªã‚¿ã‚¹ã‚¯ã§ã‚°ãƒ­ãƒ¼ãƒãƒ«ã‚¹ã‚¿ãƒƒã‚¯ãƒªã‚¹ãƒˆã‚’修正ã—ã¾ã™ã€‚ 以å‰ã¯ã€Magnum ã®å®šæœŸ" "çš„ãªã‚¿ã‚¹ã‚¯ã¯ã€ã™ã¹ã¦ã®ãƒ†ãƒŠãƒ³ãƒˆã§ã‚¹ã‚¿ãƒƒã‚¯ãƒªã‚¹ãƒˆæ“作を実行ã—ã¦ã„ã¾ã—ãŸã€‚ ã“れã¯" "セキュリティ上ã®å•題ãŒç™ºç”Ÿã™ã‚‹ãŸã‚ã€ãƒ‡ãƒ•ォルト㧠Heat ã«ã‚ˆã£ã¦ç„¡åйã«ãªã£ã¦ã„" "ã¾ã™ã€‚ ã“ã®ãƒªãƒªãƒ¼ã‚¹ã§ã¯ã€Magnum ã¯ãƒ‡ãƒ•ォルトã§å„ Heat スタックã«å¯¾ã—㦠" "`stack-get` æ“作を実行ã—ã¾ã™ã€‚ ã“れã¯ã‚¹ã‚±ãƒ¼ãƒ©ãƒ–ルã§ã¯ãªã„ã‹ã‚‚ã—れã¾ã›ã‚“ã®ã§ã€" "オペレーター㯠`periodic_global_stack_list` ã‚’ `True`(デフォルト㧠`False`)" "ã«è¨­å®šã—ã€Heat ãƒãƒªã‚·ãƒ¼ãƒ•ァイル(通常㯠/etc/heat/policy.json )を Magnum ã«" "スタックã®ä¸€è¦§å–得を許å¯ã™ã‚‹ã‚ˆã†æ›´æ–°ã™ã‚‹ã“ã¨ã§ `stack-list` ã¸ã®é€€è¡ŒãŒå¯èƒ½ã§" "ã™ã€‚" msgid "" "Fixes CVE-2016-7404 for newly created clusters. Existing clusters will have " "to be re-created to benefit from this fix. Part of this fix is the newly " "introduced setting `cluster_user_trust` in the `trust` section of magnum." "conf. This setting defaults to False. `cluster_user_trust` dictates whether " "to allow passing a trust ID into a cluster's instances. For most clusters " "this capability is not needed. Clusters with `registry_enabled=True` or " "`volume_driver=rexray` will need this capability. Other features that " "require this capability may be introduced in the future. To be able to " "create such clusters you will need to set `cluster_user_trust` to True." msgstr "" "æ–°ã—ã作æˆã•れãŸã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ã® CVE-2016-7404 を修正ã—ã¾ã—ãŸã€‚ã“ã®å•題を解決ã™ã‚‹" "ã«ã¯ã€æ—¢å­˜ã®ã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ã‚’å†ä½œæˆã™ã‚‹å¿…è¦ãŒã‚りã¾ã™ã€‚ã“ã®ä¿®æ­£ã®ä¸€éƒ¨ã¯ã€magnum." 
"conf ã® `trust` ã‚»ã‚¯ã‚·ãƒ§ãƒ³ã«æ–°ã—ãå°Žå…¥ã•れ㟠`cluster_user_trust` ã®è¨­å®šã§" "ã™ã€‚ã“ã®è¨­å®šã®æ—¢å®šå€¤ã¯Falseã§ã™ã€‚ `cluster_user_trust` ã¯ã€ã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ã®ã‚¤ãƒ³ã‚¹" "タンスã«ãƒˆãƒ©ã‚¹ãƒˆ ID を渡ã™ã“ã¨ã‚’許å¯ã™ã‚‹ã‹ã©ã†ã‹ã‚’指定ã—ã¾ã™ã€‚ã»ã¨ã‚“ã©ã®ã‚¯ãƒ©" "スターã§ã¯ã€ã“ã®æ©Ÿèƒ½ã¯å¿…è¦ã‚りã¾ã›ã‚“。 `registry_enabled=True` ã¾ãŸã¯ " "`volume_driver=rexray` ã®ã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ã«ã¯ã“ã®æ©Ÿèƒ½ãŒå¿…è¦ã§ã™ã€‚ã“ã®æ©Ÿèƒ½ã‚’å¿…è¦ã¨ã™" "ã‚‹ãã®ä»–ã®æ©Ÿèƒ½ãŒå°†æ¥å°Žå…¥ã•れるå¯èƒ½æ€§ãŒã‚りã¾ã™ã€‚ã“ã®ã‚ˆã†ãªã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ã‚’作æˆã™" "ã‚‹ã«ã¯ã€ `cluster_user_trust` ã‚’ True ã«è¨­å®šã™ã‚‹å¿…è¦ãŒã‚りã¾ã™ã€‚" msgid "" "Include kubernetes dashboard in kubernetes cluster by default. Users can use " "this kubernetes dashboard to manage the kubernetes cluster. Dashboard can be " "disabled by setting the label 'kube_dashboard_enabled' to false." msgstr "" "kubernetes ダッシュボードをデフォルト㧠kubernetes クラスターã«å«ã‚ã¾ã™ã€‚ " "ユーザーã¯ã“ã®kubernetes ダッシュボードを使用ã—㦠kubernetes クラスターを管ç†" "ã§ãã¾ã™ã€‚ 'kube_dashboard_enabled' ã¨ã„ã†ãƒ©ãƒ™ãƒ«ã‚’ false ã«è¨­å®šã™ã‚‹ã¨ã€ãƒ€ãƒƒ" "シュボードを無効ã«ã™ã‚‹ã“ã¨ãŒã§ãã¾ã™ã€‚" msgid "" "Includes a monitoring stack based on cAdvisor, node-exporter, Prometheus and " "Grafana. Users can enable this stack through the label " "prometheus_monitoring. Prometheus scrapes metrics from the Kubernetes " "cluster and then serves them to Grafana through Grafana's Prometheus data " "source. Upon completion, a default Grafana dashboard is provided." msgstr "" "cAdvisor 〠node-exporter 〠Prometheus 〠Grafana ã«åŸºã¥ã監視スタックをå«ã¿" "ã¾ã™ã€‚ ユーザーã¯ã€ã“ã®ã‚¹ã‚¿ãƒƒã‚¯ã‚’ prometheus_monitoring ã¨ã„ã†ãƒ©ãƒ™ãƒ«ã§æœ‰åйã«" "ã™ã‚‹ã“ã¨ãŒã§ãã¾ã™ã€‚ Prometheus 㯠Kubernetes クラスターã‹ã‚‰ãƒ¡ãƒˆãƒªã‚¯ã‚¹ã‚’スク" "ラップã—〠Grafana ã® Prometheus データソースを通ã˜ã¦ Grafana ã«æä¾›ã—ã¾ã™ã€‚" "完了ã™ã‚‹ã¨ã€ãƒ‡ãƒ•ォルト㮠Grafana ãƒ€ãƒƒã‚·ãƒ¥ãƒœãƒ¼ãƒ‰ãŒæä¾›ã•れã¾ã™ã€‚" msgid "Indices and tables" msgstr "目次ã¨è¡¨" msgid "" "Integrate Docker Swarm Fedora Atomic driver with the Block Storage Service " "(cinder). 
The rexray volume driver was added based on rexray v0.4. Users can " "create and attach volumes using docker's navive client and they will " "authenticate using the per cluster trustee user. Rexray can be either added " "in the Fedora Atomic image or can be used running in a container." msgstr "" "Docker Swarm Fedora Atomic ドライãƒãƒ¼ã¨ãƒ–ロックストレージサービス(cinder)を" "çµ±åˆã—ã¾ã™ã€‚ rexray v0.4ã«åŸºã¥ã„ã¦ã€ rexray ボリュームドライãƒãƒ¼ãŒè¿½åŠ ã•れã¾" "ã—ãŸã€‚ユーザーã¯ã€ Docker ã®ãƒã‚¤ãƒ†ã‚£ãƒ–クライアントを使用ã—ã¦ãƒœãƒªãƒ¥ãƒ¼ãƒ ã‚’作æˆ" "ã—ã¦æŽ¥ç¶šã™ã‚‹ã“ã¨ãŒã§ãã€ã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ã”ã¨ã®ä¿¡é ¼ã•れるユーザーを使用ã—ã¦èªè¨¼ã•れ" "ã¾ã™ã€‚ Rexray ã¯ã€ Fedora Atomic イメージã«è¿½åŠ ã™ã‚‹ã“ã¨ã‚‚ã€ã‚³ãƒ³ãƒ†ãƒŠãƒ¼å†…ã§å®Ÿè¡Œ" "ã™ã‚‹ã“ã¨ã‚‚ã§ãã¾ã™ã€‚" msgid "" "Keypair is now optional for ClusterTemplate, in order to allow Clusters to " "use keypairs separate from their parent ClusterTemplate." msgstr "" "クラスターãŒè¦ªã®ã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ãƒ†ãƒ³ãƒ—レートã¨ã¯åˆ¥ã®ã‚­ãƒ¼ãƒšã‚¢ã‚’使用ã§ãるよã†ã«ã™ã‚‹" "ãŸã‚ã€ã‚­ãƒ¼ãƒšã‚¢ã¯ã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ãƒ†ãƒ³ãƒ—レートã§ã¯ã‚ªãƒ—ションã«ãªã‚Šã¾ã—ãŸã€‚" msgid "" "Keystone URL used by Cluster Templates instances to authenticate is now " "configurable with the ``trustee_keystone_interface`` parameter which default " "to ``public``." msgstr "" "クラスターテンプレートインスタンスãŒèªè¨¼ã«ä½¿ç”¨ã™ã‚‹ Keystone URL ã¯ã€ãƒ‡ãƒ•ォル" "ト㧠``public`` ã® ``trustee_keystone_interface`` パラメータã§è¨­å®šå¯èƒ½ã«ãªã‚Š" "ã¾ã—ãŸã€‚" msgid "Liberty Series Release Notes" msgstr "Liberty ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã®ãƒªãƒªãƒ¼ã‚¹ãƒŽãƒ¼ãƒˆ" msgid "" "Magnum bay operations API default behavior changed from synchronous to " "asynchronous. User can specify OpenStack-API-Version 1.1 in request header " "for synchronous bay operations." msgstr "" "Magnum ã®ãƒ™ã‚¤æ“作 API ã®ãƒ‡ãƒ•ォルト動作ãŒåŒæœŸã‹ã‚‰éžåŒæœŸã«å¤‰æ›´ã•れã¾ã—ãŸã€‚åŒæœŸ" "çš„ãªãƒ™ã‚¤æ“作ã®ãŸã‚ã«ã€ãƒªã‚¯ã‚¨ã‚¹ãƒˆãƒ˜ãƒƒãƒ€ãƒ¼ã« OpenStack-API-Version 1.1 を指定ã™" "ã‚‹ã“ã¨ãŒã§ãã¾ã™ã€‚" msgid "" "Magnum default service type changed from \"container\" to \"container-infra" "\". 
It is recommended to update the service type at Keystone service catalog " "accordingly." msgstr "" "Magnum ã®ãƒ‡ãƒ•ォルトã®ã‚µãƒ¼ãƒ“スタイプãŒã€Œcontainerã€ã‹ã‚‰ã€Œcontainer-infraã€ã«å¤‰" "æ›´ã•れã¾ã—ãŸã€‚ ã“れã«å¿œã˜ã¦ã€ Keystone サービスカタログã®ã‚µãƒ¼ãƒ“スタイプを更新" "ã™ã‚‹ã“ã¨ã‚’ãŠå‹§ã‚ã—ã¾ã™ã€‚" msgid "" "Magnum now support OSProfiler for HTTP, RPC and DB request tracing. User can " "enable OSProfiler via Magnum configuration file in 'profiler' section." msgstr "" "Magnum ã¯ã€ HTTP 〠RPC 〠DB ã®è¦æ±‚トレース用㮠OSProfiler をサãƒãƒ¼ãƒˆã—ã¾" "ã™ã€‚ ユーザーã¯ã€ 'profiler' セクション㮠Magnum 設定ファイル経由㧠" "OSProfiler を有効ã«ã™ã‚‹ã“ã¨ãŒã§ãã¾ã™ã€‚" msgid "" "Magnum now support SSL for API service. User can enable SSL for API via new " "3 config options 'enabled_ssl', 'ssl_cert_file' and 'ssl_key_file'." msgstr "" "Magnum ã¯ç¾åœ¨ã€API サービス用㮠SSL をサãƒãƒ¼ãƒˆã—ã¦ã„ã¾ã™ã€‚ ユーザーã¯ã€æ–°ã—ã„" "3ã¤ã®è¨­å®šã‚ªãƒ—ション 'enabled_ssl' 〠'ssl_cert_file' 〠'ssl_key_file' を使用" "ã—ã¦ã€ API ã« SSL を有効ã«ã™ã‚‹ã“ã¨ãŒã§ãã¾ã™ã€‚" msgid "Magnum service type and mission statement was changed [1]." msgstr "" "マグナムã®ã‚µãƒ¼ãƒ“スタイプã¨ãƒŸãƒƒã‚·ãƒ§ãƒ³ã‚¹ãƒ†ãƒ¼ãƒˆãƒ¡ãƒ³ãƒˆã«é–¢ã™ã‚‹è¨˜è¿°ãŒå¤‰æ›´ã•れã¾ã—" "㟠[1]。" msgid "" "Magnum's bay-to-cluster blueprint [1] required changes across much of its " "codebase to align to industry standards. To support this blueprint, certain " "group and option names were changed in configuration files [2]. See the " "deprecations section for more details. 
[1] https://review.openstack.org/#/q/" "topic:bp/rename-bay-to-cluster [2] https://review.openstack.org/#/c/362660/" msgstr "" "Magnum ã® bay-to-cluster ã®ãƒ–ループリント [1] ã¯ã€æ¥­ç•Œæ¨™æº–ã«åˆã‚ã›ã¦ã‚³ãƒ¼ãƒ‰" "ベースã®å¤šãを変更ã™ã‚‹å¿…è¦ãŒã‚りã¾ã—ãŸã€‚ ã“ã®ãƒ–ループリントをサãƒãƒ¼ãƒˆã™ã‚‹ãŸã‚" "ã«ã€ç‰¹å®šã®ã‚°ãƒ«ãƒ¼ãƒ—åã¨ã‚ªãƒ—ションåãŒè¨­å®šãƒ•ァイル [2] ã§å¤‰æ›´ã•れã¾ã—ãŸã€‚ 詳細" "ã¯éžæŽ¨å¥¨ã®ã‚»ã‚¯ã‚·ãƒ§ãƒ³ã‚’å‚ç…§ã—ã¦ãã ã•ã„。 [1] https://review.openstack.org/#/" "q/topic:bp/rename-bay-to-cluster [2] https://review.openstack.org/#/c/362660/" msgid "" "Magnum's keypair-override-on-create blueprint [1] allows for optional " "keypair value in ClusterTemplates and the ability to specify a keypair value " "during cluster creation." msgstr "" "Magnum ã® keypair-override-on-create ブループリント [1] ã§ã¯ã€ã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ãƒ†ãƒ³" "プレートã®ä»»æ„ã®ã‚­ãƒ¼ãƒšã‚¢å€¤ã¨ã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ä½œæˆæ™‚ã®ã‚­ãƒ¼ãƒšã‚¢å€¤ã®æŒ‡å®šãŒå¯èƒ½ã§ã™ã€‚" msgid "" "Make the dedicated cinder volume per node an opt-in option. By default, no " "cinder volumes will be created unless the user passes the docker-volume-size " "argument." msgstr "" "ノードã”ã¨ã«å°‚用㮠Cinder ボリュームをオプトインオプションã«ã—ã¾ã™ã€‚ デフォル" "トã§ã¯ã€ãƒ¦ãƒ¼ã‚¶ãŒdocker-volume-size 引数を渡ã•ãªã„é™ã‚Šã€ Cinder ボリュームã¯ä½œ" "æˆã•れã¾ã›ã‚“。" msgid "Mitaka Series Release Notes" msgstr "Mitaka ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã®ãƒªãƒªãƒ¼ã‚¹ãƒŽãƒ¼ãƒˆ" msgid "New Features" msgstr "新機能" msgid "Newton Series Release Notes" msgstr "Newton ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã®ãƒªãƒªãƒ¼ã‚¹ãƒŽãƒ¼ãƒˆ" msgid "Ocata Series Release Notes" msgstr "Ocata ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã®ãƒªãƒªãƒ¼ã‚¹ãƒŽãƒ¼ãƒˆ" msgid "Pike Series Release Notes" msgstr "Pike ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã®ãƒªãƒªãƒ¼ã‚¹ãƒŽãƒ¼ãƒˆ" msgid "Prelude" msgstr "紹介" msgid "" "Secure etcd cluster for swarm and k8s. Etcd cluster is secured using TLS by " "default. TLS can be disabled by passing --tls-disabled during cluster " "template creation." 
msgstr "" "swarm ã‚„ k8s ã®ãŸã‚ã® etcd クラスターを安全ã«ã—ã¾ã—ãŸã€‚ Etcd クラスターã¯ã€ãƒ‡" "フォルト㧠TLS を使用ã—ã¦ä¿è­·ã•れã¾ã™ã€‚ TLS ã¯ã€ã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ãƒ†ãƒ³ãƒ—レートã®ä½œæˆ" "時㫠--tls-disabled を渡ã™ã“ã¨ã§ç„¡åйã«ã™ã‚‹ã“ã¨ãŒã§ãã¾ã™ã€‚" msgid "Security Issues" msgstr "セキュリティー上ã®å•題" msgid "" "Support different volume types for the drivers that support docker storage " "in cinder volumes. swarm_fedora_atomic and k8s_fedora_atomic accept a new " "label to specify a docker_volume_type." msgstr "" "Docker ストレージを Cinder ボリュームã§ã‚µãƒãƒ¼ãƒˆã™ã‚‹ãƒ‰ãƒ©ã‚¤ãƒã®ã•ã¾ã–ã¾ãªãƒœ" "リュームタイプをサãƒãƒ¼ãƒˆã—ã¾ã™ã€‚ swarm_fedora_atomic ãŠã‚ˆã³ " "k8s_fedora_atomic 㯠docker_volume_type を指定ã™ã‚‹æ–°ã—ã„ラベルをå—ã‘入れã¾" "ã™ã€‚" msgid "" "The 'bay' group has been renamed to 'cluster' and all options in the former " "'bay' group have been moved to 'cluster'." msgstr "" "'bay' グループã®åå‰ãŒ 'cluster' ã«å¤‰æ›´ã•れã€ä»¥å‰ã® 'bay' グループã®ã™ã¹ã¦ã®" "オプション㌠'cluster' ã«ç§»å‹•ã•れã¾ã—ãŸã€‚" msgid "" "The 'bay_create_timeout' option in the former 'bay_heat' group has been " "renamed to 'create_timeout' inside the 'cluster_heat' group." msgstr "" "以å‰ã® 'bay_heat' グループ㮠'bay_create_timeout' オプション㯠" "'cluster_heat' グループ内㮠'create_timeout' ã«åå‰ãŒå¤‰æ›´ã•れã¾ã—ãŸã€‚" msgid "" "The 'bay_heat' group has been renamed to 'cluster_heat' and all options in " "the former 'bay_heat' group have been moved to 'cluster_heat'." msgstr "" "'bay_heat' グループã®åå‰ãŒ 'cluster_heat' ã«å¤‰æ›´ã•れã€ä»¥å‰ã® 'bay_heat' ã‚°" "ループã®ã™ã¹ã¦ã®ã‚ªãƒ—ション㌠'cluster_heat' ã«ç§»å‹•ã•れã¾ã—ãŸã€‚" msgid "" "The 'baymodel' group has been renamed to 'cluster_template' and all options " "in the former 'baymodel' group have been moved to 'cluster_template'." msgstr "" "'baymodel' グループã®åå‰ãŒ 'cluster_template' ã«å¤‰æ›´ã•れã€ä»¥å‰ã® 'baymodel' " "グループã®ã™ã¹ã¦ã®ã‚ªãƒ—ション㌠'cluster_template' ã«ç§»å‹•ã•れã¾ã—ãŸã€‚" msgid "" "The intend is to narrow the scope of the Magnum project to focus on " "integrating container orchestration engines (COEs) with OpenStack. 
API " "features intended to uniformly create, manage, and delete individual " "containers across any COE will be removed from Magnum's API, and will be re-" "introduced as a separate project called Zun." msgstr "" "Magnum プロジェクトã®ç¯„囲をé™å®šã—ã¦ã€ OpenStack ã¨ã‚³ãƒ³ãƒ†ãƒŠãƒ¼ã‚ªãƒ¼ã‚±ã‚¹ãƒˆãƒ¬ãƒ¼" "ションエンジン(COE)を統åˆã™ã‚‹ã“ã¨ã«ç„¦ç‚¹ã‚’当ã¦ã‚‹äºˆå®šã§ã™ã€‚ä»»æ„ã® COE ã®å€‹ã€…" "ã®ã‚³ãƒ³ãƒ†ãƒŠãƒ¼ã‚’å‡ä¸€ã«ä½œæˆã€ç®¡ç†ã€å‰Šé™¤ã™ã‚‹ãŸã‚ã® API 機能ã¯ã€ Magnum ã® API ã‹" "ら削除ã•れ〠Zun ã¨ã„ã†åˆ¥ã®ãƒ—ロジェクトã¨ã—ã¦å†å°Žå…¥ã•れã¾ã™ã€‚" msgid "" "This release introduces 'quota' endpoint that enable admin users to set, " "update and show quota for a given tenant. A non-admin user can get self " "quota limits." msgstr "" "ã“ã®ãƒªãƒªãƒ¼ã‚¹ã§ã¯ã€ç®¡ç†è€…ãŒç‰¹å®šã®ãƒ†ãƒŠãƒ³ãƒˆã®ã‚¯ã‚©ãƒ¼ã‚¿ã‚’è¨­å®šã€æ›´æ–°ã€ãŠã‚ˆã³è¡¨ç¤ºã§" "ãる「クォータã€ã‚¨ãƒ³ãƒ‰ãƒã‚¤ãƒ³ãƒˆãŒå°Žå…¥ã•れã¦ã„ã¾ã™ã€‚管ç†è€…ã§ãªã„ユーザーã¯ã€è‡ª" "分ã®ã‚¯ã‚©ãƒ¼ã‚¿åˆ¶é™ã‚’å–å¾—ã§ãã¾ã™ã€‚" msgid "" "This release introduces 'stats' endpoint that provide the total number of " "clusters and the total number of nodes for the given tenant and also overall " "stats across all the tenants." msgstr "" "ã“ã®ãƒªãƒªãƒ¼ã‚¹ã§ã¯ã€ã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ã®ç·æ•°ã¨ã€ç‰¹å®šã®ãƒ†ãƒŠãƒ³ãƒˆã®ãƒŽãƒ¼ãƒ‰ã®ç·æ•°ã€ãŠã‚ˆã³ã™" "ã¹ã¦ã®ãƒ†ãƒŠãƒ³ãƒˆå…¨ä½“ã®çµ±è¨ˆæƒ…報をæä¾›ã™ã‚‹ã€Œçµ±è¨ˆã€ã‚¨ãƒ³ãƒ‰ãƒã‚¤ãƒ³ãƒˆãŒå°Žå…¥ã•れã¦ã„ã¾" "ã™ã€‚" msgid "" "To let clusters communicate directly with OpenStack service other than " "Magnum, in the `trust` section of magnum.conf, set `cluster_user_trust` to " "True. The default value is False." msgstr "" "クラスター㌠Magnum 以外㮠OpenStack サービスã¨ç›´æŽ¥é€šä¿¡ã§ãるよã†ã«ã™ã‚‹ã«" "ã¯ã€ magnum.conf ã® `trust` セクション㧠`cluster_user_trust` ã‚’ True ã«è¨­å®š" "ã—ã¾ã™ã€‚デフォルト値㯠False ã§ã™ã€‚" msgid "" "Update Swarm default version to 1.2.5. It should be the last version since " "Docker people are now working on the new Swarm mode integrated in Docker." 
msgstr "" "Swarm ã®ãƒ‡ãƒ•ォルトãƒãƒ¼ã‚¸ãƒ§ãƒ³ã‚’ 1.2.5 ã«ã‚¢ãƒƒãƒ—デートã—ã¾ã™ã€‚ Docker ã®äººã€…㌠" "Docker ã«çµ±åˆã•ã‚ŒãŸæ–°ã—ã„ Swarm モードã«å–り組んã§ã„ã‚‹ã®ã§ã€ã“ã‚Œã¯æœ€å¾Œã®ãƒãƒ¼" "ジョンã§ãªã‘れã°ãªã‚Šã¾ã›ã‚“。" msgid "Upgrade Notes" msgstr "ã‚¢ãƒƒãƒ—ã‚°ãƒ¬ãƒ¼ãƒ‰æ™‚ã®æ³¨æ„" msgid "Welcome to Magnum Release Notes's documentation!" msgstr "Magnum リリースノートã®ãƒ‰ã‚­ãƒ¥ãƒ¡ãƒ³ãƒˆã«ã‚ˆã†ã“ãï¼" msgid "[1] https://review.openstack.org/#/c/311476/" msgstr "[1] https://review.openstack.org/#/c/311476/" magnum-6.1.0/releasenotes/source/liberty.rst0000666000175100017510000000022013244017334021220 0ustar zuulzuul00000000000000============================= Liberty Series Release Notes ============================= .. release-notes:: :branch: origin/stable/liberty magnum-6.1.0/releasenotes/source/newton.rst0000666000175100017510000000023213244017334021063 0ustar zuulzuul00000000000000=================================== Newton Series Release Notes =================================== .. release-notes:: :branch: origin/stable/newton magnum-6.1.0/releasenotes/notes/0000775000175100017510000000000013244017675016660 5ustar zuulzuul00000000000000magnum-6.1.0/releasenotes/notes/add-hostgw-backend-option-1d1f9d8d95ec374f.yaml0000666000175100017510000000112113244017334027001 0ustar zuulzuul00000000000000--- features: - Add flannel's host-gw backend option. Magnum deploys cluster over a dedicated neutron private network by using flannel. Flannel's host-gw backend gives the best performance in this topopolgy (private layer2) since there is no packet processing overhead, no reduction to MTU, scales to many hosts as well as the alternatives. The label "flannel_use_vxlan" was repurposed when the network driver is flannel. First, rename the label flannel_use_vxlan to flannel_backend. Second, redefine the value of this label from "yes/no" to "udp/vxlan/host-gw". 
magnum-6.1.0/releasenotes/notes/bug-1614596-support-ssl-magnum-api-e4896928c6562e03.yaml0000666000175100017510000000042613244017334027532 0ustar zuulzuul00000000000000--- upgrade: - Magnum now support SSL for API service. User can enable SSL for API via new 3 config options 'enabled_ssl', 'ssl_cert_file' and 'ssl_key_file'. - Change default API development service from wsgiref simple_server to werkzeug for better supporting SSL. magnum-6.1.0/releasenotes/notes/add-container_infra_prefix-516cc43fbc5a0617.yaml0000666000175100017510000000103313244017334027204 0ustar zuulzuul00000000000000--- features: - | Prefix of all container images used in the cluster (kubernetes components, coredns, kubernetes-dashboard, node-exporter). For example, kubernetes-apiserver is pulled from docker.io/openstackmagnum/kubernetes-apiserver, with this label it can be changed to myregistry.example.com/mycloud/kubernetes-apiserver. Similarly, all other components used in the cluster will be prefixed with this label, which assumes an operator has cloned all expected images in myregistry.example.com/mycloud. magnum-6.1.0/releasenotes/notes/docker-volume-type-46044734f5a27661.yaml0000666000175100017510000000125213244017334025210 0ustar zuulzuul00000000000000--- features: - | Support different volume types for the drivers that support docker storage in cinder volumes. swarm_fedora_atomic and k8s_fedora_atomic accept a new label to specify a docker_volume_type. upgrade: - | A new section is created in magnum.conf named cinder. In this cinder section, you need to set a value for the key default_docker_volume_type, which should be a valid type for cinder volumes in your cinder deployment. This default value will be used if no volume_type is provided by the user when using a cinder volume for container storage. The suggested default value the one set in cinder.conf of your cinder deployment. 
magnum-6.1.0/releasenotes/notes/update-kubernetes-dashboard-5196831c32d55aee.yaml0000666000175100017510000000057313244017334027256 0ustar zuulzuul00000000000000--- features: - | Update kubernetes dashboard to `v1.8.3` which is compatible via kubectl proxy. Addionally, heapster is deployed as standalone deployemt and the user can enable a grafana-influx stack with the `influx_grafana_dashboard_enabled` label. See the kubernetes dashboard documenation for more details. https://github.com/kubernetes/dashboard/wiki magnum-6.1.0/releasenotes/notes/quota-api-182cd1bc9e706b17.yaml0000666000175100017510000000027113244017334023650 0ustar zuulzuul00000000000000--- features: - This release introduces 'quota' endpoint that enable admin users to set, update and show quota for a given tenant. A non-admin user can get self quota limits. magnum-6.1.0/releasenotes/notes/bp-add-kube-dashboard-8a9f7d7c73c2debd.yaml0000666000175100017510000000037713244017334026215 0ustar zuulzuul00000000000000--- features: - | Include kubernetes dashboard in kubernetes cluster by default. Users can use this kubernetes dashboard to manage the kubernetes cluster. Dashboard can be disabled by setting the label 'kube_dashboard_enabled' to false. magnum-6.1.0/releasenotes/notes/bp-magnum-notifications-8bd44cfe9e80f82b.yaml0000666000175100017510000000061313244017334026661 0ustar zuulzuul00000000000000--- features: - Emit notifications when there is an event on a cluster. An event could be a status change of the cluster due to an operation issued by end-users (i.e. users create, update or delete the cluster). Notifications are sent by using oslo.notify and PyCADF. Ceilometer can capture the events and generate samples for auditing, billing, monitoring, or quota purposes. 
magnum-6.1.0/releasenotes/notes/bug-1663757-198e1aa8fa810984.yaml0000666000175100017510000000115113244017334023315 0ustar zuulzuul00000000000000--- fixes: - | [`bug 1663757 `_] A configuration parameter, verify_ca, was added to magnum.conf with a default value of True and passed to the heat templates to indicate whether the cluster nodes validate the Certificate Authority when making requests to the OpenStack APIs (Keystone, Magnum, Heat). This parameter can be set to False to disable CA validation if you have self-signed certificates for the OpenStack APIs or you have your own Certificate Authority and you have not installed the Certificate Authority to all nodes. magnum-6.1.0/releasenotes/notes/add-opensuse-driver-f69b6d346ca82b87.yaml0000666000175100017510000000027513244017334025654 0ustar zuulzuul00000000000000--- features: - Add support for a new OpenSUSE driver for running k8s cluster on OpenSUSE. This driver is experimental for now, and operators need to get it from /contrib folder. magnum-6.1.0/releasenotes/notes/rollback-bay-on-update-failure-83e5ff8a7904d5c4.yaml0000666000175100017510000000041513244017334027652 0ustar zuulzuul00000000000000--- features: - Add Microversion 1.3 to support Magnum bay rollback, user can enable rollback on bay update failure by setting 'OpenStack-API-Version' to 'container-infra 1.3' in request header and passing 'rollback=True' param in bay update request. magnum-6.1.0/releasenotes/notes/bug-1580704-32a0e91e285792ea.yaml0000666000175100017510000000045513244017334023303 0ustar zuulzuul00000000000000--- security: - | Add new configuration option `openstack_ca_file` in the `drivers` section to pass the CA bundle used for the OpenStack API. Setting this file and setting `verify_ca` to `true` will result to all requests from the cluster nodes to the OpenStack APIs to be verified. 
magnum-6.1.0/releasenotes/notes/update-swarm-73d4340a881bff2f.yaml0000666000175100017510000000025713244017334024371 0ustar zuulzuul00000000000000--- features: - Update Swarm default version to 1.2.5. It should be the last version since Docker people are now working on the new Swarm mode integrated in Docker. magnum-6.1.0/releasenotes/notes/integrate-osprofiler-79bdf2d0cd8a39fb.yaml0000666000175100017510000000025613244017334026350 0ustar zuulzuul00000000000000--- features: - Magnum now support OSProfiler for HTTP, RPC and DB request tracing. User can enable OSProfiler via Magnum configuration file in 'profiler' section. magnum-6.1.0/releasenotes/notes/support-all-tenants-for-admin-a042f5c520d35837.yaml0000666000175100017510000000012113244017334027405 0ustar zuulzuul00000000000000--- features: - | Now admin user can access all clusters across projects. magnum-6.1.0/releasenotes/notes/cert-manager-api-ee0cf7f3b767bb5d.yaml0000666000175100017510000000015613244017334025317 0ustar zuulzuul00000000000000--- features: - | Add new label 'cert_manager_api' enabling the kubernetes certificate manager api. magnum-6.1.0/releasenotes/notes/bp-secure-etcd-cluster-coe-5abd22546f05a85b.yaml0000666000175100017510000000030513244017334026770 0ustar zuulzuul00000000000000--- features: - | Secure etcd cluster for swarm and k8s. Etcd cluster is secured using TLS by default. TLS can be disabled by passing --tls-disabled during cluster template creation. magnum-6.1.0/releasenotes/notes/update-to-f27-cc8aa873cdf111bc.yaml0000666000175100017510000000050413244017334024457 0ustar zuulzuul00000000000000--- features: - | Update k8s_fedora_atomic driver to the latest Fedora Atomic 27 release and run etcd and flanneld in system containers which are removed from the base OS. upgrade: - | New clusters should be created with kube_tag=v1.9.3 or later. v1.9.3 is the default version in the queens release. 
magnum-6.1.0/releasenotes/notes/calico-network-driver-0199c2459041ae81.yaml0000666000175100017510000000023113244017334025737 0ustar zuulzuul00000000000000--- issues: - | Adding 'calico' as network driver for Kubernetes so as to support network isolation between namespace with k8s network policy. magnum-6.1.0/releasenotes/notes/bp-auto-generate-name-052ea3fdf05fdbbf.yaml0000666000175100017510000000026613244017334026325 0ustar zuulzuul00000000000000--- features: - Auto generate name for cluster and cluster-template. If users create a cluster/cluster-template without specifying a name, the name will be auto-generated. magnum-6.1.0/releasenotes/notes/fix-global-stack-list-7a3a66169f5c4aa8.yaml0000666000175100017510000000112213244017334026053 0ustar zuulzuul00000000000000--- security: - Fix global stack list in periodic task. In before, magnum's periodic task performs a `stack-list` operation across all tenants. This is disabled by Heat by default since it causes a security issue. At this release, magnum performs a `stack-get` operation on each Heat stack by default. This might not be scalable and operators have an option to fall back to `stack-list` by setting the config `periodic_global_stack_list` to `True` (`False` by default) and updating the heat policy file (usually /etc/heat/policy.json) to allow magnum list stacks. magnum-6.1.0/releasenotes/notes/remove-container-endpoint-3494eb8bd2406e87.yaml0000666000175100017510000000170013244017334026775 0ustar zuulzuul00000000000000--- prelude: | Magnum service type and mission statement was changed [1]. Change service type from "Container service" to "Container Infrastructure Management service". In addition, the mission statement is changed to "To provide a set of services for provisioning, scaling, and managing container orchestration engines." The intend is to narrow the scope of the Magnum project to focus on integrating container orchestration engines (COEs) with OpenStack. 
API features intended to uniformly create, manage, and delete individual containers across any COE will be removed from Magnum's API, and will be re-introduced as a separate project called Zun. [1] https://review.openstack.org/#/c/311476/ upgrade: - All container/pod/service/replication controller operations were removed. Users are recommended to use the COE's native tool (i.e. docker, kubectl) to do the equivalent of the removed operations. magnum-6.1.0/releasenotes/notes/bp-keypair-override-on-create-ca8f12ffca41cd62.yaml0000666000175100017510000000117713244017334027725 0ustar zuulzuul00000000000000--- prelude: > Magnum's keypair-override-on-create blueprint [1] allows for optional keypair value in ClusterTemplates and the ability to specify a keypair value during cluster creation. features: - Added parameter in cluster-create to specify the keypair. If keypair is not provided, the default value from the matching ClusterTemplate will be used. - Keypair is now optional for ClusterTemplate, in order to allow Clusters to use keypairs separate from their parent ClusterTemplate. deprecations: - --keypair-id parameter in magnum CLI cluster-template-create has been renamed to --keypair. magnum-6.1.0/releasenotes/notes/bp-decouple-lbaas-c8f2d73313c40b98.yaml0000666000175100017510000000043313244017334025154 0ustar zuulzuul00000000000000--- features: - Decouple the hard requirement on neutron-lbaas. Introduce a new property master_lb_enabled in cluster template. This property will determines if a cluster's master nodes should be load balanced. Set the value to false if neutron-lbaas is not installed. magnum-6.1.0/releasenotes/notes/rotate-cluster-cert-9f84deb0adf9afb1.yaml0000666000175100017510000000031713244017334026170 0ustar zuulzuul00000000000000--- features: - Add microversion 1.5 to support rotation of a cluster's CA certificate. This gives admins a way to restrict/deny access to an existing cluster once a user has been granted access. 
magnum-6.1.0/releasenotes/notes/bp-container-monitoring-d4bb1cbd0a4e44cc.yaml0000666000175100017510000000057613244017334027005 0ustar zuulzuul00000000000000--- features: - | Includes a monitoring stack based on cAdvisor, node-exporter, Prometheus and Grafana. Users can enable this stack through the label prometheus_monitoring. Prometheus scrapes metrics from the Kubernetes cluster and then serves them to Grafana through Grafana's Prometheus data source. Upon completion, a default Grafana dashboard is provided. magnum-6.1.0/releasenotes/notes/availability_zone-2d73671f5ea065d8.yaml0000666000175100017510000000034313244017334025406 0ustar zuulzuul00000000000000--- features: - | Support passing an availability zone where all cluster nodes should be deployed, via the new availability_zone label. Both swarm_fedora_atomic_v2 and k8s_fedora_atomic_v1 support this new label. magnum-6.1.0/releasenotes/notes/broken-kuberenetes-client-d2d1da6029825208.yaml0000666000175100017510000000170713244017334026660 0ustar zuulzuul00000000000000--- issues: - | Kubernetes client is incompatible with evenlet and breaks the periodic tasks. After kubernetes client 4.0.0 magnum is affected by the bug below. https://github.com/eventlet/eventlet/issues/147 Magnum has three periodic tasks, one to sync the magnum service, one to update the cluster status and one send cluster metrics The send_metrics task uses the kubernetes client for kubernetes clusters and it crashes the sync_cluster_status and send_cluster_metrics tasks. https://bugs.launchpad.net/magnum/+bug/1746510 Additionally, the kubernetes scale manager needs to be disabled to not break the scale down command completely. Note, that when magnum scales down the cluster will pick the nodes to scale randomly. upgrade: - | In magnum configuration, in [drivers] set send_cluster_metrics = False to to avoid collecting metrics using the kubernetes client which crashes the periodic tasks. 
magnum-6.1.0/releasenotes/notes/bp-barbican-alternative-store-35ec3eda0abb0e25.yaml0000666000175100017510000000046313244017334027762 0ustar zuulzuul00000000000000--- features: - Decouple the hard requirement on barbican. Introduce a new certificate store called x509keypair. If x509keypair is used, TLS certificates will be stored at magnum's database instead of barbican. To do that, set the value of the config ``cert_manager_type`` as ``x509keypair``. magnum-6.1.0/releasenotes/notes/ingress-controller-552ea956ceabdd25.yaml0000666000175100017510000000053613244017334025752 0ustar zuulzuul00000000000000--- features: - | Add new labels 'ingress_controller' and 'ingress_controller_role' enabling the deployment of a Kubernetes Ingress Controller backend for clusters. Default for 'ingress_controller' is '' (meaning no controller deployed), with possible values being 'traefik'. Default for 'ingress_controller_role' is 'ingress'. magnum-6.1.0/releasenotes/notes/bug-1697655-add-etcd-volume-size-label-abde0060595bbbeb.yaml0000666000175100017510000000047013244017334030574 0ustar zuulzuul00000000000000--- features: - | Add support to store the etcd configuration in a cinder volume. k8s_fedora_atomic accepts a new label etcd_volume_size defining the size of the volume. A value of 0 or leaving the label unset means no volume should be used, and the data will go to the instance local storage. magnum-6.1.0/releasenotes/notes/bug-1722522-d94743c6362a5e48.yaml0000666000175100017510000000053413244017334023226 0ustar zuulzuul00000000000000--- features: - | Allow any value to be passed on the docker_storage_driver field by turning it into a StringField (was EnumField), and remove the constraints limiting the values to 'devicemapper' and 'overlay'. upgrade: - | Requires a db upgrade to change the docker_storage_driver field to be a string instead of an enum. 
magnum-6.1.0/releasenotes/notes/keystone_trustee_interface-6d63b74616dda1d4.yaml0000666000175100017510000000027613244017334027413 0ustar zuulzuul00000000000000--- features: - Keystone URL used by Cluster Templates instances to authenticate is now configurable with the ``trustee_keystone_interface`` parameter which default to ``public``. magnum-6.1.0/releasenotes/notes/.placeholder0000666000175100017510000000000013244017334021123 0ustar zuulzuul00000000000000magnum-6.1.0/releasenotes/notes/change-service-name-ce5c72642fe1d3d1.yaml0000666000175100017510000000027213244017334025632 0ustar zuulzuul00000000000000--- upgrade: - Magnum default service type changed from "container" to "container-infra". It is recommended to update the service type at Keystone service catalog accordingly. magnum-6.1.0/releasenotes/notes/change-bay-to-cluster-in-config-1f2b95d1176d7231.yaml0000666000175100017510000000177613244017334027567 0ustar zuulzuul00000000000000--- prelude: > Magnum's bay-to-cluster blueprint [1] required changes across much of its codebase to align to industry standards. To support this blueprint, certain group and option names were changed in configuration files [2]. See the deprecations section for more details. [1] https://review.openstack.org/#/q/topic:bp/rename-bay-to-cluster [2] https://review.openstack.org/#/c/362660/ deprecations: - The 'bay' group has been renamed to 'cluster' and all options in the former 'bay' group have been moved to 'cluster'. - The 'bay_heat' group has been renamed to 'cluster_heat' and all options in the former 'bay_heat' group have been moved to 'cluster_heat'. - The 'bay_create_timeout' option in the former 'bay_heat' group has been renamed to 'create_timeout' inside the 'cluster_heat' group. - The 'baymodel' group has been renamed to 'cluster_template' and all options in the former 'baymodel' group have been moved to 'cluster_template'. 
magnum-6.1.0/releasenotes/notes/stats-api-68bc66147ac027e6.yaml0000666000175100017510000000032313244017334023601 0ustar zuulzuul00000000000000--- features: - This release introduces 'stats' endpoint that provide the total number of clusters and the total number of nodes for the given tenant and also overall stats across all the tenants. magnum-6.1.0/releasenotes/notes/add-federation-api-cf55d04f96772b0f.yaml0000666000175100017510000000062313244017334025401 0ustar zuulzuul00000000000000--- features: - | This release introduces 'federations' endpoint to Magnum API, which allows an admin to create and manage federations of clusters through Magnum. As the feature is still under development, the endpoints are not bound to any driver yet. For more details, please refer to bp/federation-api [1]. [1] https://review.openstack.org/#/q/topic:bp/federation-api magnum-6.1.0/releasenotes/notes/add-overlay-networks-to-swarm-4467986d7853fcd8.yaml0000666000175100017510000000062013244017334027475 0ustar zuulzuul00000000000000--- features: - Add configuration for overlay networks for the docker network driver in swarm. To use this feature, users need to create a swarm cluster with network_driver set to 'docker'. After the cluster is created, users can create an overlay network (docker network create -d overlay mynetwork) and use it when launching a new container (docker run --net=mynetwork ...). magnum-6.1.0/releasenotes/notes/async-bay-operations-support-9819bd06122ea9e5.yaml0000666000175100017510000000130513244017334027460 0ustar zuulzuul00000000000000--- features: - Current implementation of magnum bay operations are synchronous and as a result API requests are blocked until response from HEAT service is received. This release adds support for asynchronous bay operations (bay-create, bay-update, and bay-delete). Please note that with this change, bay-create, bay-update API calls will return bay uuid instead of bay object and also return HTTP status code 202 instead of 201. 
Microversion 1.2 is added for new behavior. upgrade: - Magnum bay operations API default behavior changed from synchronous to asynchronous. User can specify OpenStack-API-Version 1.1 in request header for synchronous bay operations. magnum-6.1.0/releasenotes/notes/CVE-2016-7404-f53e62a4a40e4d30.yaml0000666000175100017510000000304413244017334023340 0ustar zuulzuul00000000000000--- upgrade: - | To let clusters communicate directly with OpenStack service other than Magnum, in the `trust` section of magnum.conf, set `cluster_user_trust` to True. The default value is False. security: - | Every magnum cluster is assigned a trustee user and a trustID. This user is used to allow clusters communicate with the key-manager service (Barbican) and get the certificate authority of the cluster. This trust user can be used by other services too. It can be used to let the cluster authenticate with other OpenStack services like the Block Storage service, Object Storage service, Load Balancing etc. The cluster with this user and the trustID has full access to the trustor's OpenStack project. A new configuration parameter has been added to restrict the access to other services than Magnum. fixes: - | Fixes CVE-2016-7404 for newly created clusters. Existing clusters will have to be re-created to benefit from this fix. Part of this fix is the newly introduced setting `cluster_user_trust` in the `trust` section of magnum.conf. This setting defaults to False. `cluster_user_trust` dictates whether to allow passing a trust ID into a cluster's instances. For most clusters this capability is not needed. Clusters with `registry_enabled=True` or `volume_driver=rexray` will need this capability. Other features that require this capability may be introduced in the future. To be able to create such clusters you will need to set `cluster_user_trust` to True. 
magnum-6.1.0/releasenotes/notes/support-policy-and-doc-in-code-0c19e479dbd953c9.yaml0000666000175100017510000000163213244017334027622 0ustar zuulzuul00000000000000--- features: - | Magnum now support policy in code [1], which means if users didn't modify any of policy rules, they can leave policy file (in `json` or `yaml` format) empty or just remove it all together. Because from now, Magnum keeps all default policies under `magnum/common/policies` module. Users can still modify/generate the policy rules they want in the `policy.yaml` or `policy.json` file which will override the default policy rules in code only if those rules show in the policy file. [1]. https://blueprints.launchpad.net/magnum/+spec/policy-in-code other: - | Default `policy.json` file is now removed as Magnum now generate the default policies in code. Please be aware that when using that file in your environment. upgrade: - | Magnum now supports policy in code, please refer to the relevant features in the release notes for more information. magnum-6.1.0/releasenotes/notes/bug-1718947-0d4e67529e2817d7.yaml0000666000175100017510000000036313244017334023250 0ustar zuulzuul00000000000000--- fixes: - | From now on, server names are prefixed with the cluster name. The cluster name is truncated to 30 characters, ('_', '.') are mapped to '-' and non alpha-numeric characters are removed to ensure FQDN compatibility. magnum-6.1.0/releasenotes/notes/swarm-integration-with-cinder-e3068138a3f75dbe.yaml0000666000175100017510000000063113244017334027643 0ustar zuulzuul00000000000000--- features: - Integrate Docker Swarm Fedora Atomic driver with the Block Storage Service (cinder). The rexray volume driver was added based on rexray v0.4. Users can create and attach volumes using docker's navive client and they will authenticate using the per cluster trustee user. Rexray can be either added in the Fedora Atomic image or can be used running in a container. 
magnum-6.1.0/releasenotes/notes/bp-mesos-slave-flags-de6cf8c4d2c3c916.yaml0000666000175100017510000000042013244017334026044 0ustar zuulzuul00000000000000--- features: - > Enable Mesos cluster to export more slave flags via labels in cluster template. Add the following labels: mesos_slave_isolation, mesos_slave_image_providers, mesos_slave_work_dir, and mesos_slave_executor_environment_variables. magnum-6.1.0/releasenotes/notes/support_nodes_affinity_policy-22253fb9cf6739ec.yaml0000666000175100017510000000033313244017334030137 0ustar zuulzuul00000000000000--- issues: - | Enhancement to support anfinity policy for cluster nodes. Before this patch, There is no way to gurantee all nodes of a cluster created on different compute hosts to get high availbility. magnum-6.1.0/releasenotes/notes/add-docker-storage-driver-to-baymodel-1ed9ba8d43ecfea1.yaml0000666000175100017510000000111013244017334031404 0ustar zuulzuul00000000000000--- features: - Add docker-storage-driver parameter to baymodel to allow user select from the supported drivers. Until now, only devicemapper was supported. This release adds support for OverlayFS on Fedora Atomic hosts with kernel version >= 3.18 (Fedora 22 or higher) resulting significant performance improvement. To use OverlayFS, SELinux must be enabled and in enforcing mode on the physical machine, but must be disabled in the container. Thus, if you select overlay for docker-storage-driver SELinux will be disable inside the containers. magnum-6.1.0/releasenotes/notes/no-cinder-volume-87b9339e066c30a0.yaml0000666000175100017510000000063213244017334025000 0ustar zuulzuul00000000000000--- prelude: > Currently, the swarm and the kubernetes drivers use a dedicated cinder volume to store the container images. It was been observed that one cinder volume per node is a bottleneck for large clusters. fixes: - Make the dedicated cinder volume per node an opt-in option. 
By default, no cinder volumes will be created unless the user passes the docker-volume-size argument. magnum-6.1.0/releasenotes/notes/cluster_template_update_labels-10ce66c87795f11c.yaml0000666000175100017510000000042713244017334030145 0ustar zuulzuul00000000000000--- fixes: - | Now user can update labels in cluster-template. Previously string is passed as a value to labels, but we know that labels can only hold dictionary values. Now we are parsing the string and storing it as dictionary for labels in cluster-template. magnum-6.1.0/magnum.egg-info/0000775000175100017510000000000013244017675016015 5ustar zuulzuul00000000000000magnum-6.1.0/magnum.egg-info/not-zip-safe0000664000175100017510000000000113244017643020236 0ustar zuulzuul00000000000000 magnum-6.1.0/magnum.egg-info/PKG-INFO0000664000175100017510000000354113244017674017114 0ustar zuulzuul00000000000000Metadata-Version: 1.1 Name: magnum Version: 6.1.0 Summary: Container Management project for OpenStack Home-page: http://docs.openstack.org/magnum/latest/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description-Content-Type: UNKNOWN Description: ======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/badges/magnum.svg :target: https://governance.openstack.org/reference/tags/index.html .. Change things from this point on ====== Magnum ====== Magnum is an OpenStack project which offers container orchestration engines for deploying and managing containers as first class resources in OpenStack. 
For more information, please refer to the following resources: * **Free software:** under the `Apache license `_ * **Documentation:** https://docs.openstack.org/magnum/latest/ * **Source:** http://git.openstack.org/cgit/openstack/magnum * **Blueprints:** https://blueprints.launchpad.net/magnum * **Bugs:** http://bugs.launchpad.net/magnum * **REST Client:** http://git.openstack.org/cgit/openstack/python-magnumclient Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 magnum-6.1.0/magnum.egg-info/entry_points.txt0000664000175100017510000000222613244017674021314 0ustar zuulzuul00000000000000[console_scripts] magnum-api = magnum.cmd.api:main magnum-conductor = magnum.cmd.conductor:main magnum-db-manage = magnum.cmd.db_manage:main magnum-driver-manage = magnum.cmd.driver_manage:main [magnum.cert_manager.backend] barbican = magnum.common.cert_manager.barbican_cert_manager local = magnum.common.cert_manager.local_cert_manager x509keypair = magnum.common.cert_manager.x509keypair_cert_manager [magnum.database.migration_backend] sqlalchemy = magnum.db.sqlalchemy.migration [magnum.drivers] k8s_coreos_v1 = magnum.drivers.k8s_coreos_v1.driver:Driver k8s_fedora_atomic_v1 = magnum.drivers.k8s_fedora_atomic_v1.driver:Driver k8s_fedora_ironic_v1 = magnum.drivers.k8s_fedora_ironic_v1.driver:Driver mesos_ubuntu_v1 = magnum.drivers.mesos_ubuntu_v1.driver:Driver swarm_fedora_atomic_v1 = magnum.drivers.swarm_fedora_atomic_v1.driver:Driver swarm_fedora_atomic_v2 = magnum.drivers.swarm_fedora_atomic_v2.driver:Driver [oslo.config.opts] 
magnum = magnum.opts:list_opts magnum.conf = magnum.conf.opts:list_opts [oslo.config.opts.defaults] magnum = magnum.common.config:set_cors_middleware_defaults [oslo.policy.policies] magnum = magnum.common.policies:list_rules magnum-6.1.0/magnum.egg-info/SOURCES.txt0000664000175100017510000012430013244017675017701 0ustar zuulzuul00000000000000.coveragerc .mailmap .testr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst babel.cfg functional_creds.conf.sample requirements.txt setup.cfg setup.py test-requirements.txt tox.ini api-ref/source/baymodels.inc api-ref/source/bays.inc api-ref/source/certificates.inc api-ref/source/clusters.inc api-ref/source/clustertemplates.inc api-ref/source/conf.py api-ref/source/index.rst api-ref/source/mservices.inc api-ref/source/parameters.yaml api-ref/source/quotas.inc api-ref/source/stats.inc api-ref/source/status.yaml api-ref/source/urls.inc api-ref/source/versions.inc api-ref/source/samples/bay-create-req.json api-ref/source/samples/bay-create-resp.json api-ref/source/samples/bay-get-all-resp.json api-ref/source/samples/bay-get-one-resp.json api-ref/source/samples/bay-update-req.json api-ref/source/samples/baymodel-create-req.json api-ref/source/samples/baymodel-create-resp.json api-ref/source/samples/baymodel-get-all-resp.json api-ref/source/samples/baymodel-update-req.json api-ref/source/samples/certificates-ca-show-resp.json api-ref/source/samples/certificates-ca-sign-req.json api-ref/source/samples/certificates-ca-sign-resp.json api-ref/source/samples/cluster-create-req.json api-ref/source/samples/cluster-create-resp.json api-ref/source/samples/cluster-get-all-resp.json api-ref/source/samples/cluster-get-one-resp.json api-ref/source/samples/cluster-update-req.json api-ref/source/samples/clustertemplate-create-req.json api-ref/source/samples/clustertemplate-create-resp.json api-ref/source/samples/clustertemplate-get-all-resp.json api-ref/source/samples/clustertemplate-update-req.json 
api-ref/source/samples/mservice-get-resp.json api-ref/source/samples/quota-create-req.json api-ref/source/samples/quota-create-resp.json api-ref/source/samples/quota-delete-req.json api-ref/source/samples/quota-get-all-resp.json api-ref/source/samples/quota-get-one-resp.json api-ref/source/samples/quota-update-req.json api-ref/source/samples/quota-update-resp.json api-ref/source/samples/stats-get-resp.json api-ref/source/samples/versions-01-get-resp.json api-ref/source/samples/versions-get-resp.json contrib/drivers/dcos_centos_v1/README.md contrib/drivers/dcos_centos_v1/__init__.py contrib/drivers/dcos_centos_v1/driver.py contrib/drivers/dcos_centos_v1/monitor.py contrib/drivers/dcos_centos_v1/scale_manager.py contrib/drivers/dcos_centos_v1/template_def.py contrib/drivers/dcos_centos_v1/version.py contrib/drivers/dcos_centos_v1/image/README.md contrib/drivers/dcos_centos_v1/image/install_imagebuild_deps.sh contrib/drivers/dcos_centos_v1/image/validate_dcos_image.sh contrib/drivers/dcos_centos_v1/image/dcos/elements-deps contrib/drivers/dcos_centos_v1/image/dcos/package-installs.yaml contrib/drivers/dcos_centos_v1/image/dcos/environment.d/10-dcos-install-url contrib/drivers/dcos_centos_v1/image/dcos/extra-data.d/99-download-generate-config contrib/drivers/dcos_centos_v1/image/dcos/post-install.d/99-add-norgoup contrib/drivers/dcos_centos_v1/image/dcos/post-install.d/99-enable-ntp contrib/drivers/dcos_centos_v1/image/docker/elements-deps contrib/drivers/dcos_centos_v1/image/docker/install.d/50-install-docker contrib/drivers/dcos_centos_v1/image/docker/post-install.d/60-enable-docker-service contrib/drivers/dcos_centos_v1/image/docker/pre-install.d/10-enable-overlay contrib/drivers/dcos_centos_v1/image/docker/pre-install.d/20-configure-docker-service contrib/drivers/dcos_centos_v1/templates/dcoscluster.yaml contrib/drivers/dcos_centos_v1/templates/dcosmaster.yaml contrib/drivers/dcos_centos_v1/templates/dcosslave.yaml contrib/drivers/dcos_centos_v1/templates/lb.yaml 
contrib/drivers/dcos_centos_v1/templates/secgroup.yaml contrib/drivers/dcos_centos_v1/templates/fragments/configure-dcos.sh contrib/drivers/dcos_centos_v1/templates/fragments/write-heat-params.sh contrib/drivers/heat/dcos_centos_template_def.py contrib/drivers/k8s_opensuse_v1/README.md contrib/drivers/k8s_opensuse_v1/__init__.py contrib/drivers/k8s_opensuse_v1/driver.py contrib/drivers/k8s_opensuse_v1/setup.py contrib/drivers/k8s_opensuse_v1/template_def.py contrib/drivers/k8s_opensuse_v1/version.py contrib/drivers/k8s_opensuse_v1/image/README.md contrib/drivers/k8s_opensuse_v1/image/config.sh contrib/drivers/k8s_opensuse_v1/image/images.sh contrib/drivers/k8s_opensuse_v1/image/openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s.kiwi contrib/drivers/k8s_opensuse_v1/templates/COPYING contrib/drivers/k8s_opensuse_v1/templates/README.md contrib/drivers/k8s_opensuse_v1/templates/kubecluster.yaml contrib/drivers/k8s_opensuse_v1/templates/kubemaster.yaml contrib/drivers/k8s_opensuse_v1/templates/kubeminion.yaml contrib/drivers/k8s_opensuse_v1/templates/fragments/add-proxy.sh contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-docker.sh contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-etcd.sh contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-flanneld-master.sh contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-flanneld-minion.sh contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-kubernetes-master.sh contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-kubernetes-minion.sh contrib/drivers/k8s_opensuse_v1/templates/fragments/create-kubernetes-user.yaml contrib/drivers/k8s_opensuse_v1/templates/fragments/make-cert-client.sh contrib/drivers/k8s_opensuse_v1/templates/fragments/make-cert.sh contrib/drivers/k8s_opensuse_v1/templates/fragments/write-heat-params-master.yaml contrib/drivers/k8s_opensuse_v1/templates/fragments/write-heat-params-minion.yaml 
contrib/drivers/k8s_opensuse_v1/templates/fragments/write-kubeconfig.yaml contrib/templates/example/README.rst contrib/templates/example/setup.py contrib/templates/example/example_template/__init__.py contrib/templates/example/example_template/example.yaml devstack/README.rst devstack/plugin.sh devstack/settings devstack/lib/magnum doc/examples/etc/init/magnum-api.conf doc/examples/etc/init/magnum-conductor.conf doc/examples/etc/logrotate.d/magnum.logrotate doc/examples/etc/systemd/system/magnum-api.service doc/examples/etc/systemd/system/magnum-conductor.service doc/source/conf.py doc/source/index.rst doc/source/admin/configuring.rst doc/source/admin/gmr.rst doc/source/admin/index.rst doc/source/admin/magnum-proxy.rst doc/source/admin/troubleshooting-guide.rst doc/source/configuration/index.rst doc/source/configuration/sample-config.rst doc/source/configuration/sample-policy.rst doc/source/configuration/samples/index.rst doc/source/configuration/samples/policy-yaml.rst doc/source/contributor/api-microversion-history.rst doc/source/contributor/api-microversion.rst doc/source/contributor/contributing.rst doc/source/contributor/functional-test.rst doc/source/contributor/index.rst doc/source/contributor/objects.rst doc/source/contributor/policies.rst doc/source/contributor/quickstart.rst doc/source/contributor/reno.rst doc/source/contributor/troubleshooting.rst doc/source/images/MagnumVolumeIntegration.png doc/source/images/cluster-create.png doc/source/images/cluster-template-details.png doc/source/images/cluster-template.png doc/source/install/get_started.rst doc/source/install/index.rst doc/source/install/install-debian-manual.rst doc/source/install/install-guide-from-source.rst doc/source/install/install-obs.rst doc/source/install/install-rdo.rst doc/source/install/install-ubuntu.rst doc/source/install/install.rst doc/source/install/launch-instance.rst doc/source/install/next-steps.rst doc/source/install/verify.rst 
doc/source/install/common/configure_2_edit_magnum_conf.rst doc/source/install/common/configure_3_populate_database.rst doc/source/install/common/prerequisites.rst doc/source/user/cluster-type-definition.rst doc/source/user/heat-templates.rst doc/source/user/index.rst doc/source/user/kubernetes-load-balancer.rst etc/magnum/README-magnum.conf.txt etc/magnum/api-paste.ini etc/magnum/magnum-config-generator.conf etc/magnum/magnum-policy-generator.conf magnum/__init__.py magnum/i18n.py magnum/version.py magnum.egg-info/PKG-INFO magnum.egg-info/SOURCES.txt magnum.egg-info/dependency_links.txt magnum.egg-info/entry_points.txt magnum.egg-info/not-zip-safe magnum.egg-info/pbr.json magnum.egg-info/requires.txt magnum.egg-info/top_level.txt magnum/api/__init__.py magnum/api/app.py magnum/api/app.wsgi magnum/api/attr_validator.py magnum/api/config.py magnum/api/expose.py magnum/api/hooks.py magnum/api/http_error.py magnum/api/rest_api_version_history.rst magnum/api/servicegroup.py magnum/api/utils.py magnum/api/validation.py magnum/api/versioned_method.py magnum/api/controllers/__init__.py magnum/api/controllers/base.py magnum/api/controllers/link.py magnum/api/controllers/root.py magnum/api/controllers/versions.py magnum/api/controllers/v1/__init__.py magnum/api/controllers/v1/bay.py magnum/api/controllers/v1/baymodel.py magnum/api/controllers/v1/certificate.py magnum/api/controllers/v1/cluster.py magnum/api/controllers/v1/cluster_template.py magnum/api/controllers/v1/collection.py magnum/api/controllers/v1/federation.py magnum/api/controllers/v1/magnum_services.py magnum/api/controllers/v1/quota.py magnum/api/controllers/v1/stats.py magnum/api/controllers/v1/types.py magnum/api/middleware/__init__.py magnum/api/middleware/auth_token.py magnum/api/middleware/parsable_error.py magnum/cmd/__init__.py magnum/cmd/api.py magnum/cmd/conductor.py magnum/cmd/db_manage.py magnum/cmd/driver_manage.py magnum/common/__init__.py magnum/common/clients.py magnum/common/config.py 
magnum/common/context.py magnum/common/docker_utils.py magnum/common/exception.py magnum/common/keystone.py magnum/common/name_generator.py magnum/common/policy.py magnum/common/profiler.py magnum/common/rpc.py magnum/common/rpc_service.py magnum/common/service.py magnum/common/short_id.py magnum/common/urlfetch.py magnum/common/utils.py magnum/common/cert_manager/__init__.py magnum/common/cert_manager/barbican_cert_manager.py magnum/common/cert_manager/cert_manager.py magnum/common/cert_manager/local_cert_manager.py magnum/common/cert_manager/x509keypair_cert_manager.py magnum/common/policies/__init__.py magnum/common/policies/base.py magnum/common/policies/bay.py magnum/common/policies/baymodel.py magnum/common/policies/certificate.py magnum/common/policies/cluster.py magnum/common/policies/cluster_template.py magnum/common/policies/federation.py magnum/common/policies/magnum_service.py magnum/common/policies/quota.py magnum/common/policies/stats.py magnum/common/x509/__init__.py magnum/common/x509/extensions.py magnum/common/x509/operations.py magnum/common/x509/validator.py magnum/conductor/__init__.py magnum/conductor/api.py magnum/conductor/k8s_api.py magnum/conductor/monitors.py magnum/conductor/scale_manager.py magnum/conductor/utils.py magnum/conductor/handlers/__init__.py magnum/conductor/handlers/ca_conductor.py magnum/conductor/handlers/cluster_conductor.py magnum/conductor/handlers/conductor_listener.py magnum/conductor/handlers/federation_conductor.py magnum/conductor/handlers/indirection_api.py magnum/conductor/handlers/common/__init__.py magnum/conductor/handlers/common/cert_manager.py magnum/conductor/handlers/common/trust_manager.py magnum/conductor/tasks/__init__.py magnum/conductor/tasks/heat_tasks.py magnum/conf/__init__.py magnum/conf/api.py magnum/conf/barbican.py magnum/conf/certificates.py magnum/conf/cinder.py magnum/conf/cluster.py magnum/conf/cluster_heat.py magnum/conf/cluster_templates.py magnum/conf/conductor.py 
magnum/conf/database.py magnum/conf/docker.py magnum/conf/docker_registry.py magnum/conf/drivers.py magnum/conf/glance.py magnum/conf/heat.py magnum/conf/keystone.py magnum/conf/magnum_client.py magnum/conf/neutron.py magnum/conf/nova.py magnum/conf/opts.py magnum/conf/paths.py magnum/conf/profiler.py magnum/conf/quota.py magnum/conf/rpc.py magnum/conf/services.py magnum/conf/trust.py magnum/conf/utils.py magnum/conf/x509.py magnum/db/__init__.py magnum/db/api.py magnum/db/migration.py magnum/db/sqlalchemy/__init__.py magnum/db/sqlalchemy/alembic.ini magnum/db/sqlalchemy/api.py magnum/db/sqlalchemy/migration.py magnum/db/sqlalchemy/models.py magnum/db/sqlalchemy/alembic/README magnum/db/sqlalchemy/alembic/env.py magnum/db/sqlalchemy/alembic/script.py.mako magnum/db/sqlalchemy/alembic/versions/041d9a0f1159_add_flavor_id_to_cluster.py magnum/db/sqlalchemy/alembic/versions/049f81f6f584_remove_ssh_authorized_key_from_baymodel.py magnum/db/sqlalchemy/alembic/versions/04c625aa95ba_change_storage_driver_to_string.py magnum/db/sqlalchemy/alembic/versions/05d3e97de9ee_add_volume_driver.py magnum/db/sqlalchemy/alembic/versions/085e601a39f6_remove_service.py magnum/db/sqlalchemy/alembic/versions/14328d6a57e3_add_master_count_to_bay.py magnum/db/sqlalchemy/alembic/versions/1481f5b560dd_add_labels_column_to_baymodel_table.py magnum/db/sqlalchemy/alembic/versions/156ceb17fb0a_add_bay_status_reason.py magnum/db/sqlalchemy/alembic/versions/1afee1db6cd0_add_master_flavor.py magnum/db/sqlalchemy/alembic/versions/1c1ff5e56048_rename_container_image_id.py magnum/db/sqlalchemy/alembic/versions/1d045384b966_add_insecure_baymodel_attr.py magnum/db/sqlalchemy/alembic/versions/1f196a3dabae_remove_container.py magnum/db/sqlalchemy/alembic/versions/2581ebaf0cb2_initial_migration.py magnum/db/sqlalchemy/alembic/versions/27ad304554e2_adding_magnum_service_functionality.py magnum/db/sqlalchemy/alembic/versions/29affeaa2bc2_rename_bay_master_address.py 
magnum/db/sqlalchemy/alembic/versions/2ace4006498_rename_bay_minions_address.py magnum/db/sqlalchemy/alembic/versions/2ae93c9c6191_add_public_column_to_baymodel_table.py magnum/db/sqlalchemy/alembic/versions/2b5f24dd95de_rename_service_port.py magnum/db/sqlalchemy/alembic/versions/2d1354bbf76e_ssh_authorized_key.py magnum/db/sqlalchemy/alembic/versions/2d8657c0cdc_add_bay_uuid.py magnum/db/sqlalchemy/alembic/versions/33ef79969018_add_memory_to_container.py magnum/db/sqlalchemy/alembic/versions/35cff7c86221_add_private_network_to_baymodel.py magnum/db/sqlalchemy/alembic/versions/3a938526b35d_add_docker_volume_size.py magnum/db/sqlalchemy/alembic/versions/3b6c4c42adb4_add_unique_constraints.py magnum/db/sqlalchemy/alembic/versions/3be65537a94a_add_network_driver_baymodel_column.py magnum/db/sqlalchemy/alembic/versions/3bea56f25597_multi_tenant.py magnum/db/sqlalchemy/alembic/versions/40f325033343_add_bay_create_timeout_to_bay.py magnum/db/sqlalchemy/alembic/versions/417917e778f5_add_server_type_to_baymodel.py magnum/db/sqlalchemy/alembic/versions/421102d1f2d2_create_x509keypair_table.py magnum/db/sqlalchemy/alembic/versions/456126c6c9e9_create_baylock_table.py magnum/db/sqlalchemy/alembic/versions/4956f03cabad_add_cluster_distro.py magnum/db/sqlalchemy/alembic/versions/4e263f236334_add_registry_enabled.py magnum/db/sqlalchemy/alembic/versions/4ea34a59a64c_add_discovery_url_to_bay.py magnum/db/sqlalchemy/alembic/versions/52bcaf58fecb_add_master_flavor_id_to_cluster.py magnum/db/sqlalchemy/alembic/versions/53882537ac57_add_host_column_to_pod.py magnum/db/sqlalchemy/alembic/versions/5518af8dbc21_rename_cert_uuid.py magnum/db/sqlalchemy/alembic/versions/5793cd26898d_add_bay_status.py magnum/db/sqlalchemy/alembic/versions/57fbdf2327a2_remove_baylock.py magnum/db/sqlalchemy/alembic/versions/592131657ca1_add_coe_column_to_baymodel.py magnum/db/sqlalchemy/alembic/versions/5977879072a7_add_env_to_container.py 
magnum/db/sqlalchemy/alembic/versions/59e7664a8ba1_add_container_status.py magnum/db/sqlalchemy/alembic/versions/5ad410481b88_rename_insecure.py magnum/db/sqlalchemy/alembic/versions/5d4caa6e0a42_create_trustee_for_each_bay.py magnum/db/sqlalchemy/alembic/versions/68ce16dfd341_add_master_lb_enabled_column_to_baymodel_table.py magnum/db/sqlalchemy/alembic/versions/6f21dc920bb_add_cert_uuid_to_bay.py magnum/db/sqlalchemy/alembic/versions/6f21dc998bb_add_master_addresses_to_bay.py magnum/db/sqlalchemy/alembic/versions/720f640f43d1_rename_bay_table_to_cluster.py magnum/db/sqlalchemy/alembic/versions/859fb45df249_remove_replication_controller.py magnum/db/sqlalchemy/alembic/versions/966a99e70ff_add_proxy.py magnum/db/sqlalchemy/alembic/versions/9a1539f1cd2c_add_federation_table.py magnum/db/sqlalchemy/alembic/versions/a0e7c8450ab1_add_labels_to_cluster.py magnum/db/sqlalchemy/alembic/versions/a1136d335540_add_docker_storage_driver_column.py magnum/db/sqlalchemy/alembic/versions/aa0cc27839af_add_docker_volume_size_to_cluster.py magnum/db/sqlalchemy/alembic/versions/adc3b7679ae_add_registry_trust_id_to_bay.py magnum/db/sqlalchemy/alembic/versions/b1f612248cab_add_floating_ip_enabled_column_to_.py magnum/db/sqlalchemy/alembic/versions/bb42b7cad130_remove_node_object.py magnum/db/sqlalchemy/alembic/versions/bc46ba6cf949_add_keypair_to_cluster.py magnum/db/sqlalchemy/alembic/versions/d072f58ab240_modify_x509keypair_table.py magnum/db/sqlalchemy/alembic/versions/e0653b2d5271_add_fixed_subnet_column_to_baymodel_table.py magnum/db/sqlalchemy/alembic/versions/e647f5931da8_add_insecure_registry_to_baymodel.py magnum/db/sqlalchemy/alembic/versions/e772b2598d9_add_container_command.py magnum/db/sqlalchemy/alembic/versions/ee92b41b8809_create_quotas_table.py magnum/db/sqlalchemy/alembic/versions/ef08a5e057bd_remove_pod.py magnum/db/sqlalchemy/alembic/versions/fb03fdef8919_rename_baymodel_to_clustertemplate.py 
magnum/db/sqlalchemy/alembic/versions/fcb4efee8f8b_add_version_info_to_bay.py magnum/drivers/__init__.py magnum/drivers/common/__init__.py magnum/drivers/common/driver.py magnum/drivers/common/k8s_monitor.py magnum/drivers/common/k8s_scale_manager.py magnum/drivers/common/image/fedora-atomic/README.rst magnum/drivers/common/image/fedora-atomic/element-deps magnum/drivers/common/image/fedora-atomic/install_imagebuild_deps.sh magnum/drivers/common/image/fedora-atomic/package-installs.yaml magnum/drivers/common/image/fedora-atomic/validate_atomic_image.sh magnum/drivers/common/image/fedora-atomic/environment.d/50-fedora-atomic magnum/drivers/common/image/fedora-atomic/finalise.d/80-fedora-atomic magnum/drivers/common/image/heat-container-agent/Dockerfile magnum/drivers/common/image/heat-container-agent/config.json.template magnum/drivers/common/image/heat-container-agent/launch magnum/drivers/common/image/heat-container-agent/manifest.json magnum/drivers/common/image/heat-container-agent/service.template magnum/drivers/common/image/heat-container-agent/tmpfiles.template magnum/drivers/common/image/heat-container-agent/scripts/50-heat-config-docker-compose magnum/drivers/common/image/heat-container-agent/scripts/55-heat-config magnum/drivers/common/image/heat-container-agent/scripts/configure_container_agent.sh magnum/drivers/common/image/heat-container-agent/scripts/heat-config-notify magnum/drivers/common/image/heat-container-agent/scripts/write-os-apply-config-templates.sh magnum/drivers/common/image/heat-container-agent/scripts/hooks/atomic magnum/drivers/common/image/heat-container-agent/scripts/hooks/docker-compose magnum/drivers/common/image/heat-container-agent/scripts/hooks/script magnum/drivers/common/templates/lb.yaml magnum/drivers/common/templates/network.yaml magnum/drivers/common/templates/environments/disable_floating_ip.yaml magnum/drivers/common/templates/environments/enable_floating_ip.yaml 
magnum/drivers/common/templates/environments/no_etcd_volume.yaml magnum/drivers/common/templates/environments/no_master_lb.yaml magnum/drivers/common/templates/environments/no_private_network.yaml magnum/drivers/common/templates/environments/no_volume.yaml magnum/drivers/common/templates/environments/with_etcd_volume.yaml magnum/drivers/common/templates/environments/with_master_lb.yaml magnum/drivers/common/templates/environments/with_master_lb_octavia.yaml magnum/drivers/common/templates/environments/with_private_network.yaml magnum/drivers/common/templates/environments/with_volume.yaml magnum/drivers/common/templates/fragments/api_gateway_switcher_master.yaml magnum/drivers/common/templates/fragments/api_gateway_switcher_pool.yaml magnum/drivers/common/templates/fragments/atomic-install-openstack-ca.sh magnum/drivers/common/templates/fragments/configure-docker-registry.sh magnum/drivers/common/templates/fragments/configure-docker-storage.sh magnum/drivers/common/templates/fragments/configure_docker_storage_driver_atomic.sh magnum/drivers/common/templates/fragments/enable-docker-registry.sh magnum/drivers/common/templates/fragments/floating_ip_address_switcher_private.yaml magnum/drivers/common/templates/fragments/floating_ip_address_switcher_public.yaml magnum/drivers/common/templates/fragments/network_switcher_existing.yaml magnum/drivers/common/templates/fragments/network_switcher_private.yaml magnum/drivers/common/templates/kubernetes/fragments/add-proxy.sh magnum/drivers/common/templates/kubernetes/fragments/calico-service.sh magnum/drivers/common/templates/kubernetes/fragments/configure-etcd.sh magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh magnum/drivers/common/templates/kubernetes/fragments/core-dns-service.sh magnum/drivers/common/templates/kubernetes/fragments/disable-selinux.sh 
magnum/drivers/common/templates/kubernetes/fragments/enable-cert-api-manager magnum/drivers/common/templates/kubernetes/fragments/enable-ingress-controller magnum/drivers/common/templates/kubernetes/fragments/enable-ingress-traefik magnum/drivers/common/templates/kubernetes/fragments/enable-node-exporter.sh magnum/drivers/common/templates/kubernetes/fragments/enable-prometheus-monitoring magnum/drivers/common/templates/kubernetes/fragments/enable-services-master.sh magnum/drivers/common/templates/kubernetes/fragments/enable-services-minion.sh magnum/drivers/common/templates/kubernetes/fragments/kube-apiserver-to-kubelet-role.sh magnum/drivers/common/templates/kubernetes/fragments/kube-dashboard-service.sh magnum/drivers/common/templates/kubernetes/fragments/make-cert-client.sh magnum/drivers/common/templates/kubernetes/fragments/make-cert.sh magnum/drivers/common/templates/kubernetes/fragments/network-config-service.sh magnum/drivers/common/templates/kubernetes/fragments/network-service.sh magnum/drivers/common/templates/kubernetes/fragments/start-container-agent.sh magnum/drivers/common/templates/kubernetes/fragments/wc-notify-master.sh magnum/drivers/common/templates/kubernetes/fragments/write-heat-params-master.yaml magnum/drivers/common/templates/kubernetes/fragments/write-heat-params.yaml magnum/drivers/common/templates/kubernetes/fragments/write-kube-os-config.sh magnum/drivers/common/templates/kubernetes/fragments/write-network-config.sh magnum/drivers/common/templates/swarm/fragments/add-docker-daemon-options.sh magnum/drivers/common/templates/swarm/fragments/add-proxy.sh magnum/drivers/common/templates/swarm/fragments/cfn-signal.sh magnum/drivers/common/templates/swarm/fragments/configure-etcd.sh magnum/drivers/common/templates/swarm/fragments/configure-selinux.sh magnum/drivers/common/templates/swarm/fragments/enable-services.sh magnum/drivers/common/templates/swarm/fragments/make-cert.py 
magnum/drivers/common/templates/swarm/fragments/network-config-service.sh magnum/drivers/common/templates/swarm/fragments/network-service.sh magnum/drivers/common/templates/swarm/fragments/remove-docker-key.sh magnum/drivers/common/templates/swarm/fragments/volume-service.sh magnum/drivers/common/templates/swarm/fragments/write-cluster-failure-service.yaml magnum/drivers/common/templates/swarm/fragments/write-docker-socket.yaml magnum/drivers/common/templates/swarm/fragments/write-heat-params-master.yaml magnum/drivers/common/templates/swarm/fragments/write-heat-params-node.yaml magnum/drivers/common/templates/swarm/fragments/write-network-config.sh magnum/drivers/common/templates/swarm/fragments/write-swarm-agent-service.sh magnum/drivers/common/templates/swarm/fragments/write-swarm-master-service.sh magnum/drivers/heat/__init__.py magnum/drivers/heat/driver.py magnum/drivers/heat/k8s_fedora_template_def.py magnum/drivers/heat/k8s_template_def.py magnum/drivers/heat/swarm_fedora_template_def.py magnum/drivers/heat/swarm_mode_template_def.py magnum/drivers/heat/template_def.py magnum/drivers/k8s_coreos_v1/__init__.py magnum/drivers/k8s_coreos_v1/driver.py magnum/drivers/k8s_coreos_v1/template_def.py magnum/drivers/k8s_coreos_v1/version.py magnum/drivers/k8s_coreos_v1/templates/COPYING magnum/drivers/k8s_coreos_v1/templates/kubecluster.yaml magnum/drivers/k8s_coreos_v1/templates/kubemaster.yaml magnum/drivers/k8s_coreos_v1/templates/kubeminion.yaml magnum/drivers/k8s_coreos_v1/templates/fragments/add-ext-ca-certs.yaml magnum/drivers/k8s_coreos_v1/templates/fragments/add-proxy.yaml magnum/drivers/k8s_coreos_v1/templates/fragments/configure-docker.yaml magnum/drivers/k8s_coreos_v1/templates/fragments/configure-etcd.yaml magnum/drivers/k8s_coreos_v1/templates/fragments/create-kube-namespace.yaml magnum/drivers/k8s_coreos_v1/templates/fragments/enable-coredns.yaml magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kube-apiserver.yaml 
magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kube-controller-manager.yaml magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kube-dashboard.yaml magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kube-proxy-master.yaml magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kube-proxy-minion.yaml magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kube-scheduler.yaml magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kubelet-master.yaml magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kubelet-minion.yaml magnum/drivers/k8s_coreos_v1/templates/fragments/enable-network-service-client.yaml magnum/drivers/k8s_coreos_v1/templates/fragments/enable-network-service.yaml magnum/drivers/k8s_coreos_v1/templates/fragments/make-cert-client.yaml magnum/drivers/k8s_coreos_v1/templates/fragments/make-cert.yaml magnum/drivers/k8s_coreos_v1/templates/fragments/wc-notify.yaml magnum/drivers/k8s_coreos_v1/templates/fragments/write-heat-params-master.yaml magnum/drivers/k8s_coreos_v1/templates/fragments/write-heat-params.yaml magnum/drivers/k8s_coreos_v1/templates/fragments/write-kubeconfig.yaml magnum/drivers/k8s_coreos_v1/templates/fragments/write-network-config.yaml magnum/drivers/k8s_fedora_atomic_v1/__init__.py magnum/drivers/k8s_fedora_atomic_v1/driver.py magnum/drivers/k8s_fedora_atomic_v1/template_def.py magnum/drivers/k8s_fedora_atomic_v1/version.py magnum/drivers/k8s_fedora_atomic_v1/templates/COPYING magnum/drivers/k8s_fedora_atomic_v1/templates/kubecluster.yaml magnum/drivers/k8s_fedora_atomic_v1/templates/kubemaster.yaml magnum/drivers/k8s_fedora_atomic_v1/templates/kubeminion.yaml magnum/drivers/k8s_fedora_atomic_v1/tools/grafana-prometheus-dashboard.json magnum/drivers/k8s_fedora_ironic_v1/__init__.py magnum/drivers/k8s_fedora_ironic_v1/driver.py magnum/drivers/k8s_fedora_ironic_v1/template_def.py magnum/drivers/k8s_fedora_ironic_v1/version.py magnum/drivers/k8s_fedora_ironic_v1/image/kubernetes/Readme.md 
magnum/drivers/k8s_fedora_ironic_v1/image/kubernetes/elements-deps magnum/drivers/k8s_fedora_ironic_v1/image/kubernetes/package-installs.yaml magnum/drivers/k8s_fedora_ironic_v1/templates/kubecluster.yaml magnum/drivers/k8s_fedora_ironic_v1/templates/kubemaster.yaml magnum/drivers/k8s_fedora_ironic_v1/templates/kubeminion.yaml magnum/drivers/k8s_fedora_ironic_v1/templates/kubeminion_software_configs.yaml magnum/drivers/mesos_ubuntu_v1/COPYING magnum/drivers/mesos_ubuntu_v1/__init__.py magnum/drivers/mesos_ubuntu_v1/driver.py magnum/drivers/mesos_ubuntu_v1/monitor.py magnum/drivers/mesos_ubuntu_v1/scale_manager.py magnum/drivers/mesos_ubuntu_v1/template_def.py magnum/drivers/mesos_ubuntu_v1/version.py magnum/drivers/mesos_ubuntu_v1/image/Dockerfile magnum/drivers/mesos_ubuntu_v1/image/README.md magnum/drivers/mesos_ubuntu_v1/image/install_imagebuild_deps.sh magnum/drivers/mesos_ubuntu_v1/image/validate_image.sh magnum/drivers/mesos_ubuntu_v1/image/docker/elements-deps magnum/drivers/mesos_ubuntu_v1/image/docker/package-installs.yaml magnum/drivers/mesos_ubuntu_v1/image/docker/post-install.d/60-disable-docker-service magnum/drivers/mesos_ubuntu_v1/image/docker/pre-install.d/10-add-docker-repo magnum/drivers/mesos_ubuntu_v1/image/mesos/elements-deps magnum/drivers/mesos_ubuntu_v1/image/mesos/package-installs.yaml magnum/drivers/mesos_ubuntu_v1/image/mesos/post-install.d/60-disable-upstart magnum/drivers/mesos_ubuntu_v1/image/mesos/pre-install.d/10-apt-repo magnum/drivers/mesos_ubuntu_v1/templates/mesos_slave_software_configs.yaml magnum/drivers/mesos_ubuntu_v1/templates/mesoscluster.yaml magnum/drivers/mesos_ubuntu_v1/templates/mesosmaster.yaml magnum/drivers/mesos_ubuntu_v1/templates/mesosslave.yaml magnum/drivers/mesos_ubuntu_v1/templates/fragments/add-ext-ca-certs.sh magnum/drivers/mesos_ubuntu_v1/templates/fragments/add-proxy.sh magnum/drivers/mesos_ubuntu_v1/templates/fragments/configure-mesos-master.sh 
magnum/drivers/mesos_ubuntu_v1/templates/fragments/configure-mesos-slave.sh magnum/drivers/mesos_ubuntu_v1/templates/fragments/start-services-master.sh magnum/drivers/mesos_ubuntu_v1/templates/fragments/start-services-slave.sh magnum/drivers/mesos_ubuntu_v1/templates/fragments/volume-service.sh magnum/drivers/mesos_ubuntu_v1/templates/fragments/write-heat-params-master.sh magnum/drivers/mesos_ubuntu_v1/templates/fragments/write-heat-params.yaml magnum/drivers/swarm_fedora_atomic_v1/__init__.py magnum/drivers/swarm_fedora_atomic_v1/driver.py magnum/drivers/swarm_fedora_atomic_v1/monitor.py magnum/drivers/swarm_fedora_atomic_v1/template_def.py magnum/drivers/swarm_fedora_atomic_v1/version.py magnum/drivers/swarm_fedora_atomic_v1/image/openvswitch/Dockerfile magnum/drivers/swarm_fedora_atomic_v1/image/openvswitch/README.rst magnum/drivers/swarm_fedora_atomic_v1/image/openvswitch/run_openvswitch_neutron.sh magnum/drivers/swarm_fedora_atomic_v1/templates/COPYING magnum/drivers/swarm_fedora_atomic_v1/templates/README.md magnum/drivers/swarm_fedora_atomic_v1/templates/cluster.yaml magnum/drivers/swarm_fedora_atomic_v1/templates/swarmmaster.yaml magnum/drivers/swarm_fedora_atomic_v1/templates/swarmnode.yaml magnum/drivers/swarm_fedora_atomic_v2/__init__.py magnum/drivers/swarm_fedora_atomic_v2/driver.py magnum/drivers/swarm_fedora_atomic_v2/monitor.py magnum/drivers/swarm_fedora_atomic_v2/template_def.py magnum/drivers/swarm_fedora_atomic_v2/version.py magnum/drivers/swarm_fedora_atomic_v2/templates/swarmcluster.yaml magnum/drivers/swarm_fedora_atomic_v2/templates/swarmmaster.yaml magnum/drivers/swarm_fedora_atomic_v2/templates/swarmnode.yaml magnum/drivers/swarm_fedora_atomic_v2/templates/fragments/write-heat-params-master.yaml magnum/drivers/swarm_fedora_atomic_v2/templates/fragments/write-swarm-master-service.sh magnum/drivers/swarm_fedora_atomic_v2/templates/fragments/write-swarm-worker-service.sh magnum/hacking/__init__.py magnum/hacking/checks.py 
magnum/objects/__init__.py magnum/objects/base.py magnum/objects/certificate.py magnum/objects/cluster.py magnum/objects/cluster_template.py magnum/objects/federation.py magnum/objects/fields.py magnum/objects/magnum_service.py magnum/objects/quota.py magnum/objects/stats.py magnum/objects/x509keypair.py magnum/service/__init__.py magnum/service/periodic.py magnum/servicegroup/__init__.py magnum/servicegroup/magnum_service_periodic.py magnum/tests/__init__.py magnum/tests/base.py magnum/tests/conf_fixture.py magnum/tests/fake_notifier.py magnum/tests/fakes.py magnum/tests/policy_fixture.py magnum/tests/utils.py magnum/tests/contrib/copy_instance_logs.sh magnum/tests/contrib/gate_hook.sh magnum/tests/contrib/post_test_hook.sh magnum/tests/functional/__init__.py magnum/tests/functional/python_client_base.py magnum/tests/functional/api/__init__.py magnum/tests/functional/api/base.py magnum/tests/functional/api/v1/__init__.py magnum/tests/functional/api/v1/clients/__init__.py magnum/tests/functional/api/v1/clients/bay_client.py magnum/tests/functional/api/v1/clients/baymodel_client.py magnum/tests/functional/api/v1/clients/cert_client.py magnum/tests/functional/api/v1/clients/cluster_client.py magnum/tests/functional/api/v1/clients/cluster_template_client.py magnum/tests/functional/api/v1/clients/magnum_service_client.py magnum/tests/functional/api/v1/models/__init__.py magnum/tests/functional/api/v1/models/bay_model.py magnum/tests/functional/api/v1/models/baymodel_model.py magnum/tests/functional/api/v1/models/baymodelpatch_model.py magnum/tests/functional/api/v1/models/baypatch_model.py magnum/tests/functional/api/v1/models/cert_model.py magnum/tests/functional/api/v1/models/cluster_id_model.py magnum/tests/functional/api/v1/models/cluster_model.py magnum/tests/functional/api/v1/models/cluster_template_model.py magnum/tests/functional/api/v1/models/cluster_templatepatch_model.py magnum/tests/functional/api/v1/models/clusterpatch_model.py 
magnum/tests/functional/api/v1/models/magnum_service_model.py magnum/tests/functional/common/__init__.py magnum/tests/functional/common/base.py magnum/tests/functional/common/client.py magnum/tests/functional/common/config.py magnum/tests/functional/common/datagen.py magnum/tests/functional/common/manager.py magnum/tests/functional/common/models.py magnum/tests/functional/common/utils.py magnum/tests/functional/k8s/__init__.py magnum/tests/functional/k8s/test_k8s_python_client.py magnum/tests/functional/k8s/test_magnum_python_client.py magnum/tests/functional/k8s_coreos/__init__.py magnum/tests/functional/k8s_coreos/test_k8s_python_client.py magnum/tests/functional/k8s_ironic/__init__.py magnum/tests/functional/k8s_ironic/test_k8s_python_client.py magnum/tests/functional/mesos/__init__.py magnum/tests/functional/mesos/test_mesos_python_client.py magnum/tests/functional/swarm/__init__.py magnum/tests/functional/swarm/test_swarm_python_client.py magnum/tests/functional/swarm_mode/__init__.py magnum/tests/functional/swarm_mode/test_swarm_mode_python_client.py magnum/tests/unit/__init__.py magnum/tests/unit/test_hacking.py magnum/tests/unit/api/__init__.py magnum/tests/unit/api/base.py magnum/tests/unit/api/test_app.py magnum/tests/unit/api/test_attr_validator.py magnum/tests/unit/api/test_expose.py magnum/tests/unit/api/test_hooks.py magnum/tests/unit/api/test_servicegroup.py magnum/tests/unit/api/test_validation.py magnum/tests/unit/api/utils.py magnum/tests/unit/api/controllers/__init__.py magnum/tests/unit/api/controllers/auth-paste.ini magnum/tests/unit/api/controllers/auth-root-access.ini magnum/tests/unit/api/controllers/auth-v1-access.ini magnum/tests/unit/api/controllers/noauth-paste.ini magnum/tests/unit/api/controllers/test_base.py magnum/tests/unit/api/controllers/test_root.py magnum/tests/unit/api/controllers/v1/__init__.py magnum/tests/unit/api/controllers/v1/test_bay.py magnum/tests/unit/api/controllers/v1/test_baymodel.py 
magnum/tests/unit/api/controllers/v1/test_certificate.py magnum/tests/unit/api/controllers/v1/test_cluster.py magnum/tests/unit/api/controllers/v1/test_cluster_template.py magnum/tests/unit/api/controllers/v1/test_federation.py magnum/tests/unit/api/controllers/v1/test_magnum_service.py magnum/tests/unit/api/controllers/v1/test_quota.py magnum/tests/unit/api/controllers/v1/test_stats.py magnum/tests/unit/api/controllers/v1/test_types.py magnum/tests/unit/api/controllers/v1/test_utils.py magnum/tests/unit/cmd/__init__.py magnum/tests/unit/cmd/test_api.py magnum/tests/unit/cmd/test_conductor.py magnum/tests/unit/cmd/test_db_manage.py magnum/tests/unit/cmd/test_driver_manage.py magnum/tests/unit/common/__init__.py magnum/tests/unit/common/test_clients.py magnum/tests/unit/common/test_context.py magnum/tests/unit/common/test_docker_utils.py magnum/tests/unit/common/test_exception.py magnum/tests/unit/common/test_keystone.py magnum/tests/unit/common/test_policy.py magnum/tests/unit/common/test_profiler.py magnum/tests/unit/common/test_rpc.py magnum/tests/unit/common/test_service.py magnum/tests/unit/common/test_short_id.py magnum/tests/unit/common/test_urlfetch.py magnum/tests/unit/common/test_utils.py magnum/tests/unit/common/cert_manager/__init__.py magnum/tests/unit/common/cert_manager/test_barbican.py magnum/tests/unit/common/cert_manager/test_cert_manager.py magnum/tests/unit/common/cert_manager/test_local.py magnum/tests/unit/common/cert_manager/test_x509keypair_cert_manager.py magnum/tests/unit/common/x509/__init__.py magnum/tests/unit/common/x509/test_operations.py magnum/tests/unit/common/x509/test_sign.py magnum/tests/unit/common/x509/test_validator.py magnum/tests/unit/conductor/__init__.py magnum/tests/unit/conductor/test_k8s_api.py magnum/tests/unit/conductor/test_monitors.py magnum/tests/unit/conductor/test_rpcapi.py magnum/tests/unit/conductor/test_scale_manager.py magnum/tests/unit/conductor/test_utils.py magnum/tests/unit/conductor/handlers/__init__.py 
magnum/tests/unit/conductor/handlers/test_ca_conductor.py magnum/tests/unit/conductor/handlers/test_cluster_conductor.py magnum/tests/unit/conductor/handlers/test_conductor_listener.py magnum/tests/unit/conductor/handlers/test_federation_conductor.py magnum/tests/unit/conductor/handlers/test_indirection_api.py magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py magnum/tests/unit/conductor/handlers/test_mesos_cluster_conductor.py magnum/tests/unit/conductor/handlers/test_swarm_cluster_conductor.py magnum/tests/unit/conductor/handlers/common/__init__.py magnum/tests/unit/conductor/handlers/common/test_cert_manager.py magnum/tests/unit/conductor/handlers/common/test_trust_manager.py magnum/tests/unit/conductor/tasks/__init__.py magnum/tests/unit/conductor/tasks/test_heat_tasks.py magnum/tests/unit/conf/__init__.py magnum/tests/unit/conf/test_conf.py magnum/tests/unit/db/__init__.py magnum/tests/unit/db/base.py magnum/tests/unit/db/test_cluster.py magnum/tests/unit/db/test_cluster_template.py magnum/tests/unit/db/test_federation.py magnum/tests/unit/db/test_magnum_service.py magnum/tests/unit/db/test_quota.py magnum/tests/unit/db/test_x509keypair.py magnum/tests/unit/db/utils.py magnum/tests/unit/db/sqlalchemy/__init__.py magnum/tests/unit/db/sqlalchemy/test_types.py magnum/tests/unit/drivers/__init__.py magnum/tests/unit/drivers/test_heat_driver.py magnum/tests/unit/drivers/test_template_definition.py magnum/tests/unit/objects/__init__.py magnum/tests/unit/objects/test_cluster.py magnum/tests/unit/objects/test_cluster_template.py magnum/tests/unit/objects/test_federation.py magnum/tests/unit/objects/test_fields.py magnum/tests/unit/objects/test_magnum_service.py magnum/tests/unit/objects/test_objects.py magnum/tests/unit/objects/test_x509keypair.py magnum/tests/unit/objects/utils.py magnum/tests/unit/service/__init__.py magnum/tests/unit/service/test_periodic.py magnum/tests/unit/servicegroup/__init__.py 
magnum/tests/unit/servicegroup/test_magnum_service.py magnum/tests/unit/template/__init__.py magnum/tests/unit/template/test_template.py playbooks/magnum-buildimages-base.yaml playbooks/magnum-functional-base.yaml playbooks/post/upload-images.yaml playbooks/post/upload-logs.yaml playbooks/pre/prepare-workspace-images.yaml playbooks/pre/prepare-workspace.yaml releasenotes/notes/.placeholder releasenotes/notes/CVE-2016-7404-f53e62a4a40e4d30.yaml releasenotes/notes/add-container_infra_prefix-516cc43fbc5a0617.yaml releasenotes/notes/add-docker-storage-driver-to-baymodel-1ed9ba8d43ecfea1.yaml releasenotes/notes/add-federation-api-cf55d04f96772b0f.yaml releasenotes/notes/add-hostgw-backend-option-1d1f9d8d95ec374f.yaml releasenotes/notes/add-opensuse-driver-f69b6d346ca82b87.yaml releasenotes/notes/add-overlay-networks-to-swarm-4467986d7853fcd8.yaml releasenotes/notes/async-bay-operations-support-9819bd06122ea9e5.yaml releasenotes/notes/availability_zone-2d73671f5ea065d8.yaml releasenotes/notes/bp-add-kube-dashboard-8a9f7d7c73c2debd.yaml releasenotes/notes/bp-auto-generate-name-052ea3fdf05fdbbf.yaml releasenotes/notes/bp-barbican-alternative-store-35ec3eda0abb0e25.yaml releasenotes/notes/bp-container-monitoring-d4bb1cbd0a4e44cc.yaml releasenotes/notes/bp-decouple-lbaas-c8f2d73313c40b98.yaml releasenotes/notes/bp-keypair-override-on-create-ca8f12ffca41cd62.yaml releasenotes/notes/bp-magnum-notifications-8bd44cfe9e80f82b.yaml releasenotes/notes/bp-mesos-slave-flags-de6cf8c4d2c3c916.yaml releasenotes/notes/bp-secure-etcd-cluster-coe-5abd22546f05a85b.yaml releasenotes/notes/broken-kuberenetes-client-d2d1da6029825208.yaml releasenotes/notes/bug-1580704-32a0e91e285792ea.yaml releasenotes/notes/bug-1614596-support-ssl-magnum-api-e4896928c6562e03.yaml releasenotes/notes/bug-1663757-198e1aa8fa810984.yaml releasenotes/notes/bug-1697655-add-etcd-volume-size-label-abde0060595bbbeb.yaml releasenotes/notes/bug-1718947-0d4e67529e2817d7.yaml 
releasenotes/notes/bug-1722522-d94743c6362a5e48.yaml releasenotes/notes/calico-network-driver-0199c2459041ae81.yaml releasenotes/notes/cert-manager-api-ee0cf7f3b767bb5d.yaml releasenotes/notes/change-bay-to-cluster-in-config-1f2b95d1176d7231.yaml releasenotes/notes/change-service-name-ce5c72642fe1d3d1.yaml releasenotes/notes/cluster_template_update_labels-10ce66c87795f11c.yaml releasenotes/notes/docker-volume-type-46044734f5a27661.yaml releasenotes/notes/fix-global-stack-list-7a3a66169f5c4aa8.yaml releasenotes/notes/ingress-controller-552ea956ceabdd25.yaml releasenotes/notes/integrate-osprofiler-79bdf2d0cd8a39fb.yaml releasenotes/notes/keystone_trustee_interface-6d63b74616dda1d4.yaml releasenotes/notes/no-cinder-volume-87b9339e066c30a0.yaml releasenotes/notes/quota-api-182cd1bc9e706b17.yaml releasenotes/notes/remove-container-endpoint-3494eb8bd2406e87.yaml releasenotes/notes/rollback-bay-on-update-failure-83e5ff8a7904d5c4.yaml releasenotes/notes/rotate-cluster-cert-9f84deb0adf9afb1.yaml releasenotes/notes/stats-api-68bc66147ac027e6.yaml releasenotes/notes/support-all-tenants-for-admin-a042f5c520d35837.yaml releasenotes/notes/support-policy-and-doc-in-code-0c19e479dbd953c9.yaml releasenotes/notes/support_nodes_affinity_policy-22253fb9cf6739ec.yaml releasenotes/notes/swarm-integration-with-cinder-e3068138a3f75dbe.yaml releasenotes/notes/update-kubernetes-dashboard-5196831c32d55aee.yaml releasenotes/notes/update-swarm-73d4340a881bff2f.yaml releasenotes/notes/update-to-f27-cc8aa873cdf111bc.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/mitaka.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po specs/async-container-operation.rst 
specs/bay-drivers.rst specs/container-networking-model.rst specs/container-volume-integration-model.rst specs/containers-service.rst specs/create-trustee-user-for-each-bay.rst specs/flatten_attributes.rst specs/magnum-horizon-plugin.rst specs/open-dcos.rst specs/resource-quotas.rst specs/stats-api-spec.rst specs/tls-support-magnum.rst tools/cover.sh tools/flake8wrap.sh tools/pretty_tox.shmagnum-6.1.0/magnum.egg-info/top_level.txt0000664000175100017510000000000713244017674020543 0ustar zuulzuul00000000000000magnum magnum-6.1.0/magnum.egg-info/requires.txt0000664000175100017510000000230613244017674020415 0ustar zuulzuul00000000000000Babel!=2.4.0,>=2.3.4 PyYAML>=3.10 SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 WSME>=0.8.0 WebOb>=1.7.1 alembic>=0.8.10 cliff!=2.9.0,>=2.8.0 decorator>=3.4.0 docker>=2.4.2 enum34>=1.0.4 eventlet!=0.18.3,!=0.20.1,<0.21.0,>=0.18.2 iso8601>=0.1.11 jsonpatch!=1.20,>=1.16 keystoneauth1>=3.3.0 keystonemiddleware>=4.17.0 kubernetes>=4.0.0 marathon!=0.9.1,>=0.8.6 netaddr>=0.7.18 oslo.concurrency>=3.25.0 oslo.config>=5.1.0 oslo.context>=2.19.2 oslo.db>=4.27.0 oslo.i18n>=3.15.3 oslo.log>=3.36.0 oslo.messaging>=5.29.0 oslo.middleware>=3.31.0 oslo.policy>=1.30.0 oslo.serialization!=2.19.1,>=2.18.0 oslo.service!=1.28.1,>=1.24.0 oslo.utils>=3.33.0 oslo.versionedobjects>=1.31.2 oslo.reports>=1.18.0 pbr!=2.1.0,>=2.0.0 pecan!=1.0.2,!=1.0.3,!=1.0.4,!=1.2,>=1.0.0 pycadf!=2.0.0,>=1.1.0 python-barbicanclient!=4.5.0,!=4.5.1,>=4.0.0 python-glanceclient>=2.8.0 python-heatclient>=1.10.0 python-neutronclient>=6.3.0 python-novaclient>=9.1.0 python-keystoneclient>=3.8.0 requests>=2.14.2 setuptools!=24.0.0,!=34.0.0,!=34.0.1,!=34.0.2,!=34.0.3,!=34.1.0,!=34.1.1,!=34.2.0,!=34.3.0,!=34.3.1,!=34.3.2,!=36.2.0,>=16.0 six>=1.10.0 stevedore>=1.20.0 taskflow>=2.16.0 cryptography!=2.0,>=1.9 Werkzeug>=0.7 [osprofiler] osprofiler>=1.4.0 magnum-6.1.0/magnum.egg-info/pbr.json0000664000175100017510000000005613244017674017473 0ustar zuulzuul00000000000000{"git_version": 
"dd1a2aa", "is_release": true}magnum-6.1.0/magnum.egg-info/dependency_links.txt0000664000175100017510000000000113244017674022062 0ustar zuulzuul00000000000000 magnum-6.1.0/setup.py0000666000175100017510000000200613244017334014541 0ustar zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. # solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) magnum-6.1.0/babel.cfg0000666000175100017510000000002113244017334014550 0ustar zuulzuul00000000000000[python: **.py]