openstack-resource-agents-2012.2~f3/0000755000000000000000000000000012032330575016020 5ustar rootrootopenstack-resource-agents-2012.2~f3/ChangeLog0000664000000000000000000000105212032330575017572 0ustar rootrootMartin Loschwitz 2012-08-29 Deleted the RAs for nova-objectstore, nova-network and nova-volume due to QA problems Martin Loschwitz 2012-08-29 Add the debian subdirectory for easier packaging Martin Loschwitz 2012-08-29 Add missing reference to DESTDIR to Makefile Martin Loschwitz 2012-08-29 Initial checkin of this repository Martin Loschwitz 2012-08-28 Initial commit openstack-resource-agents-2012.2~f3/ocf/0000775000000000000000000000000012032330575016571 5ustar rootrootopenstack-resource-agents-2012.2~f3/ocf/nova-network0000664000000000000000000003503712032330575021156 0ustar rootroot#!/bin/sh # # # OpenStack Network Service (nova-network) (replaced by Quantum in Folsom) # # Description: Manages an OpenStack Network Service (nova-network) process as an HA resource # # Authors: Sebastien Han & Emilien Macchi # Mainly inspired by the Nova Scheduler resource agent written by Sebastien Han : http://goo.gl/s8hOU # Which are also inspired by the resource agents written by Martin Gerhard Loschwitz from Hastexo: http://goo.gl/whLpr # # Support: openstack@lists.launchpad.net # License: Apache Software License (ASL) 2.0 # # # See usage() function below for more details ... # # OCF instance parameters: # OCF_RESKEY_binary # OCF_RESKEY_config # OCF_RESKEY_user # OCF_RESKEY_pid # OCF_RESKEY_database_server_port # OCF_RESKEY_amqp_server_port # OCF_RESKEY_zeromq # OCF_RESKEY_additional_parameters ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ####################################################################### # Fill in some defaults if no values are specified OCF_RESKEY_binary_default="nova-network" OCF_RESKEY_config_default="/etc/nova/nova.conf" OCF_RESKEY_user_default="nova" OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" OCF_RESKEY_database_server_port_default="3306" OCF_RESKEY_amqp_server_port_default="5672" OCF_RESKEY_zeromq_default="false" : ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} : ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} : ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} : ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} : ${OCF_RESKEY_database_server_port=${OCF_RESKEY_database_server_port_default}} : ${OCF_RESKEY_amqp_server_port=${OCF_RESKEY_amqp_server_port_default}} : ${OCF_RESKEY_zeromq=${OCF_RESKEY_zeromq_default}} ####################################################################### usage() { cat < 1.0 Resource agent for the OpenStack Nova Network Service (nova-network) May manage a nova-network instance or a clone set that creates a distributed nova-network cluster. Manages the OpenStack Network Service (nova-network) Location of the OpenStack Nova Network server binary (nova-network) OpenStack Nova Network server binary (nova-network) Location of the OpenStack Network Service (nova-network) configuration file OpenStack Nova Network (nova-network) config file User running OpenStack Network Service (nova-network) OpenStack Network Service (nova-network) user The pid file to use for this OpenStack Network Service (nova-network) instance OpenStack Network Service (nova-network) pid file The listening port number of the database server. 
Mandatory to perform a monitor check Database listening port The listening port number of the AMQP server. Mandatory to perform a monitor check AMQP listening port If zeromq is used, this will disable the connection test to the AMQP server Zero-MQ usage Additional parameters to pass on to the OpenStack Network Service (nova-network) Additional parameters for nova-network END } ####################################################################### # Functions invoked by resource manager actions nova_network_validate() { local rc check_binary $OCF_RESKEY_binary check_binary netstat # A config file on shared storage that is not available # during probes is OK. if [ ! -f $OCF_RESKEY_config ]; then if ! ocf_is_probe; then ocf_log err "Config $OCF_RESKEY_config doesn't exist" return $OCF_ERR_INSTALLED fi ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" fi getent passwd $OCF_RESKEY_user >/dev/null 2>&1 rc=$? if [ $rc -ne 0 ]; then ocf_log err "User $OCF_RESKEY_user doesn't exist" return $OCF_ERR_INSTALLED fi true } nova_network_status() { local pid local rc if [ ! -f $OCF_RESKEY_pid ]; then ocf_log info "OpenStack Nova Network (nova-network) is not running" return $OCF_NOT_RUNNING else pid=`cat $OCF_RESKEY_pid` fi ocf_run -warn kill -s 0 $pid rc=$? if [ $rc -eq 0 ]; then return $OCF_SUCCESS else ocf_log info "Old PID file found, but OpenStack Nova Network (nova-network) is not running" return $OCF_NOT_RUNNING fi } nova_network_monitor() { local rc local pid local rc_db local rc_amqp local network_db_check local network_amqp_check nova_network_status rc=$? # If status returned anything but success, return that immediately if [ $rc -ne $OCF_SUCCESS ]; then return $rc fi # Check the connections according to the PID. # We are sure to hit the scheduler process and not other nova process with the same connection behavior (for example nova-cert) if ocf_is_true "$OCF_RESKEY_zeromq"; then pid=`cat $OCF_RESKEY_pid` network_db_check=`netstat -punt | grep -s "$OCF_RESKEY_database_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` rc_db=$? if [ $rc_db -ne 0 ]; then ocf_log err "Nova Network is not connected to the database server: $rc_db" return $OCF_NOT_RUNNING fi else pid=`cat $OCF_RESKEY_pid` # check the connections according to the PID network_db_check=`netstat -punt | grep -s "$OCF_RESKEY_database_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` rc_db=$? network_amqp_check=`netstat -punt | grep -s "$OCF_RESKEY_amqp_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` rc_amqp=$? if [ $rc_amqp -ne 0 ] || [ $rc_db -ne 0 ]; then ocf_log err "Nova Network is not connected to the AMQP server and/or the database server: AMQP connection test returned $rc_amqp and database connection test returned $rc_db" return $OCF_NOT_RUNNING fi fi ocf_log debug "OpenStack Nova Network (nova-network) monitor succeeded" return $OCF_SUCCESS } nova_network_start() { local rc nova_network_status rc=$? if [ $rc -eq $OCF_SUCCESS ]; then ocf_log info "OpenStack Nova Network (nova-network) already running" return $OCF_SUCCESS fi # run the actual nova-network daemon. Don't use ocf_run as we're sending the tool's output # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid # Spin waiting for the server to come up. # Let the CRM/LRM time us out if required while true; do nova_network_monitor rc=$? 
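# Note: the monitor call above only reports success once the daemon holds
# ESTABLISHED connections to the database port (and, unless zeromq is
# enabled, the AMQP port), so leaving this loop means nova-network is
# actually serving, not merely forked.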
[ $rc -eq $OCF_SUCCESS ] && break if [ $rc -ne $OCF_NOT_RUNNING ]; then ocf_log err "OpenStack Nova Network (nova-network) start failed" exit $OCF_ERR_GENERIC fi sleep 1 done ocf_log info "OpenStack Nova Network (nova-network) started" return $OCF_SUCCESS } nova_network_stop() { local rc local pid nova_network_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then ocf_log info "OpenStack Nova Network (nova-network) already stopped" return $OCF_SUCCESS fi # Try SIGTERM pid=`cat $OCF_RESKEY_pid` pid="$pid `ps ax | grep -v grep | grep 'dnsmasq' | cut -c1-5`" ocf_run kill -s TERM $pid rc=$? if [ $rc -ne 0 ]; then ocf_log err "OpenStack Nova Network (nova-network) couldn't be stopped" exit $OCF_ERR_GENERIC fi # stop waiting shutdown_timeout=15 if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) fi count=0 while [ $count -lt $shutdown_timeout ]; do nova_network_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then break fi count=`expr $count + 1` sleep 1 ocf_log debug "OpenStack Nova Network (nova-network) still hasn't stopped yet. Waiting ..." done nova_network_status rc=$? if [ $rc -ne $OCF_NOT_RUNNING ]; then # SIGTERM didn't help either, try SIGKILL ocf_log info "OpenStack Nova Network (nova-network) failed to stop after ${shutdown_timeout}s \ using SIGTERM. Trying SIGKILL ..." ocf_run kill -s KILL $pid fi ocf_log info "OpenStack Nova Network (nova-network) stopped" rm -f $OCF_RESKEY_pid return $OCF_SUCCESS } ####################################################################### case "$1" in meta-data) meta_data exit $OCF_SUCCESS;; usage|help) usage exit $OCF_SUCCESS;; esac # Anything except meta-data and help must pass validation nova_network_validate || exit $? # What kind of method was invoked? case "$1" in start) nova_network_start;; stop) nova_network_stop;; status) nova_network_status;; monitor) nova_network_monitor;; validate-all) ;; *) usage exit $OCF_ERR_UNIMPLEMENTED;; esac openstack-resource-agents-2012.2~f3/ocf/nova-api0000664000000000000000000002721612032330575020236 0ustar rootroot#!/bin/sh # # # OpenStack NovaAPI (nova-api) # # Description: Manages an OpenStack Nova API (nova-api) process as an HA resource # # Authors: Sébastien Han # Mainly inspired by the Glance API resource agent written by Martin Gerhard Loschwitz from Hastexo: http://goo.gl/whLpr # # Support: openstack@lists.launchpad.net # License: Apache Software License (ASL) 2.0 # # # See usage() function below for more details ... # # OCF instance parameters: # OCF_RESKEY_binary # OCF_RESKEY_config # OCF_RESKEY_user # OCF_RESKEY_pid # OCF_RESKEY_os_username # OCF_RESKEY_os_password # OCF_RESKEY_os_tenant_name # OCF_RESKEY_keystone_get_token_url # OCF_RESKEY_additional_parameters ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . 
${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ####################################################################### # Fill in some defaults if no values are specified OCF_RESKEY_binary_default="nova-api" OCF_RESKEY_config_default="/etc/nova/nova.conf" OCF_RESKEY_user_default="nova" OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" OCF_RESKEY_url_default="http://127.0.0.1:8774/v2/" OCF_RESKEY_keystone_get_token_url_default="http://127.0.0.1:5000/v2.0/tokens" : ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} : ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} : ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} : ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} : ${OCF_RESKEY_url=${OCF_RESKEY_url_default}} : ${OCF_RESKEY_keystone_get_token_url=${OCF_RESKEY_keystone_get_token_url_default}} ####################################################################### usage() { cat < 1.0 Resource agent for the OpenStack Nova API Service (nova-api) May manage a nova-api instance or a clone set that creates a distributed nova-api cluster. Manages the OpenStack Nova API (nova-api) Location of the OpenStack Nova API server binary (nova-api) OpenStack Nova API server binary (nova-api) Location of the OpenStack Nova API (nova-api) configuration file OpenStack Nova API (nova-api registry) config file User running OpenStack Nova API (nova-api) OpenStack Nova API (nova-api) user The pid file to use for this OpenStack Nova API (nova-api) instance OpenStack Nova API (nova-api) pid file The default URL to use for monitoring this instance (nova-api) via curl. Important note: the monitor function doesn't accept http return code different than 200, for instance redirection code will generate an error. Don't forget the '/' at the end of your url endpoint. For example http://127.0.0.1:8774/v1.1 won't work and http://127.0.0.1:8774/v1.1/ will. OpenStack Nova API (nova-api) monitor url The default URL to use to acquire a Nova API (nova-api) token for monitoring this instance of OpenStack Nova API (nova-api) OpenStack Nova API (nova-api) url The username to use when connecting with Nova API (nova-api) for monitoring purposes Nova API (nova-api) monitoring login The password to use when connecting Nova API (nova-api) for monitoring purposes Nova API (nova-api) monitoring password The tenant to use when connecting Nova API (nova-api) for monitoring purposes Nova API (nova-api) monitoring tenant Additional parameters to pass on to the OpenStack NovaAPI (nova-api) Additional parameters for nova-api END } ####################################################################### # Functions invoked by resource manager actions nova_api_validate() { local rc check_binary $OCF_RESKEY_binary check_binary curl check_binary tr check_binary grep check_binary cut check_binary head # A config file on shared storage that is not available # during probes is OK. if [ ! -f $OCF_RESKEY_config ]; then if ! ocf_is_probe; then ocf_log err "Config $OCF_RESKEY_config doesn't exist" return $OCF_ERR_INSTALLED fi ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" fi getent passwd $OCF_RESKEY_user >/dev/null 2>&1 rc=$? if [ $rc -ne 0 ]; then ocf_log err "User $OCF_RESKEY_user doesn't exist" return $OCF_ERR_INSTALLED fi true } nova_api_status() { local pid local rc if [ ! -f $OCF_RESKEY_pid ]; then ocf_log info "OpenStack Nova API (nova-api) is not running" return $OCF_NOT_RUNNING else pid=`cat $OCF_RESKEY_pid` fi ocf_run -warn kill -s 0 $pid rc=$? 
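# kill -0 delivers no signal; it only checks whether the PID read from the
# pid file still refers to a live process, so rc=0 below means nova-api is
# running and anything else means the pid file is stale.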
if [ $rc -eq 0 ]; then return $OCF_SUCCESS else ocf_log info "Old PID file found, but OpenStack Nova API (nova-api) is not running" return $OCF_NOT_RUNNING fi } nova_api_monitor() { local rc local token local http_code nova_api_status rc=$? # If status returned anything but success, return that immediately if [ $rc -ne $OCF_SUCCESS ]; then return $rc fi # Check detailed information about this specific version of the API. if [ -n "$OCF_RESKEY_os_username" ] && [ -n "$OCF_RESKEY_os_password" ] \ && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_keystone_get_token_url" ]; then token=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$OCF_RESKEY_os_username\", \ \"password\": \"$OCF_RESKEY_os_password\"}, \"tenantName\": \"$OCF_RESKEY_os_tenant_name\"}}" \ -H "Content-type: application/json" $OCF_RESKEY_keystone_get_token_url | tr ',' '\n' | grep '"id":' \ | cut -d'"' -f4 | head --lines 1` http_code=`curl --write-out %{http_code} --output /dev/null -sH "X-Auth-Token: $token" $OCF_RESKEY_url` rc=$? if [ $rc -ne 0 ] || [ $http_code -ne 200 ]; then ocf_log err "Failed to connect to the OpenStack Nova API (nova-api): $rc and $http_code" return $OCF_NOT_RUNNING fi fi ocf_log debug "OpenStack Nova API (nova-api) monitor succeeded" return $OCF_SUCCESS } nova_api_start() { local rc nova_api_status rc=$? if [ $rc -eq $OCF_SUCCESS ]; then ocf_log info "OpenStack Nova API (nova-api) already running" return $OCF_SUCCESS fi # run the actual nova-api daemon. Don't use ocf_run as we're sending the tool's output # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid # Spin waiting for the server to come up. # Let the CRM/LRM time us out if required while true; do nova_api_monitor rc=$? [ $rc -eq $OCF_SUCCESS ] && break if [ $rc -ne $OCF_NOT_RUNNING ]; then ocf_log err "OpenStack Nova API (nova-api) start failed" exit $OCF_ERR_GENERIC fi sleep 1 done ocf_log info "OpenStack Nova API (nova-api) started" return $OCF_SUCCESS } nova_api_stop() { local rc local pid nova_api_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then ocf_log info "OpenStack Nova API (nova-api) already stopped" return $OCF_SUCCESS fi # Try SIGTERM pid=`cat $OCF_RESKEY_pid` ocf_run kill -s TERM $pid rc=$? if [ $rc -ne 0 ]; then ocf_log err "OpenStack Nova API (nova-api) couldn't be stopped" exit $OCF_ERR_GENERIC fi # stop waiting shutdown_timeout=15 if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) fi count=0 while [ $count -lt $shutdown_timeout ]; do nova_api_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then break fi count=`expr $count + 1` sleep 1 ocf_log debug "OpenStack Nova API (nova-api) still hasn't stopped yet. Waiting ..." done nova_api_status rc=$? if [ $rc -ne $OCF_NOT_RUNNING ]; then # SIGTERM didn't help either, try SIGKILL ocf_log info "OpenStack Nova API (nova-api) failed to stop after ${shutdown_timeout}s \ using SIGTERM. Trying SIGKILL ..." ocf_run kill -s KILL $pid fi ocf_log info "OpenStack Nova API (nova-api) stopped" rm -f $OCF_RESKEY_pid return $OCF_SUCCESS } ####################################################################### case "$1" in meta-data) meta_data exit $OCF_SUCCESS;; usage|help) usage exit $OCF_SUCCESS;; esac # Anything except meta-data and help must pass validation nova_api_validate || exit $? 
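# Illustrative usage (kept as a comment so the dispatch below is untouched):
# a minimal Pacemaker primitive for this agent, assuming it is installed
# under the ocf:openstack provider; the credentials shown are placeholders
# and should be replaced with a real monitoring account.
#
#   primitive p_nova-api ocf:openstack:nova-api \
#     params config="/etc/nova/nova.conf" \
#            os_username="monitoring" os_password="secret" \
#            os_tenant_name="admin" \
#            keystone_get_token_url="http://127.0.0.1:5000/v2.0/tokens" \
#     op monitor interval="30s" timeout="30s"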
# What kind of method was invoked? case "$1" in start) nova_api_start;; stop) nova_api_stop;; status) nova_api_status;; monitor) nova_api_monitor;; validate-all) ;; *) usage exit $OCF_ERR_UNIMPLEMENTED;; esac openstack-resource-agents-2012.2~f3/ocf/nova-volume0000664000000000000000000003441112032330575020767 0ustar rootroot#!/bin/sh # # # OpenStack Nova Volume (nova-volume) (replaced by Cinder in Folsom) # # Description: Manages an OpenStack Nova Volume (nova-volume) process as an HA resource # # Authors: Sébastien Han # Mainly inspired by the Glance API resource agent written by Martin Gerhard Loschwitz from Hastexo: http://goo.gl/whLpr # # Support: openstack@lists.launchpad.net # License: Apache Software License (ASL) 2.0 # # # See usage() function below for more details ... # # OCF instance parameters: # OCF_RESKEY_binary # OCF_RESKEY_config # OCF_RESKEY_user # OCF_RESKEY_pid # OCF_RESKEY_database_server_port # OCF_RESKEY_amqp_server_port # OCF_RESKEY_zeromq # OCF_RESKEY_additional_parameters ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ####################################################################### # Fill in some defaults if no values are specified OCF_RESKEY_binary_default="nova-volume" OCF_RESKEY_config_default="/etc/nova/nova.conf" OCF_RESKEY_user_default="nova" OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" OCF_RESKEY_database_server_port_default="3306" OCF_RESKEY_amqp_server_port_default="5672" OCF_RESKEY_zeromq_default="false" : ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} : ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} : ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} : ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} : ${OCF_RESKEY_database_server_port=${OCF_RESKEY_database_server_port_default}} : ${OCF_RESKEY_amqp_server_port=${OCF_RESKEY_amqp_server_port_default}} : ${OCF_RESKEY_zeromq=${OCF_RESKEY_zeromq_default}} ####################################################################### usage() { cat < 1.0 Resource agent for the OpenStack Nova Volume (nova-volume) May manage a nova-volume instance or a clone set that creates a distributed nova-volume cluster. Manages the OpenStack Nova Volume (nova-volume) Location of the OpenStack Nova Volume server binary (nova-volume) OpenStack Nova Volume server binary (nova-volume) Location of the OpenStack Nova Volume (nova-volume) configuration file OpenStack Nova Volume (nova-volume) config file User running OpenStack Nova Volume (nova-volume) OpenStack Nova Volume (nova-volume) user The pid file to use for this OpenStack Nova Volume (nova-volume) instance OpenStack Nova Volume (nova-volume) pid file The listening port number of the database server. Mandatory to perform a monitor check Database listening port The listening port number of the AMQP server. Mandatory to perform a monitor check AMQP listening port If zeromq is used, this will disable the connection test to the AMQP server Zero-MQ usage Additional parameters to pass on to the OpenStack Nova Volume (nova-volume) Additional parameters for nova-volume END } ####################################################################### # Functions invoked by resource manager actions nova_volume_validate() { local rc check_binary $OCF_RESKEY_binary check_binary netstat # A config file on shared storage that is not available # during probes is OK. if [ ! -f $OCF_RESKEY_config ]; then if ! 
ocf_is_probe; then ocf_log err "Config $OCF_RESKEY_config doesn't exist" return $OCF_ERR_INSTALLED fi ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" fi getent passwd $OCF_RESKEY_user >/dev/null 2>&1 rc=$? if [ $rc -ne 0 ]; then ocf_log err "User $OCF_RESKEY_user doesn't exist" return $OCF_ERR_INSTALLED fi true } nova_volume_status() { local pid local rc if [ ! -f $OCF_RESKEY_pid ]; then ocf_log info "OpenStack Nova Volume (nova-volume) is not running" return $OCF_NOT_RUNNING else pid=`cat $OCF_RESKEY_pid` fi ocf_run -warn kill -s 0 $pid rc=$? if [ $rc -eq 0 ]; then return $OCF_SUCCESS else ocf_log info "Old PID file found, but OpenStack Nova Volume (nova-volume) is not running" return $OCF_NOT_RUNNING fi } nova_volume_monitor() { local rc local pid local rc_db local rc_amqp local volume_db_check local volume_amqp_check nova_volume_status rc=$? # If status returned anything but success, return that immediately if [ $rc -ne $OCF_SUCCESS ]; then return $rc fi # Check the connections according to the PID # We are sure to hit the scheduler process and not other nova process with the same connection behavior (for example nova-cert) if ocf_is_true "$OCF_RESKEY_zeromq"; then pid=`cat $OCF_RESKEY_pid` volume_db_check=`netstat -punt | grep -s "$OCF_RESKEY_database_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` rc_db=$? if [ $rc_db -ne 0 ]; then ocf_log err "Nova Volume is not connected to the database server: $rc_db" return $OCF_NOT_RUNNING fi else pid=`cat $OCF_RESKEY_pid` # check the connections according to the PID volume_db_check=`netstat -punt | grep -s "$OCF_RESKEY_database_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` rc_db=$? volume_amqp_check=`netstat -punt | grep -s "$OCF_RESKEY_amqp_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` rc_amqp=$? if [ $rc_amqp -ne 0 ] || [ $rc_db -ne 0 ]; then ocf_log err "Nova Volume is not connected to the AMQP server and/or the database server: AMQP connection test returned $rc_amqp and database connection test returned $rc_db" return $OCF_NOT_RUNNING fi fi ocf_log debug "OpenStack Nova Volume (nova-volume) monitor succeeded" return $OCF_SUCCESS } nova_volume_start() { local rc nova_volume_status rc=$? if [ $rc -eq $OCF_SUCCESS ]; then ocf_log info "OpenStack NovaVolume (nova-volume) already running" return $OCF_SUCCESS fi # run the actual nova-volume daemon. Don't use ocf_run as we're sending the tool's output # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid # Spin waiting for the server to come up. # Let the CRM/LRM time us out if required while true; do nova_volume_monitor rc=$? [ $rc -eq $OCF_SUCCESS ] && break if [ $rc -ne $OCF_NOT_RUNNING ]; then ocf_log err "OpenStack NovaVolume (nova-volume) start failed" exit $OCF_ERR_GENERIC fi sleep 1 done ocf_log info "OpenStack NovaVolume (nova-volume) started" return $OCF_SUCCESS } nova_volume_stop() { local rc local pid nova_volume_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then ocf_log info "OpenStack NovaVolume (nova-volume) already stopped" return $OCF_SUCCESS fi # Try SIGTERM pid=`cat $OCF_RESKEY_pid` ocf_run kill -s TERM $pid rc=$? 
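# A non-zero rc here means the TERM signal itself could not be delivered
# (typically a stale pid file or missing process); the graceful-shutdown
# wait below only makes sense when the signal was accepted.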
if [ $rc -ne 0 ]; then ocf_log err "OpenStack NovaVolume (nova-volume) couldn't be stopped" exit $OCF_ERR_GENERIC fi # stop waiting shutdown_timeout=15 if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) fi count=0 while [ $count -lt $shutdown_timeout ]; do nova_volume_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then break fi count=`expr $count + 1` sleep 1 ocf_log debug "OpenStack NovaVolume (nova-volume) still hasn't stopped yet. Waiting ..." done nova_volume_status rc=$? if [ $rc -ne $OCF_NOT_RUNNING ]; then # SIGTERM didn't help either, try SIGKILL ocf_log info "OpenStack NovaVolume (nova-volume) failed to stop after ${shutdown_timeout}s \ using SIGTERM. Trying SIGKILL ..." ocf_run kill -s KILL $pid fi ocf_log info "OpenStack NovaVolume (nova-volume) stopped" rm -f $OCF_RESKEY_pid return $OCF_SUCCESS } ####################################################################### case "$1" in meta-data) meta_data exit $OCF_SUCCESS;; usage|help) usage exit $OCF_SUCCESS;; esac # Anything except meta-data and help must pass validation nova_volume_validate || exit $? # What kind of method was invoked? case "$1" in start) nova_volume_start;; stop) nova_volume_stop;; status) nova_volume_status;; monitor) nova_volume_monitor;; validate-all) ;; *) usage exit $OCF_ERR_UNIMPLEMENTED;; esac openstack-resource-agents-2012.2~f3/ocf/glance-api0000664000000000000000000002575512032330575020532 0ustar rootroot#!/bin/sh # # # OpenStack ImageService (glance-api) # # Description: Manages an OpenStack ImageService (glance-api) process as an HA resource # # Authors: Martin Gerhard Loschwitz # # Support: openstack@lists.launchpad.net # License: Apache Software License (ASL) 2.0 # # (c) 2012 hastexo Professional Services GmbH # # See usage() function below for more details ... # # OCF instance parameters: # OCF_RESKEY_binary # OCF_RESKEY_client_binary # OCF_RESKEY_config # OCF_RESKEY_os_username # OCF_RESKEY_os_password # OCF_RESKEY_os_tenant_name # OCF_RESKEY_os_auth_url # OCF_RESKEY_user # OCF_RESKEY_pid # OCF_RESKEY_additional_parameters ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ####################################################################### # Fill in some defaults if no values are specified OCF_RESKEY_binary_default="glance-api" OCF_RESKEY_config_default="/etc/glance/glance-api.conf" OCF_RESKEY_user_default="glance" OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" OCF_RESKEY_client_binary_default="glance" : ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} : ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} : ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} : ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} : ${OCF_RESKEY_client_binary=${OCF_RESKEY_client_binary_default}} ####################################################################### usage() { cat < 1.0 Resource agent for the OpenStack ImageService Service (glance-api) May manage a glance-api instance or a clone set that creates a distributed glance-api cluster. 
Manages the OpenStack ImageService (glance-api) Location of the OpenStack ImageService server binary (glance-api) OpenStack ImageService server binary (glance-api) Location of the OpenStack ImageService (glance-api) configuration file OpenStack ImageService (glance registry) config file User running OpenStack ImageService (glance-api) OpenStack ImageService (glance-api) user The pid file to use for this OpenStack ImageService (glance-api) instance OpenStack ImageService (glance-api) pid file The username to use when logging into ImageService (glance-api) for monitoring purposes ImageService (glance-api) monitoring login The password to use when logging into ImageService (glance-api) for monitoring purposes ImageService (glance-api) monitoring password The tenant to use when logging into ImageService (glance-api) for monitoring purposes ImageService (glance-api) monitoring tenant The URL pointing to this ImageService (glance-api) instance to use when logging in for monitoring purposes ImageService (glance-api) URL for monitoring login Location of the OpenStack ImageService client binary (glance) OpenStack ImageService server binary (glance) Additional parameters to pass on to the OpenStack ImageService (glance-api) Additional parameters for glance-api END } ####################################################################### # Functions invoked by resource manager actions glance_api_validate() { local rc check_binary $OCF_RESKEY_binary check_binary $OCF_RESKEY_client_binary # A config file on shared storage that is not available # during probes is OK. if [ ! -f $OCF_RESKEY_config ]; then if ! ocf_is_probe; then ocf_log err "Config $OCF_RESKEY_config doesn't exist" return $OCF_ERR_INSTALLED fi ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" fi getent passwd $OCF_RESKEY_user >/dev/null 2>&1 rc=$? if [ $rc -ne 0 ]; then ocf_log err "User $OCF_RESKEY_user doesn't exist" return $OCF_ERR_INSTALLED fi true } glance_api_status() { local pid local rc if [ ! -f $OCF_RESKEY_pid ]; then ocf_log info "OpenStack ImageService (glance-api) is not running" return $OCF_NOT_RUNNING else pid=`cat $OCF_RESKEY_pid` fi ocf_run -warn kill -s 0 $pid rc=$? if [ $rc -eq 0 ]; then return $OCF_SUCCESS else ocf_log info "Old PID file found, but OpenStack ImageService (glance-api) is not running" return $OCF_NOT_RUNNING fi } glance_api_monitor() { local rc glance_api_status rc=$? # If status returned anything but success, return that immediately if [ $rc -ne $OCF_SUCCESS ]; then return $rc fi # Monitor the RA by retrieving the image list if [ -n "$OCF_RESKEY_os_username" ] && [ -n "$OCF_RESKEY_os_password" ] \ && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_os_auth_url" ]; then ocf_run -q $OCF_RESKEY_client_binary \ --os-username "$OCF_RESKEY_os_username" \ --os-password "$OCF_RESKEY_os_password" \ --os-tenant-name "$OCF_RESKEY_os_tenant_name" \ --os-auth-url "$OCF_RESKEY_os_auth_url" \ index > /dev/null 2>&1 rc=$? if [ $rc -ne 0 ]; then ocf_log err "Failed to connect to the OpenStack ImageService (glance-api): $rc" return $OCF_NOT_RUNNING fi fi ocf_log debug "OpenStack ImageService (glance-api) monitor succeeded" return $OCF_SUCCESS } glance_api_start() { local rc glance_api_status rc=$? if [ $rc -eq $OCF_SUCCESS ]; then ocf_log info "OpenStack ImageService (glance-api) already running" return $OCF_SUCCESS fi # run the actual glance-api daemon. Don't use ocf_run as we're sending the tool's output # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. 
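# The command string below is split on purpose: the double-quoted part is
# expanded here (binary, config file, extra parameters), while the
# single-quoted tail runs inside the su shell, so "echo $!" captures the
# PID of the backgrounded daemon and writes it into the pid file.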
su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file $OCF_RESKEY_config \ $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid # Spin waiting for the server to come up. # Let the CRM/LRM time us out if required while true; do glance_api_monitor rc=$? [ $rc -eq $OCF_SUCCESS ] && break if [ $rc -ne $OCF_NOT_RUNNING ]; then ocf_log err "OpenStack ImageService (glance-api) start failed" exit $OCF_ERR_GENERIC fi sleep 1 done ocf_log info "OpenStack ImageService (glance-api) started" return $OCF_SUCCESS } glance_api_stop() { local rc local pid glance_api_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then ocf_log info "OpenStack ImageService (glance-api) already stopped" return $OCF_SUCCESS fi # Try SIGTERM pid=`cat $OCF_RESKEY_pid` ocf_run kill -s TERM $pid rc=$? if [ $rc -ne 0 ]; then ocf_log err "OpenStack ImageService (glance-api) couldn't be stopped" exit $OCF_ERR_GENERIC fi # stop waiting shutdown_timeout=15 if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) fi count=0 while [ $count -lt $shutdown_timeout ]; do glance_api_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then break fi count=`expr $count + 1` sleep 1 ocf_log debug "OpenStack ImageService (glance-api) still hasn't stopped yet. Waiting ..." done glance_api_status rc=$? if [ $rc -ne $OCF_NOT_RUNNING ]; then # SIGTERM didn't help either, try SIGKILL ocf_log info "OpenStack ImageService (glance-api) failed to stop after ${shutdown_timeout}s \ using SIGTERM. Trying SIGKILL ..." ocf_run kill -s KILL $pid fi ocf_log info "OpenStack ImageService (glance-api) stopped" rm -f $OCF_RESKEY_pid return $OCF_SUCCESS } ####################################################################### case "$1" in meta-data) meta_data exit $OCF_SUCCESS;; usage|help) usage exit $OCF_SUCCESS;; esac # Anything except meta-data and help must pass validation glance_api_validate || exit $? # What kind of method was invoked? case "$1" in start) glance_api_start;; stop) glance_api_stop;; status) glance_api_status;; monitor) glance_api_monitor;; validate-all) ;; *) usage exit $OCF_ERR_UNIMPLEMENTED;; esac openstack-resource-agents-2012.2~f3/ocf/cinder-volume0000664000000000000000000002537412032330575021300 0ustar rootroot#!/bin/sh # # # OpenStack Cinder Volume (cinder-volume) # # Description: Manages an OpenStack Volumes (cinder-volume) process as an HA resource # # Authors: Sébastien Han # Mainly inspired by the Glance API resource agent written by Martin Gerhard Loschwitz from Hastexo: http://goo.gl/whLpr # # Support: openstack@lists.launchpad.net # License: Apache Software License (ASL) 2.0 # # # See usage() function below for more details ... # # OCF instance parameters: # OCF_RESKEY_binary # OCF_RESKEY_config # OCF_RESKEY_user # OCF_RESKEY_pid # OCF_RESKEY_amqp_server_port # OCF_RESKEY_additional_parameters ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . 
${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ####################################################################### # Fill in some defaults if no values are specified OCF_RESKEY_binary_default="cinder-volume" OCF_RESKEY_config_default="/etc/cinder/cinder.conf" OCF_RESKEY_user_default="cinder" OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" OCF_RESKEY_amqp_server_port_default="5672" : ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} : ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} : ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} : ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} : ${OCF_RESKEY_amqp_server_port=${OCF_RESKEY_amqp_server_port_default}} ####################################################################### usage() { cat < 1.0 Resource agent for the OpenStack Nova Volume (cinder-volume) May manage a cinder-volume instance or a clone set that creates a distributed cinder-volume cluster. Manages the OpenStack Nova Volume (cinder-volume) Location of the OpenStack Nova Volume server binary (cinder-volume) OpenStack Nova Volume server binary (cinder-volume) Location of the OpenStack Nova Volume (cinder-volume) configuration file OpenStack Nova Volume (cinder-volume) config file User running OpenStack Nova Volume (cinder-volume) OpenStack Nova Volume (cinder-volume) user The pid file to use for this OpenStack Nova Volume (cinder-volume) instance OpenStack Nova Volume (cinder-volume) pid file The listening port number of the AMQP server. Mandatory to perform a monitor check AMQP listening port Additional parameters to pass on to the OpenStack Nova Volume (cinder-volume) Additional parameters for cinder-volume END } ####################################################################### # Functions invoked by resource manager actions nova_volume_validate() { local rc check_binary $OCF_RESKEY_binary check_binary netstat # A config file on shared storage that is not available # during probes is OK. if [ ! -f $OCF_RESKEY_config ]; then if ! ocf_is_probe; then ocf_log err "Config $OCF_RESKEY_config doesn't exist" return $OCF_ERR_INSTALLED fi ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" fi getent passwd $OCF_RESKEY_user >/dev/null 2>&1 rc=$? if [ $rc -ne 0 ]; then ocf_log err "User $OCF_RESKEY_user doesn't exist" return $OCF_ERR_INSTALLED fi true } nova_volume_status() { local pid local rc if [ ! -f $OCF_RESKEY_pid ]; then ocf_log info "OpenStack Nova Volume (cinder-volume) is not running" return $OCF_NOT_RUNNING else pid=`cat $OCF_RESKEY_pid` fi ocf_run -warn kill -s 0 $pid rc=$? if [ $rc -eq 0 ]; then return $OCF_SUCCESS else ocf_log info "Old PID file found, but OpenStack Nova Volume (cinder-volume) is not running" return $OCF_NOT_RUNNING fi } nova_volume_monitor() { local rc local pid local volume_amqp_check nova_volume_status rc=$? # If status returned anything but success, return that immediately if [ $rc -ne $OCF_SUCCESS ]; then return $rc fi # Check the connections according to the PID # We are sure to hit the scheduler process and not other nova process with the same connection behavior (for example nova-cert) pid=`cat $OCF_RESKEY_pid` # check the connections according to the PID volume_amqp_check=`netstat -punt | grep -s "$OCF_RESKEY_amqp_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` rc=$? 
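# Because of grep -q the captured variable stays empty; only rc matters:
# rc=0 means an ESTABLISHED connection from this PID to the AMQP port was
# found, anything else is treated as "not running" below.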
if [ $rc -ne 0 ]; then ocf_log err "Nova Volume is not connected to the AMQP server: $rc" return $OCF_NOT_RUNNING fi ocf_log debug "OpenStack Nova Volume (cinder-volume) monitor succeeded" return $OCF_SUCCESS } nova_volume_start() { local rc nova_volume_status rc=$? if [ $rc -eq $OCF_SUCCESS ]; then ocf_log info "OpenStack Cinder Volume (cinder-volume) already running" return $OCF_SUCCESS fi # run the actual cinder-volume daemon. Don't use ocf_run as we're sending the tool's output # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid # Spin waiting for the server to come up. # Let the CRM/LRM time us out if required while true; do nova_volume_monitor rc=$? [ $rc -eq $OCF_SUCCESS ] && break if [ $rc -ne $OCF_NOT_RUNNING ]; then ocf_log err "OpenStack Cinder Volume (cinder-volume) start failed" exit $OCF_ERR_GENERIC fi sleep 1 done ocf_log info "OpenStack Cinder Volume (cinder-volume) started" return $OCF_SUCCESS } nova_volume_stop() { local rc local pid nova_volume_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then ocf_log info "OpenStack Cinder Volume (cinder-volume) already stopped" return $OCF_SUCCESS fi # Try SIGTERM pid=`cat $OCF_RESKEY_pid` ocf_run kill -s TERM $pid rc=$? if [ $rc -ne 0 ]; then ocf_log err "OpenStack Cinder Volume (cinder-volume) couldn't be stopped" exit $OCF_ERR_GENERIC fi # stop waiting shutdown_timeout=15 if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) fi count=0 while [ $count -lt $shutdown_timeout ]; do nova_volume_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then break fi count=`expr $count + 1` sleep 1 ocf_log debug "OpenStack Cinder Volume (cinder-volume) still hasn't stopped yet. Waiting ..." done nova_volume_status rc=$? if [ $rc -ne $OCF_NOT_RUNNING ]; then # SIGTERM didn't help either, try SIGKILL ocf_log info "OpenStack Cinder Volume (cinder-volume) failed to stop after ${shutdown_timeout}s \ using SIGTERM. Trying SIGKILL ..." ocf_run kill -s KILL $pid fi ocf_log info "OpenStack Cinder Volume (cinder-volume) stopped" rm -f $OCF_RESKEY_pid return $OCF_SUCCESS } ####################################################################### case "$1" in meta-data) meta_data exit $OCF_SUCCESS;; usage|help) usage exit $OCF_SUCCESS;; esac # Anything except meta-data and help must pass validation nova_volume_validate || exit $? # What kind of method was invoked? case "$1" in start) nova_volume_start;; stop) nova_volume_stop;; status) nova_volume_status;; monitor) nova_volume_monitor;; validate-all) ;; *) usage exit $OCF_ERR_UNIMPLEMENTED;; esac openstack-resource-agents-2012.2~f3/ocf/cinder-api0000664000000000000000000002743312032330575020540 0ustar rootroot#!/bin/sh # # # OpenStack Cinder API (cinder-api) # # Description: Manages an OpenStack Cinder API (cinder-api) process as an HA resource # # Authors: Emilien Macchi # Mainly inspired by the Nova API written by Sebastien Han # # Support: openstack@lists.launchpad.net # License: Apache Software License (ASL) 2.0 # # # See usage() function below for more details ... 
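#
# Illustrative usage (values are placeholders): a Pacemaker primitive using
# this agent, assuming it is installed under the ocf:openstack provider:
#
#   primitive p_cinder-api ocf:openstack:cinder-api \
#     params config="/etc/cinder/cinder.conf" \
#            keystone_get_token_url="http://127.0.0.1:5000/v2.0/tokens" \
#     op monitor interval="30s" timeout="30s"
#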
# # OCF instance parameters: # OCF_RESKEY_binary # OCF_RESKEY_config # OCF_RESKEY_user # OCF_RESKEY_pid # OCF_RESKEY_os_username # OCF_RESKEY_os_password # OCF_RESKEY_os_tenant_name # OCF_RESKEY_keystone_get_token_url # OCF_RESKEY_additional_parameters ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ####################################################################### # Fill in some defaults if no values are specified OCF_RESKEY_binary_default="cinder-api" OCF_RESKEY_config_default="/etc/cinder/cinder.conf" OCF_RESKEY_user_default="cinder" OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" OCF_RESKEY_url_default="http://127.0.0.1:8776/v1/" OCF_RESKEY_keystone_get_token_url_default="http://127.0.0.1:5000/v2.0/tokens" : ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} : ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} : ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} : ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} : ${OCF_RESKEY_url=${OCF_RESKEY_url_default}} : ${OCF_RESKEY_keystone_get_token_url=${OCF_RESKEY_keystone_get_token_url_default}} ####################################################################### usage() { cat < 1.0 Resource agent for the OpenStack Cinder API Service (cinder-api) May manage a cinder-api instance or a clone set that creates a distributed cinder-api cluster. Manages the OpenStack Cinder API (cinder-api) Location of the OpenStack Cinder API server binary (cinder-api) OpenStack Cinder API server binary (cinder-api) Location of the OpenStack Cinder API (cinder-api) configuration file OpenStack Cinder API (cinder-api registry) config file User running OpenStack Cinder API (cinder-api) OpenStack Cinder API (cinder-api) user The pid file to use for this OpenStack Cinder API (cinder-api) instance OpenStack Cinder API (cinder-api) pid file The default URL to use for monitoring this instance (cinder-api) via curl. Important note: the monitor function doesn't accept http return code different than 200, for instance redirection code will generate an error. Don't forget the '/' at the end of your url endpoint. For example http://127.0.0.1:8776/v1 won't work and http://127.0.0.1:8776/v1/ will. OpenStack Cinder API (cinder-api) monitor url The default URL to use to acquire a Cinder API (cinder-api) token for monitoring this instance of OpenStack Cinder API (cinder-api) OpenStack Cinder API (cinder-api) url The username to use when connecting with Cinder API (cinder-api) for monitoring purposes Cinder API (cinder-api) monitoring login The password to use when connecting Cinder API (cinder-api) for monitoring purposes Cinder API (cinder-api) monitoring password The tenant to use when connecting Cinder API (cinder-api) for monitoring purposes Cinder API (cinder-api) monitoring tenant Additional parameters to pass on to the OpenStack Cinder API (cinder-api) Additional parameters for cinder-api END } ####################################################################### # Functions invoked by resource manager actions cinder_api_validate() { local rc check_binary $OCF_RESKEY_binary check_binary curl check_binary tr check_binary grep check_binary cut check_binary head # A config file on shared storage that is not available # during probes is OK. if [ ! -f $OCF_RESKEY_config ]; then if ! 
ocf_is_probe; then ocf_log err "Config $OCF_RESKEY_config doesn't exist" return $OCF_ERR_INSTALLED fi ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" fi getent passwd $OCF_RESKEY_user >/dev/null 2>&1 rc=$? if [ $rc -ne 0 ]; then ocf_log err "User $OCF_RESKEY_user doesn't exist" return $OCF_ERR_INSTALLED fi true } cinder_api_status() { local pid local rc if [ ! -f $OCF_RESKEY_pid ]; then ocf_log info "OpenStack Cinder API (cinder-api) is not running" return $OCF_NOT_RUNNING else pid=`cat $OCF_RESKEY_pid` fi ocf_run -warn kill -s 0 $pid rc=$? if [ $rc -eq 0 ]; then return $OCF_SUCCESS else ocf_log info "Old PID file found, but OpenStack Cinder API (cinder-api) is not running" return $OCF_NOT_RUNNING fi } cinder_api_monitor() { local rc local token local http_code cinder_api_status rc=$? # If status returned anything but success, return that immediately if [ $rc -ne $OCF_SUCCESS ]; then return $rc fi # Check detailed information about this specific version of the API. if [ -n "$OCF_RESKEY_os_username" ] && [ -n "$OCF_RESKEY_os_password" ] \ && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_keystone_get_token_url" ]; then token=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$OCF_RESKEY_os_username\", \ \"password\": \"$OCF_RESKEY_os_password\"}, \"tenantName\": \"$OCF_RESKEY_os_tenant_name\"}}" \ -H "Content-type: application/json" $OCF_RESKEY_keystone_get_token_url | tr ',' '\n' | grep '"id":' \ | cut -d'"' -f4 | head --lines 1` http_code=`curl --write-out %{http_code} --output /dev/null -sH "X-Auth-Token: $token" $OCF_RESKEY_url` rc=$? if [ $rc -ne 0 ] || [ $http_code -ne 200 ]; then ocf_log err "Failed to connect to the OpenStack Cinder API (cinder-api): $rc and $http_code" return $OCF_NOT_RUNNING fi fi ocf_log debug "OpenStack Cinder API (cinder-api) monitor succeeded" return $OCF_SUCCESS } cinder_api_start() { local rc cinder_api_status rc=$? if [ $rc -eq $OCF_SUCCESS ]; then ocf_log info "OpenStack Cinder API (cinder-api) already running" return $OCF_SUCCESS fi # run the actual cinder-api daemon. Don't use ocf_run as we're sending the tool's output # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid # Spin waiting for the server to come up. # Let the CRM/LRM time us out if required while true; do cinder_api_monitor rc=$? [ $rc -eq $OCF_SUCCESS ] && break if [ $rc -ne $OCF_NOT_RUNNING ]; then ocf_log err "OpenStack Cinder API (cinder-api) start failed" exit $OCF_ERR_GENERIC fi sleep 1 done ocf_log info "OpenStack Cinder API (cinder-api) started" return $OCF_SUCCESS } cinder_api_stop() { local rc local pid cinder_api_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then ocf_log info "OpenStack Cinder API (cinder-api) already stopped" return $OCF_SUCCESS fi # Try SIGTERM pid=`cat $OCF_RESKEY_pid` ocf_run kill -s TERM $pid rc=$? if [ $rc -ne 0 ]; then ocf_log err "OpenStack Cinder API (cinder-api) couldn't be stopped" exit $OCF_ERR_GENERIC fi # stop waiting shutdown_timeout=15 if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) fi count=0 while [ $count -lt $shutdown_timeout ]; do cinder_api_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then break fi count=`expr $count + 1` sleep 1 ocf_log debug "OpenStack Cinder API (cinder-api) still hasn't stopped yet. Waiting ..." 
done cinder_api_status rc=$? if [ $rc -ne $OCF_NOT_RUNNING ]; then # SIGTERM didn't help either, try SIGKILL ocf_log info "OpenStack Cinder API (cinder-api) failed to stop after ${shutdown_timeout}s \ using SIGTERM. Trying SIGKILL ..." ocf_run kill -s KILL $pid fi ocf_log info "OpenStack Cinder API (cinder-api) stopped" rm -f $OCF_RESKEY_pid return $OCF_SUCCESS } ####################################################################### case "$1" in meta-data) meta_data exit $OCF_SUCCESS;; usage|help) usage exit $OCF_SUCCESS;; esac # Anything except meta-data and help must pass validation cinder_api_validate || exit $? # What kind of method was invoked? case "$1" in start) cinder_api_start;; stop) cinder_api_stop;; status) cinder_api_status;; monitor) cinder_api_monitor;; validate-all) ;; *) usage exit $OCF_ERR_UNIMPLEMENTED;; esac openstack-resource-agents-2012.2~f3/ocf/quantum-agent-l30000664000000000000000000002702312032330575021622 0ustar rootroot#!/bin/sh # # # OpenStack L3 Service (quantum-l3-agent) # # Description: Manages an OpenStack L3 Service (quantum-l3-agent) process as an HA resource # # Authors: Emilien Macchi # Mainly inspired by the Nova Network resource agent written by Emilien Macchi & Sebastien Han # # Support: openstack@lists.launchpad.net # License: Apache Software License (ASL) 2.0 # # # See usage() function below for more details ... # # OCF instance parameters: # OCF_RESKEY_binary # OCF_RESKEY_config # OCF_RESKEY_plugin_config # OCF_RESKEY_user # OCF_RESKEY_pid # OCF_RESKEY_quantum_server_port # OCF_RESKEY_additional_parameters ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ####################################################################### # Fill in some defaults if no values are specified OCF_RESKEY_binary_default="quantum-l3-agent" OCF_RESKEY_config_default="/etc/quantum/quantum.conf" OCF_RESKEY_plugin_config_default="/etc/quantum/l3_agent.ini" OCF_RESKEY_user_default="quantum" OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" OCF_RESKEY_quantum_server_port_default="9696" : ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} : ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} : ${OCF_RESKEY_plugin_config=${OCF_RESKEY_plugin_config_default}} : ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} : ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} : ${OCF_RESKEY_quantum_server_port=${OCF_RESKEY_quantum_server_port_default}} ####################################################################### usage() { cat < 1.0 Resource agent for the OpenStack Quantum L3 Service (quantum-l3-agent) May manage a quantum-l3-agent instance or a clone set that creates a distributed quantum-l3-agent cluster. Manages the OpenStack L3 Service (quantum-l3-agent) Location of the OpenStack L3 Server server binary (quantum-l3-agent) OpenStack L3 Server server binary (quantum-l3-agent) Location of the OpenStack Quantum Service (quantum-server) configuration file OpenStack L3 Server (quantum-server) config file Location of the OpenStack L3 Service (quantum-l3-agent) configuration file OpenStack L3 Server (quantum-l3-agent) config file User running OpenStack L3 Service (quantum-l3-agent) OpenStack L3 Service (quantum-l3-agent) user The pid file to use for this OpenStack L3 Service (quantum-l3-agent) instance OpenStack L3 Service (quantum-l3-agent) pid file The listening port number of the AMQP server. 
Mandatory to perform a monitor check AMQP listening port Additional parameters to pass on to the OpenStack L3 Service (quantum-l3-agent) Additional parameters for quantum-l3-agent END } ####################################################################### # Functions invoked by resource manager actions quantum_l3_agent_validate() { local rc check_binary $OCF_RESKEY_binary check_binary netstat # A config file on shared storage that is not available # during probes is OK. if [ ! -f $OCF_RESKEY_config ]; then if ! ocf_is_probe; then ocf_log err "Config $OCF_RESKEY_config doesn't exist" return $OCF_ERR_INSTALLED fi ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" fi getent passwd $OCF_RESKEY_user >/dev/null 2>&1 rc=$? if [ $rc -ne 0 ]; then ocf_log err "User $OCF_RESKEY_user doesn't exist" return $OCF_ERR_INSTALLED fi true } quantum_l3_agent_status() { local pid local rc if [ ! -f $OCF_RESKEY_pid ]; then ocf_log info "OpenStack L3 Server (quantum-l3-agent) is not running" return $OCF_NOT_RUNNING else pid=`cat $OCF_RESKEY_pid` fi ocf_run -warn kill -s 0 $pid rc=$? if [ $rc -eq 0 ]; then return $OCF_SUCCESS else ocf_log info "Old PID file found, but OpenStack L3 Server (quantum-l3-agent) is not running" return $OCF_NOT_RUNNING fi } quantum_l3_agent_monitor() { local rc local pid local network_amqp_check quantum_l3_agent_status rc=$? # If status returned anything but success, return that immediately if [ $rc -ne $OCF_SUCCESS ]; then return $rc fi # Check the connections according to the PID. # We are sure to hit the scheduler process and not other Quantum process with the same connection behavior (for example quantum-server) pid=`cat $OCF_RESKEY_pid` # check the connections according to the PID network_amqp_check=`netstat -punt | grep -s "$OCF_RESKEY_quantum_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` rc=$? if [ $rc -ne 0 ]; then ocf_log err "Quantum L3 Server is not connected to the Quantum server: $rc" return $OCF_NOT_RUNNING fi ocf_log debug "OpenStack L3 Server (quantum-l3-agent) monitor succeeded" return $OCF_SUCCESS } quantum_l3_agent_start() { local rc quantum_l3_agent_status rc=$? if [ $rc -eq $OCF_SUCCESS ]; then ocf_log info "OpenStack L3 Server (quantum-l3-agent) already running" return $OCF_SUCCESS fi # run the actual quantum-l3-agent daemon. Don't use ocf_run as we're sending the tool's output # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ --config-file=$OCF_RESKEY_plugin_config --log-file=/var/log/quantum/l3-agent.log $OCF_RESKEY_additional_parameters"' >> \ /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid # Spin waiting for the server to come up. # Let the CRM/LRM time us out if required while true; do quantum_l3_agent_monitor rc=$? [ $rc -eq $OCF_SUCCESS ] && break if [ $rc -ne $OCF_NOT_RUNNING ]; then ocf_log err "OpenStack L3 Server (quantum-l3-agent) start failed" exit $OCF_ERR_GENERIC fi sleep 1 done ocf_log info "OpenStack L3 Server (quantum-l3-agent) started" return $OCF_SUCCESS } quantum_l3_agent_stop() { local rc local pid quantum_l3_agent_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then ocf_log info "OpenStack L3 Server (quantum-l3-agent) already stopped" return $OCF_SUCCESS fi # Try SIGTERM pid=`cat $OCF_RESKEY_pid` pid="$pid `ps ax | grep -v grep | grep 'dnsmasq' | cut -c1-5`" ocf_run kill -s TERM $pid rc=$? 
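# The TERM above goes to the agent PID plus every dnsmasq PID collected
# just before it, so those helper processes are stopped together with the
# L3 agent rather than being left behind.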
if [ $rc -ne 0 ]; then ocf_log err "OpenStack L3 Server (quantum-l3-agent) couldn't be stopped" exit $OCF_ERR_GENERIC fi # stop waiting shutdown_timeout=15 if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) fi count=0 while [ $count -lt $shutdown_timeout ]; do quantum_l3_agent_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then break fi count=`expr $count + 1` sleep 1 ocf_log debug "OpenStack L3 Server (quantum-l3-agent) still hasn't stopped yet. Waiting ..." done quantum_l3_agent_status rc=$? if [ $rc -ne $OCF_NOT_RUNNING ]; then # SIGTERM didn't help either, try SIGKILL ocf_log info "OpenStack L3 Server (quantum-l3-agent) failed to stop after ${shutdown_timeout}s \ using SIGTERM. Trying SIGKILL ..." ocf_run kill -s KILL $pid fi ocf_log info "OpenStack L3 Server (quantum-l3-agent) stopped" rm -f $OCF_RESKEY_pid return $OCF_SUCCESS } ####################################################################### case "$1" in meta-data) meta_data exit $OCF_SUCCESS;; usage|help) usage exit $OCF_SUCCESS;; esac # Anything except meta-data and help must pass validation quantum_l3_agent_validate || exit $? # What kind of method was invoked? case "$1" in start) quantum_l3_agent_start;; stop) quantum_l3_agent_stop;; status) quantum_l3_agent_status;; monitor) quantum_l3_agent_monitor;; validate-all) ;; *) usage exit $OCF_ERR_UNIMPLEMENTED;; esac openstack-resource-agents-2012.2~f3/ocf/quantum-server0000664000000000000000000003165112032330575021520 0ustar rootroot#!/bin/sh # # # OpenStack Quantum Server (quantum-server) # # Description: Manages an OpenStack Quantum Server (quantum-server) process as an HA resource # # Authors: Emilien Macchi # Mainly inspired by the Quantum API resource agent written by Sebastien Han : http://goo.gl/s8hOU # Which is also inspired by the Glance API resource agent written by Martin Gerhard Loschwitz from Hastexo: http://goo.gl/whLpr # # Support: openstack@lists.launchpad.net # License: Apache Software License (ASL) 2.0 # # # See usage() function below for more details ... # # OCF instance parameters: # OCF_RESKEY_binary # OCF_RESKEY_config # OCF_RESKEY_plugin_config # OCF_RESKEY_user # OCF_RESKEY_pid # OCF_RESKEY_os_username # OCF_RESKEY_os_password # OCF_RESKEY_os_tenant_name # OCF_RESKEY_keystone_get_token_url # OCF_RESKEY_additional_parameters ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . 
${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ####################################################################### # Fill in some defaults if no values are specified OCF_RESKEY_binary_default="quantum-server" OCF_RESKEY_config_default="/etc/quantum/quantum.conf" OCF_RESKEY_plugin_config_default="/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini" OCF_RESKEY_user_default="quantum" OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" OCF_RESKEY_url_default="http://127.0.0.1:9696" OCF_RESKEY_keystone_get_token_url_default="http://127.0.0.1:5000/v2.0/tokens" : ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} : ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} : ${OCF_RESKEY_plugin_config=${OCF_RESKEY_plugin_config_default}} : ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} : ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} : ${OCF_RESKEY_url=${OCF_RESKEY_url_default}} : ${OCF_RESKEY_keystone_get_token_url=${OCF_RESKEY_keystone_get_token_url_default}} ####################################################################### usage() { cat < 1.0 Resource agent for the OpenStack Quantum Server (quantum-server) May manage a quantum-server instance or a clone set that creates a distributed quantum-server cluster. Manages the OpenStack Quantum Server (quantum-server) Location of the OpenStack Quantum Server server binary (quantum-server) OpenStack Quantum Server server binary (quantum-server) Location of the OpenStack Quantum Server (quantum-server) configuration file OpenStack Quantum Server (quantum-server) config file Location of the OpenStack Default Plugin (Open-vSwitch) configuration file OpenStack OVS (quantum-ovs) config file User running OpenStack Quantum Server (quantum-server) OpenStack Quantum Server (quantum-server) user The pid file to use for this OpenStack Quantum Server (quantum-server) instance OpenStack Quantum Server (quantum-server) pid file The default URL to use for monitoring this instance (quantum-server) via curl. Important note: the monitor function doesn't accept http return code different than 200, for instance redirection code will generate an error. Don't forget the '/' at the end of your url endpoint. For example http://127.0.0.1:9696/v1.1 won't work and http://127.0.0.1:9696/v1.1/ will. OpenStack Quantum API (quantum-server) monitor url The default URL to use to acquire a Quantum API (quantum-server) token for monitoring this instance of OpenStack Quantum API (quantum-server) OpenStack Quantum API (quantum-server) url The username to use when connecting with Quantum API (quantum-server) for monitoring purposes Quantum API (quantum-server) monitoring login The password to use when connecting Quantum API (quantum-server) for monitoring purposes Quantum API (quantum-server) monitoring password The tenant to use when connecting Quantum API (quantum-server) for monitoring purposes Quantum API (quantum-server) monitoring tenant Additional parameters to pass on to the OpenStack Quantum Server (quantum-server) Additional parameters for quantum-server END } ####################################################################### # Functions invoked by resource manager actions quantum_server_validate() { local rc check_binary $OCF_RESKEY_binary check_binary curl check_binary tr check_binary grep check_binary cut check_binary head # A config file on shared storage that is not available # during probes is OK. if [ ! -f $OCF_RESKEY_config ]; then if ! 
ocf_is_probe; then ocf_log err "Config $OCF_RESKEY_config doesn't exist" return $OCF_ERR_INSTALLED fi ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" fi getent passwd $OCF_RESKEY_user >/dev/null 2>&1 rc=$? if [ $rc -ne 0 ]; then ocf_log err "User $OCF_RESKEY_user doesn't exist" return $OCF_ERR_INSTALLED fi true } quantum_server_status() { local pid local rc if [ ! -f $OCF_RESKEY_pid ]; then ocf_log info "OpenStack Quantum Server (quantum-server) is not running" return $OCF_NOT_RUNNING else pid=`cat $OCF_RESKEY_pid` fi ocf_run -warn kill -s 0 $pid rc=$? if [ $rc -eq 0 ]; then return $OCF_SUCCESS else ocf_log info "Old PID file found, but OpenStack Quantum Server (quantum-server) is not running" return $OCF_NOT_RUNNING fi } quantum_server_monitor() { local rc local token local http_code quantum_server_status rc=$? # If status returned anything but success, return that immediately if [ $rc -ne $OCF_SUCCESS ]; then return $rc fi # Check detailed information about this specific version of the API. if [ -n "$OCF_RESKEY_os_username" ] && [ -n "$OCF_RESKEY_os_password" ] \ && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_keystone_get_token_url" ]; then token=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$OCF_RESKEY_os_username\", \ \"password\": \"$OCF_RESKEY_os_password\"}, \"tenantName\": \"$OCF_RESKEY_os_tenant_name\"}}" \ -H "Content-type: application/json" $OCF_RESKEY_keystone_get_token_url | tr ',' '\n' | grep '"id":' \ | cut -d'"' -f4 | head --lines 1` http_code=`curl --write-out %{http_code} --output /dev/null -sH "X-Auth-Token: $token" $OCF_RESKEY_url` rc=$? if [ $rc -ne 0 ] || [ $http_code -ne 200 ]; then ocf_log err "Failed to connect to the OpenStack Quantum API (quantum-server): $rc and $http_code" return $OCF_NOT_RUNNING fi fi ocf_log debug "OpenStack Quantum Server (quantum-server) monitor succeeded" return $OCF_SUCCESS } quantum_server_start() { local rc quantum_server_status rc=$? if [ $rc -eq $OCF_SUCCESS ]; then ocf_log info "OpenStack Quantum Server (quantum-server) already running" return $OCF_SUCCESS fi # run the actual quantum-server daemon with correct configurations files (server + plugin) # Don't use ocf_run as we're sending the tool's output # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ --config-file=$OCF_RESKEY_plugin_config --log-file=/var/log/quantum/server.log $OCF_RESKEY_additional_parameters"' >> \ /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid # Spin waiting for the server to come up. # Let the CRM/LRM time us out if required while true; do quantum_server_monitor rc=$? [ $rc -eq $OCF_SUCCESS ] && break if [ $rc -ne $OCF_NOT_RUNNING ]; then ocf_log err "OpenStack Quantum Server (quantum-server) start failed" exit $OCF_ERR_GENERIC fi sleep 1 done ocf_log info "OpenStack Quantum Server (quantum-server) started" return $OCF_SUCCESS } quantum_server_stop() { local rc local pid quantum_server_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then ocf_log info "OpenStack Quantum Server (quantum-server) already stopped" return $OCF_SUCCESS fi # Try SIGTERM pid=`cat $OCF_RESKEY_pid` ocf_run kill -s TERM $pid rc=$? 
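# The wait loop below is bounded by a timeout derived from the CRM stop
# timeout when one is supplied: for example, a 60000 ms meta timeout yields
# (60000/1000)-5 = 55 seconds of waiting; otherwise the 15 second default is
# used before escalating from SIGTERM to SIGKILL.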
if [ $rc -ne 0 ]; then ocf_log err "OpenStack Quantum Server (quantum-server) couldn't be stopped" exit $OCF_ERR_GENERIC fi # stop waiting shutdown_timeout=15 if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) fi count=0 while [ $count -lt $shutdown_timeout ]; do quantum_server_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then break fi count=`expr $count + 1` sleep 1 ocf_log debug "OpenStack Quantum Server (quantum-server) still hasn't stopped yet. Waiting ..." done quantum_server_status rc=$? if [ $rc -ne $OCF_NOT_RUNNING ]; then # SIGTERM didn't help either, try SIGKILL ocf_log info "OpenStack Quantum Server (quantum-server) failed to stop after ${shutdown_timeout}s \ using SIGTERM. Trying SIGKILL ..." ocf_run kill -s KILL $pid fi ocf_log info "OpenStack Quantum Server (quantum-server) stopped" rm -f $OCF_RESKEY_pid return $OCF_SUCCESS } ####################################################################### case "$1" in meta-data) meta_data exit $OCF_SUCCESS;; usage|help) usage exit $OCF_SUCCESS;; esac # Anything except meta-data and help must pass validation quantum_server_validate || exit $? # What kind of method was invoked? case "$1" in start) quantum_server_start;; stop) quantum_server_stop;; status) quantum_server_status;; monitor) quantum_server_monitor;; validate-all) ;; *) usage exit $OCF_ERR_UNIMPLEMENTED;; esac openstack-resource-agents-2012.2~f3/ocf/glance-registry0000664000000000000000000003006112032330575021613 0ustar rootroot#!/bin/sh # # # OpenStack ImageService (glance-registry) # # Description: Manages an OpenStack ImageService (glance-registry) process as an HA resource # # Authors: Martin Gerhard Loschwitz # # Support: openstack@lists.launchpad.net # License: Apache Software License (ASL) 2.0 # # (c) 2012 hastexo Professional Services GmbH # # See usage() function below for more details ... # # OCF instance parameters: # OCF_RESKEY_binary # OCF_RESKEY_config # OCF_RESKEY_os_username # OCF_RESKEY_os_password # OCF_RESKEY_os_tenant_name # OCF_RESKEY_keystone_get_token_url # OCF_RESKEY_url # OCF_RESKEY_user # OCF_RESKEY_pid # OCF_RESKEY_additional_parameters ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ####################################################################### # Fill in some defaults if no values are specified OCF_RESKEY_binary_default="glance-registry" OCF_RESKEY_config_default="/etc/glance/glance-registry.conf" OCF_RESKEY_user_default="glance" OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" OCF_RESKEY_url_default="http://127.0.0.1:9191/images" OCF_RESKEY_keystone_get_token_url_default="http://127.0.0.1:5000/v2.0/tokens" : ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} : ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} : ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} : ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} : ${OCF_RESKEY_url=${OCF_RESKEY_url_default}} : ${OCF_RESKEY_keystone_get_token_url=${OCF_RESKEY_keystone_get_token_url_default}} ####################################################################### usage() { cat < 1.0 Resource agent for the OpenStack ImageService Service (glance-registry) May manage a glance-registry instance or a clone set that creates a distributed glance-registry cluster. 
Manages the OpenStack ImageService (glance-registry) Location of the OpenStack ImageService server binary (glance-registry) OpenStack ImageService server binary (glance-registry) Location of the OpenStack ImageService (glance-registry) configuration file OpenStack ImageService (glance registry) config file User running OpenStack ImageService (glance-registry) OpenStack ImageService (glance-registry) user The pid file to use for this OpenStack ImageService (glance-registry) instance OpenStack ImageService (glance-registry) pid file The default URL to use for monitoring this instance (glance-registry) via curl. Important note: the monitor function doesn't accept http return code different than 200, for instance redirection code will generate an error. OpenStack ImageService (glance-registry) monitor url The default URL to use to acquire a ImageService (glance-registry) token for monitoring this instance of OpenStack ImageService (glance-registry) OpenStack ImageService (glance-registry) url The username to use when connecting with ImageService (glance-registry) for monitoring purposes ImageService (glance-registry) monitoring login The password to use when connecting ImageService (glance-registry) for monitoring purposes ImageService (glance-registry) monitoring password The tenant to use when connecting ImageService (glance-registry) for monitoring purposes ImageService (glance-registry) monitoring tenant Additional parameters to pass on to the OpenStack ImageService (glance-registry) Additional parameters for glance-registry END } ####################################################################### # Functions invoked by resource manager actions glance_registry_validate() { local rc check_binary $OCF_RESKEY_binary check_binary curl check_binary tr check_binary grep check_binary cut check_binary head # A config file on shared storage that is not available # during probes is OK. if [ ! -f $OCF_RESKEY_config ]; then if ! ocf_is_probe; then ocf_log err "Config $OCF_RESKEY_config doesn't exist" return $OCF_ERR_INSTALLED fi ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" fi getent passwd $OCF_RESKEY_user >/dev/null 2>&1 rc=$? if [ $rc -ne 0 ]; then ocf_log err "User $OCF_RESKEY_user doesn't exist" return $OCF_ERR_INSTALLED fi true } glance_registry_status() { local pid local rc if [ ! -f $OCF_RESKEY_pid ]; then ocf_log info "OpenStack ImageService (glance-registry) is not running" return $OCF_NOT_RUNNING else pid=`cat $OCF_RESKEY_pid` fi ocf_run -warn kill -s 0 $pid rc=$? if [ $rc -eq 0 ]; then return $OCF_SUCCESS else ocf_log info "Old PID file found, but OpenStack ImageService (glance-registry) is not running" return $OCF_NOT_RUNNING fi } glance_registry_monitor() { local rc local token local http_code glance_registry_status rc=$? # If status returned anything but success, return that immediately if [ $rc -ne $OCF_SUCCESS ]; then return $rc fi # Check whether we are supposed to monitor by logging into glance-registry # and do it if that's the case. 
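# The check below follows the usual two-step Keystone flow: first a token is
# requested from keystone_get_token_url with a passwordCredentials payload,
# then the registry URL is queried with that token in the X-Auth-Token
# header; anything other than an HTTP 200 response is treated as a failure.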
if [ -n "$OCF_RESKEY_os_username" ] && [ -n "$OCF_RESKEY_os_password" ] \ && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_keystone_get_token_url" ]; then token=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$OCF_RESKEY_os_username\", \ \"password\": \"$OCF_RESKEY_os_password\"}, \"tenantName\": \"$OCF_RESKEY_os_tenant_name\"}}" \ -H "Content-type: application/json" $OCF_RESKEY_keystone_get_token_url | tr ',' '\n' | grep '"id":' \ | cut -d'"' -f4 | head --lines 1` http_code=`curl --write-out %{http_code} --output /dev/null -sH "X-Auth-Token: $token" $OCF_RESKEY_url` rc=$? if [ $rc -ne 0 ] || [ $http_code -ne 200 ]; then ocf_log err "Failed to connect to the OpenStack ImageService (glance-registry): $rc and $http_code" return $OCF_NOT_RUNNING fi fi ocf_log debug "OpenStack ImageService (glance-registry) monitor succeeded" return $OCF_SUCCESS } glance_registry_start() { local rc glance_registry_status rc=$? if [ $rc -eq $OCF_SUCCESS ]; then ocf_log info "OpenStack ImageService (glance-registry) already running" return $OCF_SUCCESS fi # run the actual glance-registry daemon. Don't use ocf_run as we're sending the tool's output # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file $OCF_RESKEY_config \ $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid # Spin waiting for the server to come up. # Let the CRM/LRM time us out if required while true; do glance_registry_monitor rc=$? [ $rc -eq $OCF_SUCCESS ] && break if [ $rc -ne $OCF_NOT_RUNNING ]; then ocf_log err "OpenStack ImageService (glance-registry) start failed" exit $OCF_ERR_GENERIC fi sleep 1 done ocf_log info "OpenStack ImageService (glance-registry) started" return $OCF_SUCCESS } glance_registry_stop() { local rc local pid glance_registry_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then ocf_log info "OpenStack ImageService (glance-registry) already stopped" return $OCF_SUCCESS fi # Try SIGTERM pid=`cat $OCF_RESKEY_pid` ocf_run kill -s TERM $pid rc=$? if [ $rc -ne 0 ]; then ocf_log err "OpenStack ImageService (glance-registry) couldn't be stopped" exit $OCF_ERR_GENERIC fi # stop waiting shutdown_timeout=15 if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) fi count=0 while [ $count -lt $shutdown_timeout ]; do glance_registry_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then break fi count=`expr $count + 1` sleep 1 ocf_log debug "OpenStack ImageService (glance-registry) still hasn't stopped yet. Waiting ..." done glance_registry_status rc=$? if [ $rc -ne $OCF_NOT_RUNNING ]; then # SIGTERM didn't help either, try SIGKILL ocf_log info "OpenStack ImageService (glance-registry) failed to stop after ${shutdown_timeout}s \ using SIGTERM. Trying SIGKILL ..." ocf_run kill -s KILL $pid fi ocf_log info "OpenStack ImageService (glance-registry) stopped" rm -f $OCF_RESKEY_pid return $OCF_SUCCESS } ####################################################################### case "$1" in meta-data) meta_data exit $OCF_SUCCESS;; usage|help) usage exit $OCF_SUCCESS;; esac # Anything except meta-data and help must pass validation glance_registry_validate || exit $? # What kind of method was invoked? 
case "$1" in start) glance_registry_start;; stop) glance_registry_stop;; status) glance_registry_status;; monitor) glance_registry_monitor;; validate-all) ;; *) usage exit $OCF_ERR_UNIMPLEMENTED;; esac openstack-resource-agents-2012.2~f3/ocf/cinder-schedule0000664000000000000000000002474512032330575021566 0ustar rootroot#!/bin/sh # # # OpenStack Cinder Scheduler Service (cinder-schedule) # # Description: Manages an OpenStack Cinder Scheduler Service (cinder-schedule) process as an HA resource # # Authors: Emilien Macchi # Mainly inspired by the Nova Scheduler resource agent written by Sebastien Han # # Support: openstack@lists.launchpad.net # License: Apache Software License (ASL) 2.0 # # # See usage() function below for more details ... # # OCF instance parameters: # OCF_RESKEY_binary # OCF_RESKEY_config # OCF_RESKEY_user # OCF_RESKEY_pid # OCF_RESKEY_monitor_binary # OCF_RESKEY_amqp_server_port # OCF_RESKEY_additional_parameters ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ####################################################################### # Fill in some defaults if no values are specified OCF_RESKEY_binary_default="cinder-schedule" OCF_RESKEY_config_default="/etc/cinder/cinder.conf" OCF_RESKEY_user_default="cinder" OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" OCF_RESKEY_amqp_server_port_default="5672" : ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} : ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} : ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} : ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} : ${OCF_RESKEY_amqp_server_port=${OCF_RESKEY_amqp_server_port_default}} ####################################################################### usage() { cat < 1.0 Resource agent for the OpenStack Cinder Scheduler Service (cinder-schedule) May manage a cinder-schedule instance or a clone set that creates a distributed cinder-schedule cluster. Manages the OpenStack Cinder Scheduler Service (cinder-schedule) Location of the OpenStack Cinder Scheduler server binary (cinder-schedule) OpenStack Cinder Scheduler server binary (cinder-schedule) Location of the OpenStack Cinder Scheduler Service (cinder-schedule) configuration file OpenStack Cinder Scheduler (cinder-schedule registry) config file User running OpenStack Cinder Scheduler Service (cinder-schedule) OpenStack Cinder Scheduler Service (cinder-schedule) user The pid file to use for this OpenStack Cinder Scheduler Service (cinder-schedule) instance OpenStack Cinder Scheduler Service (cinder-schedule) pid file The listening port number of the AMQP server. Use for monitoring purposes AMQP listening port Additional parameters to pass on to the OpenStack Cinder Scheduler Service (cinder-schedule) Additional parameters for cinder-schedule END } ####################################################################### # Functions invoked by resource manager actions cinder_scheduler_check_port() { # This function has been taken from the squid RA and improved a bit # The length of the integer must be 4 # Examples of valid port: "1080", "0080" # Examples of invalid port: "1080bad", "0", "0000", "" local int local cnt int="$1" cnt=${#int} echo $int |egrep -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*' if [ $? 
-ne 0 ] || [ $cnt -ne 4 ]; then ocf_log err "Invalid port number: $1" exit $OCF_ERR_CONFIGURED fi } cinder_scheduler_validate() { local rc check_binary $OCF_RESKEY_binary check_binary netstat cinder_scheduler_check_port $OCF_RESKEY_amqp_server_port # A config file on shared storage that is not available # during probes is OK. if [ ! -f $OCF_RESKEY_config ]; then if ! ocf_is_probe; then ocf_log err "Config $OCF_RESKEY_config doesn't exist" return $OCF_ERR_INSTALLED fi ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" fi getent passwd $OCF_RESKEY_user >/dev/null 2>&1 rc=$? if [ $rc -ne 0 ]; then ocf_log err "User $OCF_RESKEY_user doesn't exist" return $OCF_ERR_INSTALLED fi true } cinder_scheduler_status() { local pid local rc if [ ! -f $OCF_RESKEY_pid ]; then ocf_log info "OpenStack Cinder Scheduler (cinder-schedule) is not running" return $OCF_NOT_RUNNING else pid=`cat $OCF_RESKEY_pid` fi ocf_run -warn kill -s 0 $pid rc=$? if [ $rc -eq 0 ]; then return $OCF_SUCCESS else ocf_log info "Old PID file found, but OpenStack Cinder Scheduler (cinder-schedule) is not running" return $OCF_NOT_RUNNING fi } cinder_scheduler_monitor() { local rc local pid local scheduler_amqp_check cinder_scheduler_status rc=$? # If status returned anything but success, return that immediately if [ $rc -ne $OCF_SUCCESS ]; then return $rc fi # Check the connections according to the PID. # We are sure to hit the scheduler process and not other Cinder process with the same connection behavior (for example cinder-api) pid=`cat $OCF_RESKEY_pid` scheduler_amqp_check=`netstat -punt | grep -s "$OCF_RESKEY_amqp_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` rc=$? if [ $rc -ne 0 ]; then ocf_log err "Cinder Scheduler is not connected to the AMQP server : $rc" return $OCF_NOT_RUNNING fi ocf_log debug "OpenStack Cinder Scheduler (cinder-schedule) monitor succeeded" return $OCF_SUCCESS } cinder_scheduler_start() { local rc cinder_scheduler_status rc=$? if [ $rc -eq $OCF_SUCCESS ]; then ocf_log info "OpenStack Cinder Scheduler (cinder-schedule) already running" return $OCF_SUCCESS fi # run the actual cinder-schedule daemon. Don't use ocf_run as we're sending the tool's output # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid # Spin waiting for the server to come up. while true; do cinder_scheduler_monitor rc=$? [ $rc -eq $OCF_SUCCESS ] && break if [ $rc -ne $OCF_NOT_RUNNING ]; then ocf_log err "OpenStack Cinder Scheduler (cinder-schedule) start failed" exit $OCF_ERR_GENERIC fi sleep 1 done ocf_log info "OpenStack Cinder Scheduler (cinder-schedule) started" return $OCF_SUCCESS } cinder_scheduler_stop() { local rc local pid cinder_scheduler_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then ocf_log info "OpenStack Cinder Scheduler (cinder-schedule) already stopped" return $OCF_SUCCESS fi # Try SIGTERM pid=`cat $OCF_RESKEY_pid` ocf_run kill -s TERM $pid rc=$? if [ $rc -ne 0 ]; then ocf_log err "OpenStack Cinder Scheduler (cinder-schedule) couldn't be stopped" exit $OCF_ERR_GENERIC fi # stop waiting shutdown_timeout=15 if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) fi count=0 while [ $count -lt $shutdown_timeout ]; do cinder_scheduler_status rc=$? 
if [ $rc -eq $OCF_NOT_RUNNING ]; then break fi count=`expr $count + 1` sleep 1 ocf_log debug "OpenStack Cinder Scheduler (cinder-schedule) still hasn't stopped yet. Waiting ..." done cinder_scheduler_status rc=$? if [ $rc -ne $OCF_NOT_RUNNING ]; then # SIGTERM didn't help either, try SIGKILL ocf_log info "OpenStack Cinder Scheduler (cinder-schedule) failed to stop after ${shutdown_timeout}s \ using SIGTERM. Trying SIGKILL ..." ocf_run kill -s KILL $pid fi ocf_log info "OpenStack Cinder Scheduler (cinder-schedule) stopped" rm -f $OCF_RESKEY_pid return $OCF_SUCCESS } ####################################################################### case "$1" in meta-data) meta_data exit $OCF_SUCCESS;; usage|help) usage exit $OCF_SUCCESS;; esac # Anything except meta-data and help must pass validation cinder_scheduler_validate || exit $? # What kind of method was invoked? case "$1" in start) cinder_scheduler_start;; stop) cinder_scheduler_stop;; status) cinder_scheduler_status;; monitor) cinder_scheduler_monitor;; validate-all) ;; *) usage exit $OCF_ERR_UNIMPLEMENTED;; esac openstack-resource-agents-2012.2~f3/ocf/nova-consoleauth0000664000000000000000000003634712032330575022016 0ustar rootroot#!/bin/sh # # # OpenStack Nova ConsoleAuth (nova-consoleauth) # # Description: Manages an OpenStack Nova ConsoleAuth (nova-consoleauth) process as an HA resource # # Authors: Sébastien Han # Mainly inspired by the Glance API resource agent written by Martin Gerhard Loschwitz from Hastexo: http://goo.gl/whLpr # # Support: openstack@lists.launchpad.net # License: Apache Software License (ASL) 2.0 # # # See usage() function below for more details ... # # OCF instance parameters: # OCF_RESKEY_binary # OCF_RESKEY_config # OCF_RESKEY_user # OCF_RESKEY_pid # OCF_RESKEY_database_server_port # OCF_RESKEY_amqp_server_port # OCF_RESKEY_zeromq # OCF_RESKEY_additional_parameters ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ####################################################################### # Fill in some defaults if no values are specified OCF_RESKEY_binary_default="nova-consoleauth" OCF_RESKEY_config_default="/etc/nova/nova.conf" OCF_RESKEY_user_default="nova" OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" OCF_RESKEY_database_server_port_default="3306" OCF_RESKEY_amqp_server_port_default="5672" OCF_RESKEY_zeromq_default="false" : ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} : ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} : ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} : ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} : ${OCF_RESKEY_database_server_port=${OCF_RESKEY_database_server_port_default}} : ${OCF_RESKEY_amqp_server_port=${OCF_RESKEY_amqp_server_port_default}} : ${OCF_RESKEY_zeromq=${OCF_RESKEY_zeromq_default}} ####################################################################### usage() { cat < 1.0 Resource agent for the OpenStack Nova Console Auth Service (nova-consoleauth) May manage a nova-consoleauth instance or a clone set that creates a distributed nova-consoleauth cluster. 
Manages the OpenStack Nova Console Auth (nova-consoleauth) Location of the OpenStack Nova Console Auth server binary (nova-consoleauth) OpenStack Nova Console Auth server binary (nova-consoleauth) Location of the OpenStack Nova Console Auth (nova-consoleauth) configuration file OpenStack Nova Console Auth (nova-consoleauth registry) config file User running OpenStack Nova Console Auth (nova-consoleauth) OpenStack Nova Console Auth (nova-consoleauth) user The pid file to use for this OpenStack Nova Console Auth (nova-consoleauth) instance OpenStack Nova Console Auth (nova-consoleauth) pid file The listening port number of the database server. Mandatory to perform a monitor check Database listening port The listening port number of the AMQP server. Mandatory to perform a monitor check AMQP listening port If zeromq is used, this will disable the connection test to the AMQP server Zero-MQ usage Additional parameters to pass on to the OpenStack Nova Console Auth (nova-consoleauth) Additional parameters for nova-consoleauth END } ####################################################################### # Functions invoked by resource manager actions nova_consoleauth_check_port() { # This function has been taken from the squid RA and improved a bit # The length of the integer must be 4 # Examples of valid port: "1080", "0080" # Examples of invalid port: "1080bad", "0", "0000", "" local int local cnt int="$1" cnt=${#int} echo $int |egrep -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*' if [ $? -ne 0 ] || [ $cnt -ne 4 ]; then ocf_log err "Invalid port number: $1" exit $OCF_ERR_CONFIGURED fi } nova_consoleauth_validate() { local rc check_binary $OCF_RESKEY_binary check_binary netstat nova_consoleauth_check_port $OCF_RESKEY_database_server_port nova_consoleauth_check_port $OCF_RESKEY_amqp_server_port # A config file on shared storage that is not available # during probes is OK. if [ ! -f $OCF_RESKEY_config ]; then if ! ocf_is_probe; then ocf_log err "Config $OCF_RESKEY_config doesn't exist" return $OCF_ERR_INSTALLED fi ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" fi getent passwd $OCF_RESKEY_user >/dev/null 2>&1 rc=$? if [ $rc -ne 0 ]; then ocf_log err "User $OCF_RESKEY_user doesn't exist" return $OCF_ERR_INSTALLED fi true } nova_consoleauth_status() { local pid local rc if [ ! -f $OCF_RESKEY_pid ]; then ocf_log info "OpenStack Nova Console Auth (nova-consoleauth) is not running" return $OCF_NOT_RUNNING else pid=`cat $OCF_RESKEY_pid` fi ocf_run -warn kill -s 0 $pid rc=$? if [ $rc -eq 0 ]; then return $OCF_SUCCESS else ocf_log info "Old PID file found, but OpenStack Nova Console Auth (nova-consoleauth) is not running" return $OCF_NOT_RUNNING fi } nova_consoleauth_monitor() { local rc local token local rc_database local rc_amqp local console_db_check local console_amqp_check nova_consoleauth_status rc=$? # If status returned anything but success, return that immediately if [ $rc -ne $OCF_SUCCESS ]; then return $rc fi # Check the connections according to the PID. # We are sure to hit the scheduler process and not other nova process with the same connection behavior (for example nova-scheduler) if ocf_is_true "$OCF_RESKEY_zeromq"; then pid=`cat $OCF_RESKEY_pid` console_db_check=`netstat -punt | grep -s "$OCF_RESKEY_database_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` rc_db=$? 
if [ $rc_db -ne 0 ]; then ocf_log err "Nova Console Auth is not connected to the database server: $rc_db" return $OCF_NOT_RUNNING fi else pid=`cat $OCF_RESKEY_pid` # check the connections according to the PID console_db_check=`netstat -punt | grep -s "$OCF_RESKEY_database_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` rc_db=$? console_amqp_check=`netstat -punt | grep -s "$OCF_RESKEY_amqp_server_port" | egrep -s "$pid" | grep -qs "ESTABLISHED"` rc_amqp=$? if [ $rc_amqp -ne 0 ] || [ $rc_db -ne 0 ]; then ocf_log err "Nova Console Auth is not connected to the AMQP server and/or the database server: AMQP connection test returned $rc_amqp and database connection test returned $rc_db" return $OCF_NOT_RUNNING fi fi ocf_log debug "OpenStack Nova Console Auth (nova-consoleauth) monitor succeeded" return $OCF_SUCCESS } nova_consoleauth_start() { local rc nova_consoleauth_status rc=$? if [ $rc -eq $OCF_SUCCESS ]; then ocf_log info "OpenStack Nova ConsoleAuth (nova-consoleauth) already running" return $OCF_SUCCESS fi # run the actual nova-consoleauth daemon. Don't use ocf_run as we're sending the tool's output # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid # Spin waiting for the server to come up. # Let the CRM/LRM time us out if required while true; do nova_consoleauth_monitor rc=$? [ $rc -eq $OCF_SUCCESS ] && break if [ $rc -ne $OCF_NOT_RUNNING ]; then ocf_log err "OpenStack Nova ConsoleAuth (nova-consoleauth) start failed" exit $OCF_ERR_GENERIC fi sleep 1 done ocf_log info "OpenStack Nova ConsoleAuth (nova-consoleauth) started" return $OCF_SUCCESS } nova_consoleauth_stop() { local rc local pid nova_consoleauth_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then ocf_log info "OpenStack Nova ConsoleAuth (nova-consoleauth) already stopped" return $OCF_SUCCESS fi # Try SIGTERM pid=`cat $OCF_RESKEY_pid` ocf_run kill -s TERM $pid rc=$? if [ $rc -ne 0 ]; then ocf_log err "OpenStack Nova ConsoleAuth (nova-consoleauth) couldn't be stopped" exit $OCF_ERR_GENERIC fi # stop waiting shutdown_timeout=15 if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) fi count=0 while [ $count -lt $shutdown_timeout ]; do nova_consoleauth_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then break fi count=`expr $count + 1` sleep 1 ocf_log debug "OpenStack Nova ConsoleAuth (nova-consoleauth) still hasn't stopped yet. Waiting ..." done nova_consoleauth_status rc=$? if [ $rc -ne $OCF_NOT_RUNNING ]; then # SIGTERM didn't help either, try SIGKILL ocf_log info "OpenStack Nova ConsoleAuth (nova-consoleauth) failed to stop after ${shutdown_timeout}s \ using SIGTERM. Trying SIGKILL ..." ocf_run kill -s KILL $pid fi ocf_log info "OpenStack Nova ConsoleAuth (nova-consoleauth) stopped" rm -f $OCF_RESKEY_pid return $OCF_SUCCESS } ####################################################################### case "$1" in meta-data) meta_data exit $OCF_SUCCESS;; usage|help) usage exit $OCF_SUCCESS;; esac # Anything except meta-data and help must pass validation nova_consoleauth_validate || exit $? # What kind of method was invoked? 
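# Illustrative Pacemaker configuration (a sketch; the ocf:openstack provider
# name and the timeout values are assumptions about the local deployment).
# Running the agent as a clone set matches the "distributed cluster" case
# mentioned in the metadata:
#
#   crm configure primitive p_nova-consoleauth ocf:openstack:nova-consoleauth \
#     params config="/etc/nova/nova.conf" \
#     op monitor interval="30s" timeout="30s"
#   crm configure clone cl_nova-consoleauth p_nova-consoleauth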
case "$1" in start) nova_consoleauth_start;; stop) nova_consoleauth_stop;; status) nova_consoleauth_status;; monitor) nova_consoleauth_monitor;; validate-all) ;; *) usage exit $OCF_ERR_UNIMPLEMENTED;; esac openstack-resource-agents-2012.2~f3/ocf/nova-novnc0000664000000000000000000002424012032330575020602 0ustar rootroot#!/bin/sh # # # OpenStack Nova VNC Console (nova-novncproxy) # # Description: Manages an OpenStack Nova VNC Console (nova-novncproxy) process as an HA resource # # Authors: Sébastien Han # Mainly inspired by the Glance API resource agent written by Martin Gerhard Loschwitz from Hastexo: http://goo.gl/whLpr # # Support: openstack@lists.launchpad.net # License: Apache Software License (ASL) 2.0 # # # See usage() function below for more details ... # # OCF instance parameters: # OCF_RESKEY_binary # OCF_RESKEY_config # OCF_RESKEY_user # OCF_RESKEY_pid # OCF_RESKEY_console_port # OCF_RESKEY_web # OCF_RESKEY_additional_parameters ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ####################################################################### # Fill in some defaults if no values are specified OCF_RESKEY_binary_default="nova-novncproxy" OCF_RESKEY_config_default="/etc/nova/nova.conf" OCF_RESKEY_user_default="nova" OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" OCF_RESKEY_console_port_default="6080" OCF_RESKEY_web_default="/usr/share/novnc/" : ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} : ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} : ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} : ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} : ${OCF_RESKEY_console_port=${OCF_RESKEY_console_port_default}} : ${OCF_RESKEY_web=${OCF_RESKEY_web_default}} ####################################################################### usage() { cat < 1.0 Resource agent for the OpenStack Nova VNC Console Service (nova-novncproxy) May manage a nova-novncproxy instance or a clone set that creates a distributed nova-novncproxy cluster. Manages the OpenStack Nova VNC Console (nova-novncproxy) Location of the OpenStack Nova VNC Console server binary (nova-novncproxy) OpenStack Nova VNC Console server binary (nova-novncproxy) Location of the OpenStack Nova VNC Console (nova-novncproxy) configuration file OpenStack Nova VNC Console (nova-novncproxy registry) config file User running OpenStack Nova VNC Console (nova-novncproxy) OpenStack Nova VNC Console (nova-novncproxy) user VNC console type running: nova-novnc OpenStack Nova VNC Console (nova-novncproxy) console type VNC console web URL OpenStack Nova VNC Console (nova-novncproxy) web URL The pid file to use for this OpenStack Nova VNC Console (nova-novncproxy) instance OpenStack Nova VNC Console (nova-novncproxy) pid file Additional parameters to pass on to the OpenStack Nova VNC Console (nova-novncproxy) Additional parameters for nova-novncproxy END } ####################################################################### # Functions invoked by resource manager actions nova_vnc_console_validate() { local rc check_binary $OCF_RESKEY_binary check_binary netstat # A config file on shared storage that is not available # during probes is OK. if [ ! -f $OCF_RESKEY_config ]; then if ! ocf_is_probe; then ocf_log err "Config $OCF_RESKEY_config doesn't exist" return $OCF_ERR_INSTALLED fi ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" fi getent passwd $OCF_RESKEY_user >/dev/null 2>&1 rc=$? 
if [ $rc -ne 0 ]; then ocf_log err "User $OCF_RESKEY_user doesn't exist" return $OCF_ERR_INSTALLED fi true } nova_vnc_console_status() { local pid local rc if [ ! -f $OCF_RESKEY_pid ]; then ocf_log info "OpenStack Nova VNC Console (nova-novncproxy) is not running" return $OCF_NOT_RUNNING else pid=`cat $OCF_RESKEY_pid` fi ocf_run -warn kill -s 0 $pid rc=$? if [ $rc -eq 0 ]; then return $OCF_SUCCESS else ocf_log info "Old PID file found, but OpenStack Nova VNC Console (nova-novncproxy) is not running" return $OCF_NOT_RUNNING fi } nova_vnc_console_monitor() { local rc local vnc_list_check nova_vnc_console_status rc=$? # If status returned anything but success, return that immediately if [ $rc -ne $OCF_SUCCESS ]; then return $rc fi # Check whether we are supposed to monitor by logging into nova-novncproxy # and do it if that's the case. vnc_list_check=`netstat -a | grep -s "$OCF_RESKEY_console_port" | grep -qs "LISTEN"` rc=$? if [ $rc -ne 0 ]; then ocf_log err "Nova VNC Console doesn't seem to listen on his default port: $rc" return $OCF_NOT_RUNNING fi ocf_log debug "OpenStack Nova VNC Console (nova-novncproxy) monitor succeeded" return $OCF_SUCCESS } nova_vnc_console_start() { local rc nova_vnc_console_status rc=$? if [ $rc -eq $OCF_SUCCESS ]; then ocf_log info "OpenStack Nova VNC Console (nova-novncproxy) already running" return $OCF_SUCCESS fi # run the actual nova-novncproxy daemon. Don't use ocf_run as we're sending the tool's output # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config --web /usr/share/novnc/ \ $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid # Spin waiting for the server to come up. # Let the CRM/LRM time us out if required while true; do nova_vnc_console_monitor rc=$? [ $rc -eq $OCF_SUCCESS ] && break if [ $rc -ne $OCF_NOT_RUNNING ]; then ocf_log err "OpenStack Nova VNC Console (nova-novncproxy) start failed" exit $OCF_ERR_GENERIC fi sleep 1 done ocf_log info "OpenStack Nova VNC Console (nova-novncproxy) started" return $OCF_SUCCESS } nova_vnc_console_stop() { local rc local pid nova_vnc_console_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then ocf_log info "OpenStack Nova VNC Console (nova-novncproxy) already stopped" return $OCF_SUCCESS fi # Try SIGTERM pid=`cat $OCF_RESKEY_pid` ocf_run kill -s TERM $pid rc=$? if [ $rc -ne 0 ]; then ocf_log err "OpenStack Nova VNC Console (nova-novncproxy) couldn't be stopped" exit $OCF_ERR_GENERIC fi # stop waiting shutdown_timeout=15 if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) fi count=0 while [ $count -lt $shutdown_timeout ]; do nova_vnc_console_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then break fi count=`expr $count + 1` sleep 1 ocf_log debug "OpenStack Nova VNC Console (nova-novncproxy) still hasn't stopped yet. Waiting ..." done nova_vnc_console_status rc=$? if [ $rc -ne $OCF_NOT_RUNNING ]; then # SIGTERM didn't help either, try SIGKILL ocf_log info "OpenStack Nova VNC Console (nova-novncproxy) failed to stop after ${shutdown_timeout}s \ using SIGTERM. Trying SIGKILL ..." 
ocf_run kill -s KILL $pid fi ocf_log info "OpenStack Nova VNC Console (nova-novncproxy) stopped" rm -f $OCF_RESKEY_pid return $OCF_SUCCESS } ####################################################################### case "$1" in meta-data) meta_data exit $OCF_SUCCESS;; usage|help) usage exit $OCF_SUCCESS;; esac # Anything except meta-data and help must pass validation nova_vnc_console_validate || exit $? # What kind of method was invoked? case "$1" in start) nova_vnc_console_start;; stop) nova_vnc_console_stop;; status) nova_vnc_console_status;; monitor) nova_vnc_console_monitor;; validate-all) ;; *) usage exit $OCF_ERR_UNIMPLEMENTED;; esac openstack-resource-agents-2012.2~f3/ocf/nova-cert0000664000000000000000000003520712032330575020421 0ustar rootroot#!/bin/sh # # # OpenStack Nova Cert (nova-cert) # # Description: Manages an OpenStack Nova Cert (nova-cert) process as an HA resource # # Authors: Sébastien Han # Mainly inspired by the Glance API resource agent written by Martin Gerhard Loschwitz from Hastexo: http://goo.gl/whLpr # # Support: openstack@lists.launchpad.net # License: Apache Software License (ASL) 2.0 # # # See usage() function below for more details ... # # OCF instance parameters: # OCF_RESKEY_binary # OCF_RESKEY_config # OCF_RESKEY_user # OCF_RESKEY_pid # OCF_RESKEY_database_server_port # OCF_RESKEY_amqp_server_port # OCF_RESKEY_zeromq # OCF_RESKEY_additional_parameters ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ####################################################################### # Fill in some defaults if no values are specified OCF_RESKEY_binary_default="nova-cert" OCF_RESKEY_config_default="/etc/nova/nova.conf" OCF_RESKEY_user_default="nova" OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" OCF_RESKEY_database_server_port_default="3306" OCF_RESKEY_amqp_server_port_default="5672" OCF_RESKEY_zeromq_default="false" : ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} : ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} : ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} : ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} : ${OCF_RESKEY_database_server_port=${OCF_RESKEY_database_server_port_default}} : ${OCF_RESKEY_amqp_server_port=${OCF_RESKEY_amqp_server_port_default}} : ${OCF_RESKEY_zeromq=${OCF_RESKEY_zeromq_default}} ####################################################################### usage() { cat < 1.0 Resource agent for the OpenStack Nova Cert Service (nova-cert) May manage a nova-cert instance or a clone set that creates a distributed nova-cert cluster. Manages the OpenStack Nova Cert (nova-cert) Location of the OpenStack Nova Cert server binary (nova-cert) OpenStack Nova Cert server binary (nova-cert) Location of the OpenStack Nova Cert (nova-cert) configuration file OpenStack Nova Cert (nova-cert registry) config file User running OpenStack Nova Cert (nova-cert) OpenStack Nova Cert (nova-cert) user The pid file to use for this OpenStack Nova Cert (nova-cert) instance OpenStack Nova Cert (nova-cert) pid file The listening port number of the database server. Mandatory to perform a monitor check Database listening port The listening port number of the AMQP server. 
Mandatory to perform a monitor check AMQP listening port If zeromq is used, this will disable the connection test to the AMQP server Zero-MQ usage Additional parameters to pass on to the OpenStack Nova Cert (nova-cert) Additional parameters for nova-cert END } ####################################################################### # Functions invoked by resource manager actions nova_cert_check_port() { # This function has been taken from the squid RA and improved a bit # The length of the integer must be 4 # Examples of valid port: "1080", "0080" # Examples of invalid port: "1080bad", "0", "0000", "" local int local cnt int="$1" cnt=${#int} echo $int |egrep -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*' if [ $? -ne 0 ] || [ $cnt -ne 4 ]; then ocf_log err "Invalid port number: $1" exit $OCF_ERR_CONFIGURED fi } nova_cert_validate() { local rc check_binary $OCF_RESKEY_binary check_binary netstat nova_cert_check_port $OCF_RESKEY_database_server_port nova_cert_check_port $OCF_RESKEY_amqp_server_port # A config file on shared storage that is not available # during probes is OK. if [ ! -f $OCF_RESKEY_config ]; then if ! ocf_is_probe; then ocf_log err "Config $OCF_RESKEY_config doesn't exist" return $OCF_ERR_INSTALLED fi ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" fi getent passwd $OCF_RESKEY_user >/dev/null 2>&1 rc=$? if [ $rc -ne 0 ]; then ocf_log err "User $OCF_RESKEY_user doesn't exist" return $OCF_ERR_INSTALLED fi true } nova_cert_status() { local pid local rc if [ ! -f $OCF_RESKEY_pid ]; then ocf_log info "OpenStack Nova Cert (nova-cert) is not running" return $OCF_NOT_RUNNING else pid=`cat $OCF_RESKEY_pid` fi ocf_run -warn kill -s 0 $pid rc=$? if [ $rc -eq 0 ]; then return $OCF_SUCCESS else ocf_log info "Old PID file found, but OpenStack Nova Cert (nova-cert) is not running" return $OCF_NOT_RUNNING fi } nova_cert_monitor() { local rc local pid local rc_db local rc_amqp local cert_db_check local cert_amqp_check nova_cert_status rc=$? # If status returned anything but success, return that immediately if [ $rc -ne $OCF_SUCCESS ]; then return $rc fi # Check the connections according to the PID. # We are sure to hit the cert process and not other nova process with the same connection behavior (for example nova-scheduler) if ocf_is_true "$OCF_RESKEY_zeromq"; then pid=`cat $OCF_RESKEY_pid` cert_db_check=`netstat -punt | grep -s "$OCF_RESKEY_database_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` rc_db=$? if [ $rc_db -ne 0 ]; then ocf_log err "Nova Cert is not connected to the database server: $rc_db" return $OCF_NOT_RUNNING fi else pid=`cat $OCF_RESKEY_pid` # check the connections according to the PID cert_db_check=`netstat -punt | grep -s "$OCF_RESKEY_database_server_port" | grep -s "$pid" | grep -sq "ESTABLISHED"` rc_db=$? cert_amqp_check=`netstat -punt | grep -s "$OCF_RESKEY_amqp_server_port" | grep -s "$pid" | grep -sq "ESTABLISHED"` rc_amqp=$? if [ $rc_amqp -ne 0 ] || [ $rc_db -ne 0 ]; then ocf_log err "Nova Cert is not connected to the AMQP server and/or the database server: AMQP connection test returned $rc_amqp and database connection test returned $rc_db" return $OCF_NOT_RUNNING fi fi ocf_log debug "OpenStack Nova Cert (nova-cert) monitor succeeded" return $OCF_SUCCESS } nova_cert_start() { local rc nova_cert_status rc=$? if [ $rc -eq $OCF_SUCCESS ]; then ocf_log info "OpenStack Nova Cert (nova-cert) already running" return $OCF_SUCCESS fi # run the actual nova-cert daemon. 
Don't use ocf_run as we're sending the tool's output # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid # Spin waiting for the server to come up. # Let the CRM/LRM time us out if required while true; do nova_cert_monitor rc=$? [ $rc -eq $OCF_SUCCESS ] && break if [ $rc -ne $OCF_NOT_RUNNING ]; then ocf_log err "OpenStack Nova Cert (nova-cert) start failed" exit $OCF_ERR_GENERIC fi sleep 1 done ocf_log info "OpenStack Nova Cert (nova-cert) started" return $OCF_SUCCESS } nova_cert_stop() { local rc local pid nova_cert_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then ocf_log info "OpenStack Nova Cert (nova-cert) already stopped" return $OCF_SUCCESS fi # Try SIGTERM pid=`cat $OCF_RESKEY_pid` ocf_run kill -s TERM $pid rc=$? if [ $rc -ne 0 ]; then ocf_log err "OpenStack Nova Cert (nova-cert) couldn't be stopped" exit $OCF_ERR_GENERIC fi # stop waiting shutdown_timeout=15 if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) fi count=0 while [ $count -lt $shutdown_timeout ]; do nova_cert_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then break fi count=`expr $count + 1` sleep 1 ocf_log debug "OpenStack Nova Cert (nova-cert) still hasn't stopped yet. Waiting ..." done nova_cert_status rc=$? if [ $rc -ne $OCF_NOT_RUNNING ]; then # SIGTERM didn't help either, try SIGKILL ocf_log info "OpenStack Nova Cert (nova-cert) failed to stop after ${shutdown_timeout}s \ using SIGTERM. Trying SIGKILL ..." ocf_run kill -s KILL $pid fi ocf_log info "OpenStack Nova Cert (nova-cert) stopped" rm -f $OCF_RESKEY_pid return $OCF_SUCCESS } ####################################################################### case "$1" in meta-data) meta_data exit $OCF_SUCCESS;; usage|help) usage exit $OCF_SUCCESS;; esac # Anything except meta-data and help must pass validation nova_cert_validate || exit $? # What kind of method was invoked? case "$1" in start) nova_cert_start;; stop) nova_cert_stop;; status) nova_cert_status;; monitor) nova_cert_monitor;; validate-all) ;; *) usage exit $OCF_ERR_UNIMPLEMENTED;; esac openstack-resource-agents-2012.2~f3/ocf/keystone0000664000000000000000000002536112032330575020364 0ustar rootroot#!/bin/sh # # # OpenStack Keystone # # Description: Manages an OpenStack Keystone process as an HA resource # # Authors: Martin Gerhard Loschwitz # # Support: openstack@lists.launchpad.net # License: Apache Software License (ASL) 2.0 # # (c) 2012 hastexo Professional Services GmbH # # See usage() function below for more details ... # # OCF instance parameters: # OCF_RESKEY_binary # OCF_RESKEY_client_binary # OCF_RESKEY_config # OCF_RESKEY_os_username # OCF_RESKEY_os_password # OCF_RESKEY_os_tenant_name # OCF_RESKEY_os_auth_url # OCF_RESKEY_user # OCF_RESKEY_pid # OCF_RESKEY_additional_parameters ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . 
${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ####################################################################### # Fill in some defaults if no values are specified OCF_RESKEY_binary_default="keystone-all" OCF_RESKEY_config_default="/etc/keystone/keystone.conf" OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" OCF_RESKEY_user_default="keystone" OCF_RESKEY_client_binary_default="keystone" : ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} : ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} : ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} : ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} : ${OCF_RESKEY_client_binary=${OCF_RESKEY_client_binary_default}} ####################################################################### usage() { cat < 1.0 Resource agent for the OpenStack Identity Service (Keystone) May manage a keystone-all instance or a clone set that creates a distributed keystone cluster. Manages the OpenStack Identity Service (Keystone) Location of the OpenStack Identity Service server binary (keystone-all) Keystone server binary (keystone-all) Location of the OpenStack Identity Service client binary (keystone) Keystone server binary (keystone) Location of the OpenStack Identity Service configuration file Keystone configuration file The username to use when logging into Keystone for monitoring purposes Keystone monitoring login The password to use when logging into Keystone for monitoring purposes Keystone monitoring password The tenant to use when logging into Keystone for monitoring purposes Keystone monitoring tenant The URL pointing to this Keystone instance to use when logging in for monitoring purposes Keystone URL for monitoring login User running OpenStack Identity (Keystone) OpenStack Identity (Keystone) user The pid file to use for this Keystone instance (keystone-all) OpenStack Identity (Keystone) pid file Additional parameters to pass on to the Keystone server (keystone-all) Additional parameters for the Keystone server END } ####################################################################### # Functions invoked by resource manager actions keystone_validate() { local rc check_binary $OCF_RESKEY_binary check_binary $OCF_RESKEY_client_binary # A config file on shared storage that is not available # during probes is OK. if [ ! -f $OCF_RESKEY_config ]; then if ! ocf_is_probe; then ocf_log err "Config $OCF_RESKEY_config doesn't exist" return $OCF_ERR_INSTALLED fi ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" fi getent passwd $OCF_RESKEY_user >/dev/null 2>&1 rc=$? if [ $rc -ne 0 ]; then ocf_log err "User $OCF_RESKEY_user doesn't exist" return $OCF_ERR_INSTALLED fi true } keystone_status() { local pid local rc if [ ! -f $OCF_RESKEY_pid ]; then ocf_log info "OpenStack Identity (Keystone) is not running" return $OCF_NOT_RUNNING else pid=`cat $OCF_RESKEY_pid` fi ocf_run -warn kill -s 0 $pid rc=$? if [ $rc -eq 0 ]; then return $OCF_SUCCESS else ocf_log info "Old PID file found, but OpenStack Identity (Keystone) is not running" return $OCF_NOT_RUNNING fi } keystone_monitor() { local rc keystone_status rc=$? # If status returned anything but success, return that immediately if [ $rc -ne $OCF_SUCCESS ]; then return $rc fi # Check whether we are supposed to monitor by logging into Keystone # and do it if that's the case. 
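# The check below is only attempted when all of os_username, os_password,
# os_tenant_name and os_auth_url are set; it runs the keystone client's
# user-list command with those credentials and treats any non-zero exit code
# as a monitor failure.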
if [ -n "$OCF_RESKEY_client_binary" ] && [ -n "$OCF_RESKEY_os_username" ] \ && [ -n "$OCF_RESKEY_os_password" ] && [ -n "$OCF_RESKEY_os_tenant_name" ] \ && [ -n "$OCF_RESKEY_os_auth_url" ]; then ocf_run -q $OCF_RESKEY_client_binary \ --os-username "$OCF_RESKEY_os_username" \ --os-password "$OCF_RESKEY_os_password" \ --os-tenant-name "$OCF_RESKEY_os_tenant_name" \ --os-auth-url "$OCF_RESKEY_os_auth_url" \ user-list > /dev/null 2>&1 rc=$? if [ $rc -ne 0 ]; then ocf_log err "Failed to connect to the OpenStack Identity (Keystone): $rc" return $OCF_NOT_RUNNING fi fi ocf_log debug "OpenStack Identity (Keystone) monitor succeeded" return $OCF_SUCCESS } keystone_start() { local rc keystone_status rc=$? if [ $rc -eq $OCF_SUCCESS ]; then ocf_log info "OpenStack Identity (Keystone) already running" return $OCF_SUCCESS fi # run the actual keystone daemon. Don't use ocf_run as we're sending the tool's output # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file $OCF_RESKEY_config \ $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid # Spin waiting for the server to come up. # Let the CRM/LRM time us out if required while true; do sleep 1 keystone_monitor rc=$? [ $rc -eq $OCF_SUCCESS ] && break if [ $rc -ne $OCF_NOT_RUNNING ]; then ocf_log err "OpenStack Identity (Keystone) start failed" exit $OCF_ERR_GENERIC fi done ocf_log info "OpenStack Identity (Keystone) started" return $OCF_SUCCESS } keystone_stop() { local rc local pid keystone_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then ocf_log info "OpenStack Identity (Keystone) already stopped" return $OCF_SUCCESS fi # Try SIGTERM pid=`cat $OCF_RESKEY_pid` ocf_run kill -s TERM $pid rc=$? if [ $rc -ne 0 ]; then ocf_log err "OpenStack Identity (Keystone) couldn't be stopped" exit $OCF_ERR_GENERIC fi # stop waiting shutdown_timeout=15 if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) fi count=0 while [ $count -lt $shutdown_timeout ]; do keystone_status rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then break fi count=`expr $count + 1` sleep 1 ocf_log debug "OpenStack Identity (Keystone) still hasn't stopped yet. Waiting ..." done keystone_status rc=$? if [ $rc -ne $OCF_NOT_RUNNING ]; then # SIGTERM didn't help either, try SIGKILL ocf_log info "OpenStack Identity (Keystone) failed to stop after ${shutdown_timeout}s \ using SIGTERM. Trying SIGKILL ..." ocf_run kill -s KILL $pid fi ocf_log info "OpenStack Identity (Keystone) stopped" rm -f $OCF_RESKEY_pid return $OCF_SUCCESS } ####################################################################### case "$1" in meta-data) meta_data exit $OCF_SUCCESS;; usage|help) usage exit $OCF_SUCCESS;; esac # Anything except meta-data and help must pass validation keystone_validate || exit $? # What kind of method was invoked? 
case "$1" in start) keystone_start;; stop) keystone_stop;; status) keystone_status;; monitor) keystone_monitor;; validate-all) ;; *) usage exit $OCF_ERR_UNIMPLEMENTED;; esac openstack-resource-agents-2012.2~f3/ocf/nova-scheduler0000664000000000000000000003603212032330575021437 0ustar rootroot#!/bin/sh # # # OpenStack Scheduler Service (nova-scheduler) # # Description: Manages an OpenStack Scheduler Service (nova-scheduler) process as an HA resource # # Authors: Sébastien Han # Mainly inspired by the Glance API resource agent written by Martin Gerhard Loschwitz from Hastexo: http://goo.gl/whLpr # # Support: openstack@lists.launchpad.net # License: Apache Software License (ASL) 2.0 # # # See usage() function below for more details ... # # OCF instance parameters: # OCF_RESKEY_binary # OCF_RESKEY_config # OCF_RESKEY_user # OCF_RESKEY_pid # OCF_RESKEY_monitor_binary # OCF_RESKEY_database_server_port # OCF_RESKEY_amqp_server_port # OCF_RESKEY_zeromq # OCF_RESKEY_additional_parameters ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ####################################################################### # Fill in some defaults if no values are specified OCF_RESKEY_binary_default="nova-scheduler" OCF_RESKEY_config_default="/etc/nova/nova.conf" OCF_RESKEY_user_default="nova" OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" OCF_RESKEY_database_server_port_default="3306" OCF_RESKEY_amqp_server_port_default="5672" OCF_RESKEY_zeromq_default="false" : ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} : ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} : ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} : ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} : ${OCF_RESKEY_database_server_port=${OCF_RESKEY_database_server_port_default}} : ${OCF_RESKEY_amqp_server_port=${OCF_RESKEY_amqp_server_port_default}} : ${OCF_RESKEY_zeromq=${OCF_RESKEY_zeromq_default}} ####################################################################### usage() { cat < 1.0 Resource agent for the OpenStack Nova Scheduler Service (nova-scheduler) May manage a nova-scheduler instance or a clone set that creates a distributed nova-scheduler cluster. Manages the OpenStack Scheduler Service (nova-scheduler) Location of the OpenStack Nova Scheduler server binary (nova-scheduler) OpenStack Nova Scheduler server binary (nova-scheduler) Location of the OpenStack Scheduler Service (nova-scheduler) configuration file OpenStack Nova Scheduler (nova-scheduler registry) config file User running OpenStack Scheduler Service (nova-scheduler) OpenStack Scheduler Service (nova-scheduler) user The pid file to use for this OpenStack Scheduler Service (nova-scheduler) instance OpenStack Scheduler Service (nova-scheduler) pid file The listening port number of the database server. Use for monitoring purposes Database listening port The listening port number of the AMQP server. Use for monitoring purposes AMQP listening port If zeromq is used, this will disable the connection test to the AMQP server. 
Use for monitoring purposes Zero-MQ usage Additional parameters to pass on to the OpenStack Scheduler Service (nova-scheduler) Additional parameters for nova-scheduler END } ####################################################################### # Functions invoked by resource manager actions nova_scheduler_check_port() { # This function has been taken from the squid RA and improved a bit # The length of the integer must be 4 # Examples of valid port: "1080", "0080" # Examples of invalid port: "1080bad", "0", "0000", "" local int local cnt int="$1" cnt=${#int} echo $int |egrep -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*' if [ $? -ne 0 ] || [ $cnt -ne 4 ]; then ocf_log err "Invalid port number: $1" exit $OCF_ERR_CONFIGURED fi } nova_scheduler_validate() { local rc check_binary $OCF_RESKEY_binary check_binary netstat nova_scheduler_check_port $OCF_RESKEY_database_server_port nova_scheduler_check_port $OCF_RESKEY_amqp_server_port # A config file on shared storage that is not available # during probes is OK. if [ ! -f $OCF_RESKEY_config ]; then if ! ocf_is_probe; then ocf_log err "Config $OCF_RESKEY_config doesn't exist" return $OCF_ERR_INSTALLED fi ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" fi getent passwd $OCF_RESKEY_user >/dev/null 2>&1 rc=$? if [ $rc -ne 0 ]; then ocf_log err "User $OCF_RESKEY_user doesn't exist" return $OCF_ERR_INSTALLED fi true } nova_scheduler_status() { local pid local rc if [ ! -f $OCF_RESKEY_pid ]; then ocf_log info "OpenStack Nova Scheduler (nova-scheduler) is not running" return $OCF_NOT_RUNNING else pid=`cat $OCF_RESKEY_pid` fi ocf_run -warn kill -s 0 $pid rc=$? if [ $rc -eq 0 ]; then return $OCF_SUCCESS else ocf_log info "Old PID file found, but OpenStack Nova Scheduler (nova-scheduler) is not running" return $OCF_NOT_RUNNING fi } nova_scheduler_monitor() { local rc local pid local rc_db local rc_amqp local scheduler_db_check local scheduler_amqp_check nova_scheduler_status rc=$? # If status returned anything but success, return that immediately if [ $rc -ne $OCF_SUCCESS ]; then return $rc fi # Check the connections according to the PID. # We are sure to hit the scheduler process and not other nova process with the same connection behavior (for example nova-cert) if ocf_is_true "$OCF_RESKEY_zeromq"; then pid=`cat $OCF_RESKEY_pid` scheduler_db_check=`netstat -punt | grep -s "$OCF_RESKEY_database_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` rc_db=$? if [ $rc_db -ne 0 ]; then ocf_log err "Nova Scheduler is not connected to the database server: $rc_db" return $OCF_NOT_RUNNING fi else pid=`cat $OCF_RESKEY_pid` scheduler_db_check=`netstat -punt | grep -s "$OCF_RESKEY_database_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` rc_db=$? scheduler_amqp_check=`netstat -punt | grep -s "$OCF_RESKEY_amqp_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` rc_amqp=$? if [ $rc_amqp -ne 0 ] || [ $rc_db -ne 0 ]; then ocf_log err "Nova Scheduler is not connected to the AMQP server and/or the database server: AMQP connection test returned $rc_amqp and database connection test returned $rc_db" return $OCF_NOT_RUNNING fi fi ocf_log debug "OpenStack Nova Scheduler (nova-scheduler) monitor succeeded" return $OCF_SUCCESS } nova_scheduler_start() { local rc nova_scheduler_status rc=$? if [ $rc -eq $OCF_SUCCESS ]; then ocf_log info "OpenStack Nova Scheduler (nova-scheduler) already running" return $OCF_SUCCESS fi # run the actual nova-scheduler daemon. 
nova_scheduler_start() {
    local rc

    nova_scheduler_status
    rc=$?
    if [ $rc -eq $OCF_SUCCESS ]; then
        ocf_log info "OpenStack Nova Scheduler (nova-scheduler) already running"
        return $OCF_SUCCESS
    fi

    # run the actual nova-scheduler daemon. Don't use ocf_run as we're sending the tool's output
    # straight to /dev/null anyway and using ocf_run would break stdout-redirection here.
    su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \
        $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid

    # Spin waiting for the server to come up.
    while true; do
        nova_scheduler_monitor
        rc=$?
        [ $rc -eq $OCF_SUCCESS ] && break
        if [ $rc -ne $OCF_NOT_RUNNING ]; then
            ocf_log err "OpenStack Nova Scheduler (nova-scheduler) start failed"
            exit $OCF_ERR_GENERIC
        fi
        sleep 1
    done

    ocf_log info "OpenStack Nova Scheduler (nova-scheduler) started"
    return $OCF_SUCCESS
}

nova_scheduler_stop() {
    local rc
    local pid

    nova_scheduler_status
    rc=$?
    if [ $rc -eq $OCF_NOT_RUNNING ]; then
        ocf_log info "OpenStack Nova Scheduler (nova-scheduler) already stopped"
        return $OCF_SUCCESS
    fi

    # Try SIGTERM
    pid=`cat $OCF_RESKEY_pid`
    ocf_run kill -s TERM $pid
    rc=$?
    if [ $rc -ne 0 ]; then
        ocf_log err "OpenStack Nova Scheduler (nova-scheduler) couldn't be stopped"
        exit $OCF_ERR_GENERIC
    fi

    # stop waiting
    shutdown_timeout=15
    if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then
        shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5))
    fi
    count=0
    while [ $count -lt $shutdown_timeout ]; do
        nova_scheduler_status
        rc=$?
        if [ $rc -eq $OCF_NOT_RUNNING ]; then
            break
        fi
        count=`expr $count + 1`
        sleep 1
        ocf_log debug "OpenStack Nova Scheduler (nova-scheduler) still hasn't stopped yet. Waiting ..."
    done

    nova_scheduler_status
    rc=$?
    if [ $rc -ne $OCF_NOT_RUNNING ]; then
        # SIGTERM didn't help either, try SIGKILL
        ocf_log info "OpenStack Nova Scheduler (nova-scheduler) failed to stop after ${shutdown_timeout}s \
          using SIGTERM. Trying SIGKILL ..."
        ocf_run kill -s KILL $pid
    fi

    ocf_log info "OpenStack Nova Scheduler (nova-scheduler) stopped"

    rm -f $OCF_RESKEY_pid

    return $OCF_SUCCESS
}

#######################################################################

case "$1" in
  meta-data)    meta_data
                exit $OCF_SUCCESS;;
  usage|help)   usage
                exit $OCF_SUCCESS;;
esac

# Anything except meta-data and help must pass validation
nova_scheduler_validate || exit $?

# What kind of method was invoked?
case "$1" in
  start)        nova_scheduler_start;;
  stop)         nova_scheduler_stop;;
  status)       nova_scheduler_status;;
  monitor)      nova_scheduler_monitor;;
  validate-all) ;;
  *)            usage
                exit $OCF_ERR_UNIMPLEMENTED;;
esac
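A minimal Pacemaker configuration for the agent above might look like the
following crm shell snippet (illustrative only: the resource name
p_nova-scheduler, the monitor interval and the timeout are arbitrary choices,
and the params map onto the OCF_RESKEY_* variables documented in the script):

    crm configure primitive p_nova-scheduler ocf:openstack:nova-scheduler \
        params config="/etc/nova/nova.conf" user="nova" \
               database_server_port="3306" amqp_server_port="5672" \
        op monitor interval="30s" timeout="30s"

Since the scheduler is stateless, the primitive can also be wrapped in a clone
set ("crm configure clone ...") to run it on several controllers at once, which
is what the clone-set wording in the metadata refers to.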
openstack-resource-agents-2012.2~f3/ocf/quantum-agent-dhcp

#!/bin/sh
#
#
# OpenStack DHCP Service (quantum-dhcp-agent)
#
# Description:  Manages an OpenStack DHCP Service (quantum-dhcp-agent) process as an HA resource
#
# Authors:      Emilien Macchi
#               Mainly inspired by the Nova Network resource agent written by Emilien Macchi & Sebastien Han
#
# Support:      openstack@lists.launchpad.net
# License:      Apache Software License (ASL) 2.0
#
#
# See usage() function below for more details ...
#
# OCF instance parameters:
#   OCF_RESKEY_binary
#   OCF_RESKEY_config
#   OCF_RESKEY_plugin_config
#   OCF_RESKEY_user
#   OCF_RESKEY_pid
#   OCF_RESKEY_amqp_server_port
#   OCF_RESKEY_additional_parameters
#######################################################################
# Initialization:

: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs

#######################################################################

# Fill in some defaults if no values are specified

OCF_RESKEY_binary_default="quantum-dhcp-agent"
OCF_RESKEY_config_default="/etc/quantum/quantum.conf"
OCF_RESKEY_plugin_config_default="/etc/quantum/dhcp_agent.ini"
OCF_RESKEY_user_default="quantum"
OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid"
OCF_RESKEY_amqp_server_port_default="5672"

: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}}
: ${OCF_RESKEY_config=${OCF_RESKEY_config_default}}
: ${OCF_RESKEY_plugin_config=${OCF_RESKEY_plugin_config_default}}
: ${OCF_RESKEY_user=${OCF_RESKEY_user_default}}
: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}}
: ${OCF_RESKEY_amqp_server_port=${OCF_RESKEY_amqp_server_port_default}}

#######################################################################

usage() {
    cat <
1.0
Resource agent for the OpenStack Quantum DHCP Service (quantum-dhcp-agent)
May manage a quantum-dhcp-agent instance or a clone set that creates a distributed quantum-dhcp-agent cluster.
Manages the OpenStack DHCP Service (quantum-dhcp-agent)
Location of the OpenStack DHCP Server binary (quantum-dhcp-agent)
OpenStack DHCP Server binary (quantum-dhcp-agent)
Location of the OpenStack Quantum Service (quantum-server) configuration file
OpenStack DHCP Server (quantum-server) config file
Location of the OpenStack DHCP Service (quantum-dhcp-agent) configuration file
OpenStack DHCP Server (quantum-dhcp-agent) config file
User running OpenStack DHCP Service (quantum-dhcp-agent)
OpenStack DHCP Service (quantum-dhcp-agent) user
The pid file to use for this OpenStack DHCP Service (quantum-dhcp-agent) instance
OpenStack DHCP Service (quantum-dhcp-agent) pid file
The listening port number of the AMQP server. Mandatory to perform a monitor check
AMQP listening port
Additional parameters to pass on to the OpenStack DHCP Service (quantum-dhcp-agent)
Additional parameters for quantum-dhcp-agent
END
}

#######################################################################
# Functions invoked by resource manager actions

quantum_dhcp_agent_validate() {
    local rc

    check_binary $OCF_RESKEY_binary
    check_binary netstat

    # A config file on shared storage that is not available
    # during probes is OK.
    if [ ! -f $OCF_RESKEY_config ]; then
        if ! ocf_is_probe; then
            ocf_log err "Config $OCF_RESKEY_config doesn't exist"
            return $OCF_ERR_INSTALLED
        fi
        ocf_log_warn "Config $OCF_RESKEY_config not available during a probe"
    fi

    getent passwd $OCF_RESKEY_user >/dev/null 2>&1
    rc=$?
    if [ $rc -ne 0 ]; then
        ocf_log err "User $OCF_RESKEY_user doesn't exist"
        return $OCF_ERR_INSTALLED
    fi

    true
}

quantum_dhcp_agent_status() {
    local pid
    local rc

    if [ ! -f $OCF_RESKEY_pid ]; then
        ocf_log info "OpenStack DHCP Server (quantum-dhcp-agent) is not running"
        return $OCF_NOT_RUNNING
    else
        pid=`cat $OCF_RESKEY_pid`
    fi

    ocf_run -warn kill -s 0 $pid
    rc=$?
    if [ $rc -eq 0 ]; then
        return $OCF_SUCCESS
    else
        ocf_log info "Old PID file found, but OpenStack DHCP Server (quantum-dhcp-agent) is not running"
        return $OCF_NOT_RUNNING
    fi
}

quantum_dhcp_agent_monitor() {
    local rc
    local pid
    local network_amqp_check

    quantum_dhcp_agent_status
    rc=$?

    # If status returned anything but success, return that immediately
    if [ $rc -ne $OCF_SUCCESS ]; then
        return $rc
    fi

    # Check the connections according to the PID.
    # We are sure to hit the DHCP agent process and not another Quantum process
    # with the same connection behavior (for example quantum-server)
    pid=`cat $OCF_RESKEY_pid`
    # check the connections according to the PID
    network_amqp_check=`netstat -punt | grep -s "$OCF_RESKEY_amqp_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"`
    rc=$?
    if [ $rc -ne 0 ]; then
        ocf_log err "Quantum DHCP Server is not connected to the AMQP server: $rc"
        return $OCF_NOT_RUNNING
    fi

    ocf_log debug "OpenStack DHCP Server (quantum-dhcp-agent) monitor succeeded"
    return $OCF_SUCCESS
}

quantum_dhcp_agent_start() {
    local rc

    quantum_dhcp_agent_status
    rc=$?
    if [ $rc -eq $OCF_SUCCESS ]; then
        ocf_log info "OpenStack DHCP Server (quantum-dhcp-agent) already running"
        return $OCF_SUCCESS
    fi

    # run the actual quantum-dhcp-agent daemon. Don't use ocf_run as we're sending the tool's output
    # straight to /dev/null anyway and using ocf_run would break stdout-redirection here.
    su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \
        --config-file=$OCF_RESKEY_plugin_config --log-file=/var/log/quantum/dhcp-agent.log $OCF_RESKEY_additional_parameters"' >> \
        /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid

    # Spin waiting for the server to come up.
    # Let the CRM/LRM time us out if required
    while true; do
        quantum_dhcp_agent_monitor
        rc=$?
        [ $rc -eq $OCF_SUCCESS ] && break
        if [ $rc -ne $OCF_NOT_RUNNING ]; then
            ocf_log err "OpenStack DHCP Server (quantum-dhcp-agent) start failed"
            exit $OCF_ERR_GENERIC
        fi
        sleep 1
    done

    ocf_log info "OpenStack DHCP Server (quantum-dhcp-agent) started"
    return $OCF_SUCCESS
}

quantum_dhcp_agent_stop() {
    local rc
    local pid

    quantum_dhcp_agent_status
    rc=$?
    if [ $rc -eq $OCF_NOT_RUNNING ]; then
        ocf_log info "OpenStack DHCP Server (quantum-dhcp-agent) already stopped"
        return $OCF_SUCCESS
    fi

    # Try SIGTERM
    pid=`cat $OCF_RESKEY_pid`
    pid="$pid `ps ax | grep -v grep | grep 'dnsmasq' | cut -c1-5`"
    ocf_run kill -s TERM $pid
    rc=$?
    if [ $rc -ne 0 ]; then
        ocf_log err "OpenStack DHCP Server (quantum-dhcp-agent) couldn't be stopped"
        exit $OCF_ERR_GENERIC
    fi

    # stop waiting
    shutdown_timeout=15
    if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then
        shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5))
    fi
    count=0
    while [ $count -lt $shutdown_timeout ]; do
        quantum_dhcp_agent_status
        rc=$?
        if [ $rc -eq $OCF_NOT_RUNNING ]; then
            break
        fi
        count=`expr $count + 1`
        sleep 1
        ocf_log debug "OpenStack DHCP Server (quantum-dhcp-agent) still hasn't stopped yet. Waiting ..."
    done

    quantum_dhcp_agent_status
    rc=$?
    if [ $rc -ne $OCF_NOT_RUNNING ]; then
        # SIGTERM didn't help either, try SIGKILL
        ocf_log info "OpenStack DHCP Server (quantum-dhcp-agent) failed to stop after ${shutdown_timeout}s \
          using SIGTERM. Trying SIGKILL ..."
        ocf_run kill -s KILL $pid
    fi

    ocf_log info "OpenStack DHCP Server (quantum-dhcp-agent) stopped"

    rm -f $OCF_RESKEY_pid

    return $OCF_SUCCESS
}

#######################################################################

case "$1" in
  meta-data)    meta_data
                exit $OCF_SUCCESS;;
  usage|help)   usage
                exit $OCF_SUCCESS;;
esac

# Anything except meta-data and help must pass validation
quantum_dhcp_agent_validate || exit $?

# What kind of method was invoked?
case "$1" in
  start)        quantum_dhcp_agent_start;;
  stop)         quantum_dhcp_agent_stop;;
  status)       quantum_dhcp_agent_status;;
  monitor)      quantum_dhcp_agent_monitor;;
  validate-all) ;;
  *)            usage
                exit $OCF_ERR_UNIMPLEMENTED;;
esac
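The DHCP agent can be configured analogously once the file is installed under
the openstack OCF provider. A hypothetical crm shell example (resource name,
interval and timeout are arbitrary; the params correspond to the OCF_RESKEY_*
variables above):

    crm configure primitive p_quantum-dhcp-agent ocf:openstack:quantum-agent-dhcp \
        params config="/etc/quantum/quantum.conf" \
               plugin_config="/etc/quantum/dhcp_agent.ini" user="quantum" \
        op monitor interval="30s" timeout="30s"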
openstack-resource-agents-2012.2~f3/Makefile

#!/usr/bin/make -f
#
# Makefile for the OpenStack resource agents toolsuite
#
# Copyright (C) 2012 hastexo Professional Services GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.

# define some common variables
INSTALL = /usr/bin/install

default:

install:
	mkdir -p $(DESTDIR)/usr/lib/ocf/resource.d/openstack
	for file in ocf/*; do \
	  $(INSTALL) -t $(DESTDIR)/usr/lib/ocf/resource.d/openstack -m 0755 $${file} ; \
	done
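The install target above copies every agent in ocf/ into the standard OCF
provider directory. A typical invocation (illustrative; DESTDIR is only needed
when staging into a package build root) would be:

    make install DESTDIR=/tmp/pkgroot
    ls /tmp/pkgroot/usr/lib/ocf/resource.d/openstack

Run "make install" as root without DESTDIR to install directly to
/usr/lib/ocf/resource.d/openstack, where Pacemaker resolves the agents as
ocf:openstack:<agent-name>.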
openstack-resource-agents-2012.2~f3/README.md

openstack-resource-agents
=========================

Pacemaker High Availability resource agents for OpenStack

openstack-resource-agents-2012.2~f3/debian/

openstack-resource-agents-2012.2~f3/debian/copyright

Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: openstack-resource-agents
Source: http://github.com/madkiss/openstack-resource-agents

Files: Makefile debian/* ocf/glance-api ocf/glance-registry ocf/keystone
Copyright: 2012 hastexo Professional Services GmbH
License: Apache

Files: ocf/nova-api ocf/nova-cert ocf/nova-consoleauth ocf/nova-novnc ocf/nova-scheduler
Copyright: 2012 Sebastien Han
License: Apache

Files: ocf/quantum-server
Copyright: 2012 Emilien Macchi
License: Apache

License: Apache
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
 .
 http://www.apache.org/licenses/LICENSE-2.0
 .
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 .
 On Debian systems, the complete text of the Apache 2.0 license can be
 found at "/usr/share/common-licenses/Apache-2.0"

openstack-resource-agents-2012.2~f3/debian/changelog

openstack-resource-agents (2012.2~f3-1) unstable; urgency=low

  * Initial release

 -- Martin Loschwitz  Tue, 28 Aug 2012 23:08:19 +0000

openstack-resource-agents-2012.2~f3/debian/control

Source: openstack-resource-agents
Section: admin
Priority: extra
Maintainer: Martin Loschwitz
Build-Depends: debhelper (>= 9.0.0)
Standards-Version: 3.9.3
Homepage: http://github.com/madkiss/openstack-resource-agents
#Vcs-Git: git://github.com/madkiss/openstack-resource-agents.git

Package: openstack-resource-agents
Architecture: all
Depends: ${misc:Depends}, netstat, python-keystoneclient, python-glanceclient, python-novaclient, curl
Description: pacemaker resource agents for OpenStack
 This package contains resource agents to run most of the OpenStack
 components inside a pacemaker-controlled high availability cluster.
 Agents for the following OpenStack components are included:
 .
  * glance-api
  * glance-registry
  * keystone
  * nova-api
  * nova-cert
  * nova-consoleauth
  * nova-network
  * nova-novnc
  * nova-objectstore
  * nova-scheduler
  * nova-volume
  * quantum-server
 .
 Install this package if you want this computer to act as member of a
 pacemaker HA cluster running OpenStack components.

openstack-resource-agents-2012.2~f3/debian/source/

openstack-resource-agents-2012.2~f3/debian/source/format

3.0 (quilt)

openstack-resource-agents-2012.2~f3/debian/rules

#!/usr/bin/make -f
# -*- makefile -*-
# Sample debian/rules that uses debhelper.
# This file was originally written by Joey Hess and Craig Small.
# As a special exception, when this file is copied by dh-make into a
# dh-make output file, you may use that output file without restriction.
# This special exception was added by Craig Small in version 0.37 of dh-make.

# Uncomment this to turn on verbose mode.
# export DH_VERBOSE=1

%:
	dh $@

openstack-resource-agents-2012.2~f3/debian/compat

9
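With the debian/ directory above in place, a binary package can be built from
the unpacked source tree in the usual way (illustrative; any standard Debian
build driver works, and the resulting filename follows from the package name,
version and architecture declared above):

    dpkg-buildpackage -us -uc
    sudo dpkg -i ../openstack-resource-agents_2012.2~f3-1_all.deb

The package installs the agents through the Makefile shown earlier, so they end
up under /usr/lib/ocf/resource.d/openstack on the target node.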